index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
66,851 | drunkpig/django-background-job | refs/heads/main | /background_job/apps.py | from django.apps import AppConfig
class BackgroundJobConfig(AppConfig):
    """Django AppConfig registering the ``background_job`` application."""
    name = 'background_job'  # dotted app label used in INSTALLED_APPS
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,852 | drunkpig/django-background-job | refs/heads/main | /background_job/migrations/0004_auto_20210225_0921.py | # Generated by Django 3.1.6 on 2021-02-25 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adjusts ActionLog.op_host and converts
    JobExecHistory.id to a CharField primary key (the run's instance UUID)."""

    dependencies = [
        ('background_job', '0003_auto_20210224_2354'),
    ]

    operations = [
        migrations.AlterField(
            model_name='actionlog',
            name='op_host',
            field=models.CharField(max_length=128, verbose_name='操作来源'),
        ),
        migrations.AlterField(
            model_name='jobexechistory',
            name='id',
            field=models.CharField(max_length=64, primary_key=True, serialize=False),
        ),
    ]
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,853 | drunkpig/django-background-job | refs/heads/main | /background_job/utils.py | import logging, socket
from django.db.models import Max
from background_job.models import DjangoJob, JobExecHistory, ActionLog
logger = logging.getLogger()
def get_max_job_version():
    """Return the highest DjangoJob.version stored, or 0 when the table is empty."""
    aggregated = DjangoJob.objects.aggregate(Max('version'))
    if aggregated:
        # aggregate() yields e.g. {'version__max': 5}; the value is None
        # when no rows exist yet.
        aggregated = aggregated['version__max']
    return 0 if aggregated is None else aggregated
def log_job_history(job, status, result=None, trace_message=None):
    """Create or update the JobExecHistory row for *job*'s current run.

    Best-effort: any database failure is logged and swallowed so job
    processing never dies because of history bookkeeping.
    """
    logger.info(f"job name={job.job_name}, status ={status}, instance_id={job.instance_id}")
    try:
        defaults = {
            "job_name": job.job_name,
            "job": job,
            "trigger_type": job.trigger_type,
            "version": job.version,
            "status": status,
            "result": result,
            "trace_message": trace_message,
        }
        # The run's instance UUID is the history row's primary key.
        JobExecHistory.objects.update_or_create(id=job.instance_id, defaults=defaults)
    except Exception as e:
        logger.exception(e)
        logger.error(f"job name={job.job_name}, status ={status}, instance_id={job.instance_id}")
def log_job_history_by_id(job_id, job_instance_id, status, result=None, trace_message=None):
    """Like log_job_history, but looks the DjangoJob up by primary key first."""
    logger.info(f"job name={job_id}, status ={status}, instance_id={job_instance_id}")
    try:
        job = DjangoJob.objects.get(id=job_id)
        defaults = {
            "job_name": job.job_name,
            "job": job,
            "trigger_type": job.trigger_type,
            "version": job.version,
            "status": status,
            "result": result,
            "trace_message": trace_message,
        }
        JobExecHistory.objects.update_or_create(id=job_instance_id, defaults=defaults)
    except Exception as e:
        # Best-effort: history failures must not break job execution.
        logger.exception(e)
def log_action(action):
    """Best-effort audit entry recording *action* together with this host's name."""
    try:
        ActionLog.objects.create(action=action, op_host=socket.gethostname())
    except Exception as e:
        # Audit logging must never take the caller down.
        logger.exception(e)
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,854 | drunkpig/django-background-job | refs/heads/main | /background_job/job.py | import functools, inspect
import logging, hashlib, json
from datetime import datetime
from background_job.models import DjangoJob, DelayedJob
from background_job.utils import get_max_job_version, log_action
logger = logging.getLogger(__name__)
JOB_VERSION = get_max_job_version() + 1
def md5(astring):
    """Return the hex MD5 digest of *astring* (used as a stable job id)."""
    return hashlib.md5(astring.encode()).hexdigest()
def __x_job(name, trigger_type, trigger_exp, enable=True, max_instances=1, misfire_grace_time=3, coalesce=False,
            log_succ_interval=0, log_err_interval=0, description=None, args=None, kwargs=None):
    """
    Shared decorator factory behind cron_job / interval_job / once_job / boot_job.

    Upserts a DjangoJob row (id = md5(name)) as a side effect of decorating —
    i.e. at module import time — then returns the function wrapped unchanged.

    NOTE(review): the database must be reachable when the jobs module is
    imported, since registration happens right here.
    """
    def inner(func):
        job_id = md5(name)
        # Dotted "module.function" path used later to re-import and run the job.
        mod_func = f"{inspect.getmodule(func).__name__}.{func.__name__ }"
        job_parameters = {
            'args': tuple(args) if args is not None else (),
            'kwargs': dict(kwargs) if kwargs is not None else {},
        }
        values = { "job_name":name, "description":description,
                   "job_function":mod_func, "trigger_type":trigger_type,
                   "job_parameters":json.dumps(job_parameters), "version":JOB_VERSION,
                   "trigger_expression":trigger_exp, "max_instances":max_instances,
                   "misfire_grace_time":misfire_grace_time, "coalesce":coalesce,
                   "log_succ_interval":log_succ_interval, "log_err_interval":log_err_interval,}
        obj, created = DjangoJob.objects.update_or_create(id=job_id,
                                                          defaults=values)
        if created:
            # On first creation take the enable flag from code; on later
            # imports keep whatever was set manually (e.g. via the admin).
            obj.enable = enable
            obj.save()
            log_action(f"create new job: {name}")
        else:
            log_action(f"update new job: {name}")
        @functools.wraps(func)
        def real_func(*args, **kwargs):
            # The function itself is not altered; only registered.
            return func(*args, **kwargs)
        return real_func
    return inner
def cron_job(name, cron, enable=True, max_instances=1, misfire_grace_time=2, coalesce=False,
             log_succ_interval=0, log_err_interval=0, description=None, args=None, kwargs=None):
    """
    Register a cron-triggered background job.

    name: job name, chosen freely by the caller
    cron: crontab expression string  TODO validate the expression
    max_instances: max number of instances running for the same trigger
    misfire_grace_time: how many seconds past the scheduled time a run may still fire

    NOTE(review): the original docstring also described a ``parallel``
    parameter that does not exist in the signature.
    """
    return __x_job(name, 'cron', cron, max_instances=max_instances, enable=enable,
                   misfire_grace_time=misfire_grace_time, coalesce=coalesce,
                   log_succ_interval=log_succ_interval, log_err_interval=log_err_interval,
                   description=description, args=args, kwargs=kwargs)
def interval_job(name, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None, end_date=None,
                 max_instances=1, misfire_grace_time=2, coalesce=False, enable=True,
                 log_succ_interval=0, log_err_interval=0, description=None, args=None, kwargs=None):
    """Register a job fired repeatedly at a fixed interval; the interval
    spec is stored as the trigger expression in JSON form."""
    interval_spec = {
        "weeks": weeks,
        "days": days,
        "hours": hours,
        "minutes": minutes,
        "seconds": seconds,
        "start_date": start_date,
        "end_date": end_date,
    }
    return __x_job(name, 'interval', json.dumps(interval_spec), max_instances=max_instances,
                   misfire_grace_time=misfire_grace_time, coalesce=coalesce, enable=enable,
                   log_succ_interval=log_succ_interval, log_err_interval=log_err_interval,
                   description=description, args=args, kwargs=kwargs)
def once_job(name, run_at, max_instances=1, misfire_grace_time=2, coalesce=False, enable=True,
             log_succ_interval=0, log_err_interval=0, description=None, args=None, kwargs=None):
    """Register a job that fires exactly once at *run_at* ("YYYY-mm-dd HH:MM:SS")."""
    common = dict(max_instances=max_instances, misfire_grace_time=misfire_grace_time,
                  coalesce=coalesce, enable=enable,
                  log_succ_interval=log_succ_interval, log_err_interval=log_err_interval,
                  description=description, args=args, kwargs=kwargs)
    return __x_job(name, 'once', run_at, **common)
def boot_job(name, enable=True, description=None, args=None, kwargs=None):
    """Register a job that runs once shortly after the scheduler boots;
    the registration timestamp is stored as the trigger expression."""
    run_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    return __x_job(name, 'boot_once', run_at, enable=enable, description=description,
                   args=args, kwargs=kwargs)
def delayed_job(name, retry=3, enable=True, description=None):
    """
    Decorator for fire-and-forget delayed jobs.

    Calling the decorated function does NOT execute its body; instead it
    inserts a DelayedJob row capturing the call's arguments — presumably
    picked up later by the job processor (TODO confirm against JobProcessor).

    retry: how many times to retry; -1 appears to mean "must eventually complete"
    """
    def inner(func):
        @functools.wraps(func)
        def real_func(*args, **kwargs):
            # Dotted path re-imported by whatever executes the delayed job.
            mod_func = f"{inspect.getmodule(func).__name__}.{func.__name__}"
            job_parameters = {
                'args': tuple(args) if args is not None else (),
                'kwargs': dict(kwargs) if kwargs is not None else {},
            }
            values = {"job_name": name, "description": description,
                      "job_function": mod_func, "job_parameters": json.dumps(job_parameters),
                      "retry":retry, "enable":enable, "version":JOB_VERSION,
                      }
            # Note: returns None; the original function is never called here.
            DelayedJob.objects.create(**values)
        return real_func
    return inner
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,855 | drunkpig/django-background-job | refs/heads/main | /background_job/migrations/0003_auto_20210224_2354.py | # Generated by Django 2.2.13 on 2021-02-24 23:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make JobExecHistory.trace_message a nullable TextField."""

    dependencies = [
        ('background_job', '0002_auto_20210224_2353'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobexechistory',
            name='trace_message',
            field=models.TextField(blank=True, null=True, verbose_name='追踪日志'),
        ),
    ]
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,856 | drunkpig/django-background-job | refs/heads/main | /background_job/models.py | import copy
import datetime
import json
import uuid
from django.db import models
# Create your models here.
from django.utils.safestring import mark_safe
from background_job.Trigger import CronJobTrigger, IntervalJobTrigger, OnceJobTrigger
class DjangoJob(models.Model):
    """
    A registered background job definition (one row per decorated function).

    Rows are upserted at import time by the decorators in job.py; on each
    restart the scheduler side selects the highest ``version`` generation.
    """
    id = models.CharField(max_length=64, primary_key=True)  # md5 of the job name
    job_name = models.CharField(max_length=128)  # human-readable job name
    version = models.IntegerField(blank=False, null=False)  # generation; highest version wins after restart
    enable = models.BooleanField(default=True)
    description = models.TextField(blank=True, null=True)  # what the job does
    job_function = models.CharField(max_length=128, )  # dotted "module.function" path
    job_parameters = models.TextField(blank=True, )  # JSON {"args": [...], "kwargs": {...}}
    trigger_type = models.CharField(max_length=128, choices=[["cron","cron"],["interval",'interval'],
                                    ['once','once'],['boot_once','boot_once']])  # how the job is triggered
    trigger_expression = models.CharField(max_length=128)  # cron expr / interval JSON / run-at datetime
    max_instances = models.IntegerField(default=1)
    misfire_grace_time = models.IntegerField(default=0)  # seconds a missed run may still fire
    coalesce = models.BooleanField(default=False)  # whether to run every missed occurrence
    log_succ_interval = models.IntegerField(default=0)  # throttle success-history writes; 0 = always write
    log_err_interval = models.IntegerField(default=0)  # logging policy for consecutive failures
    gmt_update = models.DateTimeField(auto_now=True)  # last modified
    gmt_created = models.DateTimeField(auto_now_add=True)  # created

    class Meta:
        ordering = ('gmt_update', )

    def instance(self):
        # Shallow copy tagged with a fresh UUID identifying one concrete run.
        instance = copy.copy(self)
        instance.instance_id = str(uuid.uuid4())
        return instance

    def next_run_time(self):
        """Return ``(seconds_until_fire, fire_datetime)`` for this job's trigger."""
        if self.trigger_type=='cron':
            trigger:CronJobTrigger = CronJobTrigger.from_crontab(self.trigger_expression)
            seconds, dt = trigger.get_next_fire_time()
            return seconds, dt
        elif self.trigger_type=='interval':
            # trigger_expression holds the JSON kwargs written by interval_job().
            trigger_args = json.loads(self.trigger_expression)
            trigger:IntervalJobTrigger = IntervalJobTrigger(**trigger_args)
            seconds, dt = trigger.get_next_fire_time()
            return seconds, dt
        elif self.trigger_type=='once':
            trigger: OnceJobTrigger = OnceJobTrigger(self.trigger_expression)
            seconds, dt = trigger.get_next_fire_time()
            return seconds, dt
        elif self.trigger_type=='boot_once':
            # Boot jobs fire a fixed 10 seconds after scheduler start-up.
            delay_seconds = 10
            delta = datetime.timedelta(seconds=delay_seconds)
            dt = datetime.datetime.now()+delta
            return delay_seconds, dt
        else:
            # Unimplemented trigger type — TODO dispatch on trigger_type.
            raise Exception("*********没有实现的trigger type")
class DelayedJob(models.Model):
    """One queued fire-and-forget job, inserted by the ``delayed_job`` decorator."""
    id = models.AutoField(primary_key=True)
    job_name = models.CharField(max_length=128)  # human-readable job name
    version = models.IntegerField(blank=False, null=False)  # decorator generation at enqueue time
    enable = models.BooleanField(default=True)
    description = models.TextField(blank=True, null=True)  # what the job does
    job_function = models.CharField(max_length=128, )  # dotted "module.function" path
    job_parameters = models.TextField(blank=True, )  # JSON {"args": [...], "kwargs": {...}}
    retry = models.IntegerField(default=0)  # allowed retries; -1 appears to mean unlimited
    retry_cnt = models.IntegerField(default=0)  # retries consumed so far
    gmt_update = models.DateTimeField(auto_now=True)  # last modified
    gmt_created = models.DateTimeField(auto_now_add=True)  # created

    class Meta:
        ordering = ('gmt_update', )
class JobExecHistory(models.Model):
    """
    Execution record for one job run, keyed by the run's instance UUID.
    """
    # Status constants (also used as the stored choice values).
    NEW = "New"
    RUNNING = "Running"
    MAX_INSTANCES = "Max instances reached!"
    MISSED = "Missed!"
    ERROR = "Error!"
    SUCCESS = "Success"
    #id = models.AutoField(primary_key=True)
    id = models.CharField(max_length=64, primary_key=True)  # run instance UUID
    job = models.ForeignKey(DjangoJob, on_delete=models.CASCADE)
    job_name = models.CharField(max_length=128, verbose_name="任务名称")  # denormalized job name
    trigger_type = models.CharField(max_length=128, null=False, verbose_name="任务类型")
    version = models.IntegerField(blank=False, null=False)  # job generation at run time
    status = models.CharField(max_length=50, choices=[
        [NEW,NEW],[RUNNING,RUNNING],[SUCCESS,SUCCESS],[ERROR,ERROR],
        [MAX_INSTANCES,MAX_INSTANCES],[MISSED,MISSED]
    ])
    result = models.TextField(blank=True, null=True, verbose_name="执行返回结果")
    start_tm = models.DateTimeField(auto_now_add=True)  # run start time
    end_tm = models.DateTimeField(auto_now=True)  # finish (success|failure) time
    trace_message = models.TextField(blank=True, null=True, verbose_name="追踪日志")  # error traces etc.
    gmt_update = models.DateTimeField(auto_now=True)  # last modified
    gmt_created = models.DateTimeField(auto_now_add=True)  # created

    def html_status(self):
        """Render the status as a colored HTML paragraph for the admin list."""
        m = {
            self.NEW: "gray",
            self.RUNNING: "blue",
            self.MAX_INSTANCES: "yellow",
            self.MISSED: "yellow",
            self.ERROR: "red",
            self.SUCCESS: "green"
        }
        return mark_safe("<p style=\"color: {}\">{}</p>".format(
            m[self.status],
            self.status
        ))
    html_status.verbose_name="任务状态"

    def duration(self):
        """
        Duration of the run as a string.

        NOTE(review): a negative delta is replaced by the literal string
        "00:00:00" before ``str()`` — works only because ``str()`` is applied
        either way; confirm this clamping is intentional.
        """
        delta = self.end_tm-self.start_tm
        if delta < datetime.timedelta(milliseconds=0):
            delta = "00:00:00"
        return str(delta)

    class Meta:
        ordering = ('-start_tm', )
class ActionLog(models.Model):
    """Audit trail: which host performed which registration/update action."""
    id = models.AutoField(primary_key=True)
    action = models.CharField(max_length=256, verbose_name="操作")  # description of the action
    op_host = models.CharField(max_length=128, verbose_name="操作来源")  # hostname that performed it
    gmt_update = models.DateTimeField(auto_now=True)  # last modified
    gmt_created = models.DateTimeField(auto_now_add=True)  # created
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,857 | drunkpig/django-background-job | refs/heads/main | /background_job/management/commands/process_tasks.py | # -*- coding: utf-8 -*-
import logging
import queue
import random
import sys
import time
from importlib import import_module
from logging.config import fileConfig
from django import VERSION
from django.core.management.base import BaseCommand
from django.utils import autoreload
from background_job.JobProcessor import JobProcessor
from background_job.Scheduler import Scheduler
fileConfig("logging.ini")
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command that boots the scheduler and the job processor."""
    help = 'Run tasks that are scheduled to run on the queue'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def run(self, *args, **options):
        # Import each app's jobs module first so the decorators register
        # their jobs, then spin up the scheduler/processor threads.
        autodiscover()
        start_job_processor()

    def handle(self, *args, **options):
        # Django entry point; delegates to run().
        self.run(*args, **options)
def autodiscover():
    """Autodiscover ``jobs.py`` modules in INSTALLED_APPS (admin-style).

    Importing an app's ``jobs`` module runs its job decorators, which
    register the jobs in the database.

    Fix: the previous implementation used the ``imp`` module, which is
    deprecated and removed in Python 3.12; ``importlib.util.find_spec``
    is the supported replacement.
    """
    from importlib.util import find_spec
    from django.conf import settings
    for app in settings.INSTALLED_APPS:
        module_name = "%s.jobs" % app
        try:
            # find_spec raises ImportError when ``app`` itself is not
            # importable (or not a package), and returns None when the
            # app simply has no jobs submodule — skip in both cases.
            if find_spec(module_name) is None:
                continue
        except ImportError:
            continue
        import_module(module_name)
        logger.info("load module %s.jobs", app)
def start_job_processor():
    """Wire the Scheduler to the JobProcessor through a shared queue and start both threads."""
    shared_queue = queue.Queue()
    scheduler = Scheduler(queue=shared_queue)
    scheduler.start()
    worker = JobProcessor(shared_queue)
    worker.start()
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,858 | drunkpig/django-background-job | refs/heads/main | /background_job/migrations/0002_auto_20210224_2353.py | # Generated by Django 2.2.13 on 2021-02-24 23:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make JobExecHistory.result a nullable TextField."""

    dependencies = [
        ('background_job', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='jobexechistory',
            name='result',
            field=models.TextField(blank=True, null=True, verbose_name='执行返回结果'),
        ),
    ]
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,859 | drunkpig/django-background-job | refs/heads/main | /background_job/admin.py | import datetime
from django.contrib import admin
from django.db.models import Avg
from django.urls import reverse
from django.utils.html import format_html
from django.utils.timezone import now
from .models import DjangoJob, JobExecHistory, DelayedJob, ActionLog
admin.ModelAdmin.list_per_page = 20
@admin.register(DjangoJob)
class DjangoJobAdmin(admin.ModelAdmin):
    """Admin list view for registered background jobs."""
    list_display = ["job_name_html", "job_function", "job_parameters",
                    "trigger_type", "trigger_expression", "enable","max_instances",
                    "misfire_grace_time", "coalesce",
                    "log_succ_interval","log_err_interval",
                    "gmt_update", "gmt_created"]
    actions = []

    def job_name_html(self, obj):
        # Render the job name as a link to its admin change page.
        link = reverse("admin:background_job_djangojob_change", args=[obj.id])
        return format_html('<a href="{}">{}</a>', link, obj.job_name)
    job_name_html.short_description="Job Name"
@admin.register(DelayedJob)
class DelayedJobAdmin(admin.ModelAdmin):
    """Admin list view for queued delayed jobs."""
    list_display = ["job_name_html", "job_function", "job_parameters",
                    "retry", "retry_cnt",
                    "gmt_update", "gmt_created"]
    actions = []

    def job_name_html(self, obj):
        # Render the job name as a link to its admin change page.
        link = reverse("admin:background_job_delayedjob_change", args=[obj.id])
        return format_html('<a href="{}">{}</a>', link, obj.job_name)
    job_name_html.short_description="Job Name"
@admin.register(JobExecHistory)
class DjangoJobExecAdmin(admin.ModelAdmin):
    """Admin list view for job execution history rows."""
    list_display = ["job_name", "result", "trace_message", "html_status", "duration", "start_tm", "end_tm"]
    list_filter = ["job_name", "status"]
@admin.register(ActionLog)
class ActionLogAdmin(admin.ModelAdmin):
    """Admin list view for registration/update audit entries."""
    list_display = ["id", "action", "op_host", "gmt_update", "gmt_created"]
    list_filter = ["op_host"]
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,860 | drunkpig/django-background-job | refs/heads/main | /example/jobs.py | from background_job.job import cron_job, delayed_job, interval_job, once_job, boot_job
@cron_job(name="my_func3", cron="*/1 * * * *")
def my_func3():
    # Demo job: registered to fire every minute via the cron trigger.
    print("my_func3()")
    return "this is func3"
@cron_job(name="my_func", cron="*/2 * * * *", args=('X parameter',), kwargs={"name":"Jin", "age":23})
def my_func(x, name="Jos", age=20):
    # Demo job: every 2 minutes with positional and keyword args from the decorator.
    print(f"my_func(x={x}, name={name}, age={age})")
@boot_job(name="my_func2", )
def my_func2():
    # Demo job: runs once shortly after the scheduler boots.
    print("my_func2()")
@interval_job(name="间隔执行", enable=True, seconds=10, args=('NAME',"VALUE"))
def interval_func(name, value):
    # Demo job: fires every 10 seconds with two positional args.
    print(f"interval_func({name}, {value})")
    return "interval_func ************************"
@once_job(name="once_job只运行一次", run_at="2021-02-16 23:39:00", args=("NAME",), kwargs={"key":"KEY"})
def run_once_func(name, value=33, key="xyz"):
    # Demo job: fires exactly once at the given datetime.
    print(f"run_once_func({name}, {value}, {key})")
@delayed_job(name="测试用delayed_job", retry=-1, description="只是用在测试上")
def delayed_job_func(name, value):
    # Demo delayed job: calling it enqueues a DelayedJob row instead of
    # running this body directly.
    print(f"{delayed_job_func}({name}, {value})")
def common_func(name, value):
    """Plain helper that is deliberately NOT registered as any background job."""
    message = f"interval_func({name}, {value})"
    print(message)
    return "hhhhhhh"
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,861 | drunkpig/django-background-job | refs/heads/main | /setup.py | from setuptools import find_packages, setup
# Packaging metadata for the django-background-job distribution.
setup(
    name='django-background-job',
    version='0.0.1',
    description='APScheduler for Django',
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Framework :: Django",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
    ],
    keywords='django background task, background job',
    url='https://github.com/drunkpig/django-background-job.git',
    author='drunkpig',
    author_email='xuchaoo@gmail.com',
    license='MIT',
    # Ship every package except the test suite.
    packages=find_packages(
        exclude=("tests", )
    ),
    install_requires=[
        'django>=1.11',
        'apscheduler',
        'crontab'
    ],
    zip_safe=False
)
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,862 | drunkpig/django-background-job | refs/heads/main | /background_job/JobProcessor.py | import threading
from queue import Queue
from concurrent.futures import ThreadPoolExecutor, Future
import importlib, json, logging
import multiprocessing
from background_job.models import JobExecHistory
from background_job.utils import log_job_history_by_id, log_job_history
class JobProcessor(threading.Thread):
    """Consumes scheduled jobs from a queue and runs them on a thread pool.

    The Scheduler puts job instances (carrying an ``instance_id``) on the
    queue; each is dispatched to ``threadpool`` and its outcome recorded in
    JobExecHistory via the utils helpers.

    Fixes vs. the original:
    - guard against ``job is None`` in the error path (a failure inside
      ``queue.get`` would previously raise again and kill the thread);
    - pass ``str(exception)`` rather than the exception object as
      ``trace_message`` (the field is a TextField);
    - ``self.daemon = ...`` instead of the deprecated ``setDaemon()``.
    """
    LOGGER = logging.getLogger()

    def __init__(self, queue: Queue, ):
        super().__init__()
        # Non-daemon: keep the process alive while jobs may still be running.
        self.daemon = False  # setDaemon() is deprecated since Python 3.10
        self.queue = queue
        self.threadpool = ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2)

    def run(self):
        """Main loop: block on the queue forever, dispatching each job."""
        while True:
            job = None
            try:
                job = self.queue.get(block=True)
                parameters = json.loads(job.job_parameters)
                log_job_history(job, status=JobExecHistory.RUNNING, result=None, trace_message=None)
                self.__call(job.id, job.instance_id, job.job_function,
                            *parameters['args'], **parameters['kwargs'])
            except Exception as e:
                self.LOGGER.exception(e)
                # job is None when queue.get itself failed; logging history
                # for it would raise again and kill the processor thread.
                if job is not None:
                    log_job_history(job, status=JobExecHistory.ERROR, result=None,
                                    trace_message=str(e))

    def __call(self, job_id, job_instance_id, function_string, *args, **kwargs):
        """Resolve the dotted ``module.func`` path and submit it to the pool."""
        mod_name, func_name = function_string.rsplit('.', 1)
        mod = importlib.import_module(mod_name)
        func = getattr(mod, func_name)
        future = self.threadpool.submit(func, *args, **kwargs)
        # Stash identifying info on the future so the done-callback can log it.
        future.job_id = job_id
        future.instance_id = job_instance_id
        future.function_string = function_string
        future.add_done_callback(self.__call_succ)

    def __call_succ(self, future: Future):
        """Done-callback: record cancel/error/success in JobExecHistory."""
        job_id = future.job_id
        job_instance_id = future.instance_id
        function_string = future.function_string
        try:
            if future.cancelled():
                self.LOGGER.warning("%s cancelled", function_string)
                log_job_history_by_id(job_id, job_instance_id, status=JobExecHistory.MISSED,
                                      result=None, trace_message="job cancelled")
            elif future.done():
                error = future.exception()
                if error:
                    self.LOGGER.error("%s ERROR: %s", function_string, error)
                    log_job_history_by_id(job_id, job_instance_id, status=JobExecHistory.ERROR,
                                          result=None, trace_message=str(error))
                else:
                    result = future.result()
                    self.LOGGER.info("%s return: %s", function_string, result)
                    log_job_history_by_id(job_id, job_instance_id, status=JobExecHistory.SUCCESS,
                                          result=result, trace_message=None)
        except Exception as e:
            # History logging is best-effort; never let it crash the pool.
            self.LOGGER.exception(e)
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,863 | drunkpig/django-background-job | refs/heads/main | /example/test2.py | import logging
from logging.config import fileConfig
from example.jobs import *
if __name__=="__main__":
    # Manual smoke test: importing example.jobs registered the jobs; calling
    # the decorated functions here executes their bodies directly.
    fileConfig("../logging.ini")
    logger = logging.getLogger()
    print("***********************")
    my_func(3, name="xu")
    print("---------------------------")
    my_func(5)
    print("%%%%%%%%%%%%%%%%%%%%%%%%%%%")
    my_func2()
| {"/background_job/Scheduler.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/utils.py": ["/background_job/models.py"], "/background_job/job.py": ["/background_job/models.py", "/background_job/utils.py"], "/background_job/models.py": ["/background_job/Trigger.py"], "/background_job/management/commands/process_tasks.py": ["/background_job/JobProcessor.py", "/background_job/Scheduler.py"], "/background_job/admin.py": ["/background_job/models.py"], "/example/jobs.py": ["/background_job/job.py"], "/background_job/JobProcessor.py": ["/background_job/models.py", "/background_job/utils.py"], "/example/test2.py": ["/example/jobs.py"]} |
66,879 | bwalkowi/pypageobject | refs/heads/master | /pypageobject/_page_objects.py | from contextlib import suppress
class PageObject:
    """
    Base wrapper around a selenium web element (or the driver itself).

    _id: optional class attribute (often a property) whose value prefixes
         the __str__ representation.
    _click_area: optional element to click instead of ``web_elem``.
    """
    _id = None
    _click_area = None

    def __init__(self, driver, web_elem=None, parent=None):
        self.driver = driver
        # Fall back to the driver so lookups run against the whole page.
        self.web_elem = web_elem if web_elem is not None else driver
        self.parent = parent
        self._ignore_id = False  # guards __str__ against recursive _id lookups

    def is_displayed(self):
        return self.web_elem.is_displayed()

    def click(self):
        """Click the element (or its ``_click_area``) if enabled and visible."""
        click_area = self._click_area
        if click_area is None:
            click_area = self.web_elem
        # "disabled" in the CSS class counts as disabled even when the
        # element reports is_enabled() == True.
        is_enabled = (click_area.is_enabled() and
                      'disabled' not in click_area.get_attribute('class'))
        if is_enabled and click_area.is_displayed():
            click_area.click()
        else:
            raise RuntimeError(f'unable to click on {self}')

    def __str__(self):
        name = type(self).__name__
        parent_name = f' in {self.parent}' if self.parent is not None else ''
        if not self._ignore_id and self._id is not None:
            # in case when retrieving _id fails and recurse into __str__
            # omit this part as to not fall into infinite recursion
            self._ignore_id = True
            with suppress(RuntimeError):
                name = f'{self._id} {name}'
            self._ignore_id = False
        return name + parent_name
class Button(PageObject):
    """Clickable page object; calling the instance clicks it."""

    def __call__(self):
        self.click()

    def is_enabled(self):
        """True unless the element is disabled natively or via a CSS class."""
        if not self.web_elem.is_enabled():
            return False
        return 'disabled' not in self.web_elem.get_attribute('class')

    def is_active(self):
        """True when the element carries the 'active' CSS class."""
        return 'active' in self.web_elem.get_attribute('class')
class NamedButton(Button):
    """Button identified in messages by its visible text."""
    @property
    def text(self):
        # Visible label of the underlying web element.
        return self.web_elem.text
    # Alias the property under _id so PageObject.__str__ prefixes the
    # button's text: accessing self._id triggers the same getter as
    # self.text (both names bind the identical property descriptor).
    _id = text
| {"/pypageobject/__init__.py": ["/pypageobject/_page_objects.py", "/pypageobject/_page_elements.py"], "/pypageobject/_page_elements.py": ["/pypageobject/__init__.py"]} |
66,880 | bwalkowi/pypageobject | refs/heads/master | /pypageobject/__init__.py | from functools import partial
from ._page_objects import (PageObject, Button as ButtonPageObject,
NamedButton as NamedButtonPageObject)
from ._page_elements import (Selector, AbstractPageElement,
Element, Elements, Label, Input)
# Descriptor factories pre-bound to their page-object classes, so page
# classes can declare e.g. ``ok = Button('button.ok')``.
Button = partial(Element, cls=ButtonPageObject)
NamedButton = partial(Element, cls=NamedButtonPageObject)
| {"/pypageobject/__init__.py": ["/pypageobject/_page_objects.py", "/pypageobject/_page_elements.py"], "/pypageobject/_page_elements.py": ["/pypageobject/__init__.py"]} |
66,881 | bwalkowi/pypageobject | refs/heads/master | /pypageobject/_page_elements.py | from enum import Enum
from time import sleep
from typing import Optional, Type
from abc import ABC, abstractmethod
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from . import PageObject
class Selector(Enum):
    """Element-location strategies, mirroring selenium's ``By`` constants."""

    CSS = By.CSS_SELECTOR
    ID = By.ID
    NAME = By.NAME
    XPATH = By.XPATH
    LINK_TEXT = By.LINK_TEXT
    PARTIAL_LINK_TEXT = By.PARTIAL_LINK_TEXT
    TAG_NAME = By.TAG_NAME
    CLASS_NAME = By.CLASS_NAME

    def __repr__(self):
        # e.g. "Selector.CSS" rather than the default "<Selector.CSS: ...>".
        return '{}.{}'.format(type(self).__name__, self.name)
class AbstractPageElement(ABC):
    """Read-only descriptor base for elements located lazily on a page object.

    Subclasses implement ``__get__`` (typically delegating to ``super()``)
    to fetch the underlying selenium element.
    """

    def __init__(self, locator: str, *, selector: Selector = Selector.CSS):
        self.locator = locator
        self.selector = selector

    @abstractmethod
    def __get__(self, instance, owner):
        # Class-level access yields the descriptor object itself.
        if instance is None:
            return self
        try:
            found = instance.web_elem.find_element(self.selector.value,
                                                   self.locator)
        except NoSuchElementException:
            raise RuntimeError(f'unable to find {self.name} in {instance}')
        return found

    def __set__(self, instance, value):
        raise AttributeError("can't set attribute")

    def __delete__(self, instance):
        raise AttributeError("can't delete attribute")

    def __set_name__(self, owner, name):
        # Human-readable name used in error messages, e.g. "SUBMIT BUTTON".
        self.name = name.replace('_', ' ').strip().upper()
class Label(AbstractPageElement):
    """Descriptor that reads the visible text of the located element."""

    def __get__(self, instance, owner):
        if instance is None:
            return self
        elem = super().__get__(instance, owner)
        return elem.text
class Input(AbstractPageElement):
    """Descriptor reading and writing the value of an <input> element."""

    def __get__(self, instance, owner):
        if instance is None:
            return self
        box = super().__get__(instance, owner)
        return box.get_attribute('value')

    def __set__(self, instance, val):
        input_box = super().__get__(instance, type(instance))
        input_box.clear()
        if val == '':
            # Assigning the empty string is just a clear().
            return
        # send_keys occasionally drops characters, so keep retrying until
        # the field really contains the requested value.
        attempts_left = 10
        while attempts_left:
            input_box.send_keys(val)
            if input_box.get_attribute('value') == val:
                return
            input_box.clear()
            sleep(0.01)
            attempts_left -= 1
        raise RuntimeError(f'entering "{val}" to {self.name} '
                           f'in {instance} failed')
class Element(AbstractPageElement):
    """Descriptor yielding one element, optionally filtered by visible text
    and optionally wrapped in a page-object class."""

    def __init__(self, *args, cls: Optional[Type[PageObject]] = None,
                 text: Optional[str] = None, **kwargs):
        super().__init__(*args, **kwargs)
        self.cls = cls
        self.text = text

    def __get__(self, instance, owner):
        if instance is None:
            return self
        if self.text is None:
            match = super().__get__(instance, owner)
        else:
            candidates = instance.web_elem.find_elements(self.selector.value,
                                                         self.locator)
            wanted = self.text.lower()
            for match in candidates:
                if match.text.lower() == wanted:
                    break
            else:
                raise RuntimeError(f'no {self.name} with "{self.text}" '
                                   f'text found in {instance}')
        if self.cls is None:
            return match
        return self.cls(instance.driver, match, instance)
class Elements(Element):
    """Descriptor yielding every matching element, optionally filtered by
    visible text and optionally wrapped in a page-object class."""

    def __get__(self, instance, owner):
        if instance is None:
            return self
        items = instance.web_elem.find_elements(self.selector.value,
                                                self.locator)
        if self.text is not None:
            # BUG FIX: lowercase both sides of the comparison — previously
            # only item.text was lowered, so any uppercase character in
            # self.text could never match (inconsistent with
            # Element.__get__, which compares case-insensitively).
            wanted = self.text.lower()
            items = [item for item in items if item.text.lower() == wanted]
        if self.cls is not None:
            return [self.cls(instance.driver, item, instance) for item in items]
        else:
            return items
| {"/pypageobject/__init__.py": ["/pypageobject/_page_objects.py", "/pypageobject/_page_elements.py"], "/pypageobject/_page_elements.py": ["/pypageobject/__init__.py"]} |
66,882 | bwalkowi/pypageobject | refs/heads/master | /setup.py | from setuptools import setup
# Distribution metadata for the pypageobject package (setuptools).
setup(
    name='pypageobject',
    version='0.1.0',
    description='framework for creating page objects in python',
    # Long description comes straight from the README shipped with the repo.
    long_description=open('README.rst').read(),
    author='bwalkowi',
    url='https://github.com/bwalkowi/pypageobject',
    packages=['pypageobject'],
    # Selenium is the only runtime dependency.
    install_requires=['selenium>=3.0.0'],
    license='MIT',
    keywords='pageobject testing automation',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
    ]
)
| {"/pypageobject/__init__.py": ["/pypageobject/_page_objects.py", "/pypageobject/_page_elements.py"], "/pypageobject/_page_elements.py": ["/pypageobject/__init__.py"]} |
66,940 | doldol1/Crawlaper | refs/heads/master | /ntis_scraper_selenium.py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from collections import defaultdict
import bs4
import sys
import re
'''
chrome_driver=webdriver.Chrome('D:/scrapper/web_driver/chromedriver.exe')
other_page=webdriver.Chrome('D:/scrapper/web_driver/chromedriver.exe')
#chrome_driver.implicitly_wait(3)
# chrome_driver.get('https://nid.naver.com/nidlogin.login')
chrome_driver.get('https://sso2.ntis.go.kr/3rdParty/loginFormPageID.jsp')
# chrome_driver.find_element_by_name('id').send_keys('waderer')
# chrome_driver.find_element_by_name('pw').send_keys('mt321671')
chrome_driver.find_element_by_name('userid').send_keys('doldol1')
chrome_driver.find_element_by_name('password').send_keys('Fish5321671!')
chrome_driver.find_element_by_class_name('btn_blue').click()
other_page.implicitly_wait(3)
other_page.get('http://rndgate.ntis.go.kr/switch.do?prefix=/ia/info&page=/ProjectGroup.do?method=iaProjectSubjectList&searchVO.yrFrom=2016&searchVO.yrTo=2016')
# chrome_driver.find_element_by_xpath('//*[@id="frmNIDLogin"]/fieldset/input').click()
'''
# chrome_driver=webdriver.Chrome('D:/scrapper/web_driver/chromedriver.exe')
# chrome_driver.get('http://rndgate.ntis.go.kr/switch.do?prefix=/ia/info&page=/ProjectGroup.do?method=iaProjectSubjectList&searchVO.yrFrom=2016&searchVO.yrTo=2016')
# print(chrome_driver.window_handles)
# chrome_driver.find_element(By.LINK_TEXT, '로그인').click()
# chrome_driver.find_element_by_link_text("로그인").click()
# print(chrome_driver.window_handles)
# main_window=chrome_driver.window_handles[0]
# chrome_driver.switch_to.window(chrome_driver.window_handles[1])
# chrome_driver.find_element_by_name('userid').send_keys('doldol1')
# chrome_driver.find_element_by_name('password').send_keys('Fish5321671!')
# chrome_driver.find_element_by_class_name('btn_blue').click()
# chrome_driver.switch_to.window(main_window)
# html=chrome_driver.page_source
# soup=bs4.BeautifulSoup(html, 'lxml')
# res_file=open('res_file2.html', 'w', encoding='utf-8')
# s=soup.read().decode('utf-8')
# res_file.write(str(soup))
# print(str(soup))
####################NTIS Scaper Logic######################
## 1. NTIS의 검색페이지('http://rndgate.ntis.go.kr/switch.do?prefix=/ia/info&page=/ProjectGroup.do?method=iaProjectSubjectList&searchVO.yrFrom=2016&searchVO.yrTo=2016') 접속
## 2. NTIS로그인(Selenium)
## 3. 검색에 입력(Scraping대상이 많기 때문에 저장까지 마친 뒤 다음 검색어로 이동)
## 4. 페이지 수집(for search_box in '연구내용', '한글키워드', '영문키워드')
## -Scraping할 페이지 이동
## -먼저 각 페이지의 목록 수집
## -해당 페이지 목록 수집 뒤 각각의 세부 링크 정보 수집
## -모든 페이지의 Scraping
## 5. parsing 및 텍스트 정제, 중복 데이터 처리
## 6. 저장(css)
###########################################################
url='http://rndgate.ntis.go.kr/switch.do?prefix=/ia/info&page=/ProjectGroup.do?method=iaProjectSubjectList&searchVO.yrFrom=2016&searchVO.yrTo=2016'
keyword_list=['해양', '갈조류', '연어알', '감태', '우뭇가사리', '개다시마', '전복', '개청각', '진주', '구멍쇠미역', '참미역', '캐비어', '글루코사민', '김', '큰실말', '나래미역', '클로렐라', '납작파래', '키토산', '다시마', '톳', '돌미역', '퉁퉁마디', '멸치', '모자반', '플랑크톤', '미세조류', '함초', '미역', '해삼', '미역귀', '해조', '바다포도', '해조류', '불가사리', '상어연골', '스피룰리나', '스피루리나', '어류 콜라겐', '콜라겐']
eng_keywords=['ocean','brown algae','salmon roe','kajime','Gelidiumamansii','Kjellmaniella crassifolia','abalone','Codium dichotomum', 'pearl', 'Agarum cribrosum', 'Kjellmaniella gyrata', 'caviar', 'glucosamine', 'laver', 'phaeophyta', 'Alaria esculenta', 'chlorella', 'green confertii', 'chitosan', 'laminaria', 'hijiki', 'Brown Rock Seaweed', 'Salicornia europaea Linnaeus', 'engraulis japonicus', 'Sargassum fulvellum', 'plankton', 'microalgae', 'Salicornia europaea Linnaeus', 'Undaria pinnatifida','sea cucumber', 'Undaria pinnatifida', 'seaweed', 'Caulerpa lentillifera', 'seaweed', 'starfish', 'Shark cartilage', 'spirulina', 'spirulina', 'fish collagen', 'collagen']
keyword_list=['갈조류']
eng_keywords=['brown algae']
###완료 리스트###
## keyword_list=['해양','갈조류','연어알','감태','우뭇가사리','개다시마','전복','개청각','진주','구멍쇠미역','참미역','캐비어','글루코사민','김','큰실말', '나래미역', '클로렐라', '납작파래', '키토산', '다시마', '톳', '돌미역', '퉁퉁마디', '멸치', '모자반', '플랑크톤', '미세조류', '함초', '미역','해삼', '미역귀', '해조', '바다포도', '상어연골', '스피룰리나', '스피루리나', '어류 콜라겐', ]
## eng_keywords=['ocean','brown algae','salmon roe','kajime','Gelidiumamansii', 'Kjellmaniella crassifolia','abalone','Codium dichotomum','pearl','Agarum cribrosum', 'Kjellmaniella gyrata', 'caviar', 'glucosamine','laver','phaeophyta','Alaria esculenta','chlorella','green confertii','chitosan','laminaria', 'hijiki', 'Brown Rock Seaweed', 'Salicornia europaea Linnaeus', 'engraulis japonicus', 'Sargassum fulvellum', 'plankton', 'microalgae', 'Salicornia europaea Linnaeus', 'Undaria pinnatifida','sea cucumber', 'Undaria pinnatifida', 'seaweed', 'Caulerpa lentillifera','Shark cartilage', 'spirulina', 'spirulina', 'fish collagen', ]
################
# keyword_list= ['글루코사민','김','큰실말']
# eng_keywords=['glucosamine','laver','phaeophyta']
# keyword_list= ['해조류', '불가사리',]
# eng_keywords=[ 'seaweed', 'starfish',]
# keyword_list=['망고']
# eng_keywords=['mango']
# def dd_init():
# data_dic=defaultdict(list)
# return data_dic
#문제: 공동연구안나옴(체크된 항목없음)
def data_save(keyword, data_extracted):
    """Write one comma-separated line per scraped row to
    '<keyword>관련 연구 사업 검색결과.txt' (UTF-8) and return True.

    Each field is followed by a comma (including the last one), and each
    row ends with a newline, matching the original output format.
    """
    file_name = keyword + '관련 연구 사업 검색결과.txt'
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open(file_name, 'w', encoding='utf-8') as file_instance:
        for writing_list in data_extracted:
            for element_index in range(len(writing_list)):
                # Column remap: positions 29/30 take the two trailing
                # summary fields and columns past 31 shift back by two.
                # NOTE(review): presumably re-aligns the detail columns with
                # the summary cells appended last by detail_extract — confirm
                # against real scraped rows.
                if element_index == 29:
                    file_instance.write(writing_list[-2])
                elif element_index == 30:
                    file_instance.write(writing_list[-1])
                elif element_index > 31:
                    file_instance.write(writing_list[element_index - 2])
                else:
                    file_instance.write(writing_list[element_index])
                file_instance.write(',')
            file_instance.write('\n')
    return True
def data_refiner(total_data):
    """Remove rows duplicated on column 8 (project id), then scrub commas,
    newlines, tabs and the 'Nodata' placeholder from every cell.

    Mutates and returns total_data.
    """
    # Collect the indices of every later row whose column 8 repeats an
    # earlier row's value.
    duplicate_rows = set()
    for base in range(len(total_data)):
        for other in range(base + 1, len(total_data)):
            if total_data[base][8] == total_data[other][8]:
                duplicate_rows.add(other)
    count_dup = len(duplicate_rows)
    print('count_dup:', count_dup)
    # Delete from the highest index down so earlier indices stay valid.
    for row_idx in sorted(duplicate_rows, reverse=True):
        del total_data[row_idx]
        count_dup -= 1
    print('count_dup:', count_dup)
    # Cell-level cleanup on the surviving rows.
    for row in total_data:
        for col in range(len(row)):
            cleaned = row[col]
            for junk in (',', '\n', '\t', 'Nodata'):
                cleaned = cleaned.replace(junk, '')
            row[col] = cleaned
    return total_data
def detail_extract(web_driver, data_list):
    """Open each result row's detail page and append its field values to the
    corresponding row of data_list (rows were created by list_extract).

    Returns data_list with the detail columns and two summary cells
    (research goal / research content) appended to every row.
    """
    bs_tmp = bs4.BeautifulSoup(web_driver.page_source, 'lxml')
    row_count = len(bs_tmp.find('table', {'class': 'basic_list'}).tbody.find_all('tr'))
    for i in range(1, row_count + 1):
        # Open the i-th row's detail link; a JS click avoids overlay issues.
        sr_btn = web_driver.find_element_by_xpath(
            "//table[@class='basic_list']/tbody/tr[" + str(i) + "]/td[6]/a")
        web_driver.execute_script("arguments[0].click()", sr_btn)
        bs_detail = bs4.BeautifulSoup(web_driver.page_source, 'lxml')
        for value in bs_detail.find('div', {'id': 'divMain'}).find_all('td'):
            if value.find('a'):
                data_list[i-1].append(value.find('a').get_text().strip())
            elif value.find('input'):
                for check_check in value.find_all('input'):
                    # BUG FIX: re.search takes (pattern, string); the original
                    # call had the arguments swapped, so checked checkboxes
                    # were effectively never detected.
                    if re.search('checked', str(check_check)):
                        data_list[i-1].append(check_check.attrs['title'].strip())
            elif value.get_text().find('showChart') != -1:
                # Chart-toggle cells carry no data; skip them.
                pass
            else:
                data_list[i-1].append(value.get_text().strip())
        # The summary pane's goal/content cells don't follow the table
        # structure above, so they are grabbed explicitly.
        summary_list = bs_detail.find('div', {'id': 'divSummary'}).table.tbody.find_all(
            'td', {'class': 'view_p30'})
        data_list[i-1].append(summary_list[0].get_text().strip())
        data_list[i-1].append(summary_list[1].get_text().strip())
        web_driver.execute_script('window.history.go(-1)')
    return data_list
def list_extract(bs_html, data_list):
    """Append one row (list of cell strings) per <tr> of the result table
    to data_list and return it.

    Blank cells become 'Nodata'; image cells map their alt text
    (주관/협동/위탁) through, anything else becomes 'Nodata' too.
    """
    for table_row in bs_html.find('table', {'class': 'basic_list'}).tbody.find_all('tr'):
        row_el = list()
        for ele in table_row.find_all('td'):
            # BUG FIX: re.search takes (pattern, string); the original call
            # was re.search(ele.get_text(), '[A-za-z0-9가-힣]+') — cell text
            # used as the pattern — so short texts could falsely match the
            # literal character-class string (and metacharacters could raise).
            # Also corrected the 'A-za-z' range typo to 'A-Za-z'.
            if not re.search('[A-Za-z0-9가-힣]+', ele.get_text()):
                # No letter/digit/hangul at all: treat the cell as empty.
                row_el.append('Nodata')
            elif ele.find('img'):
                # Role icons: keep the known alt texts, otherwise 'Nodata'.
                alt = ele.find('img').attrs['alt']
                row_el.append(alt if alt in ('주관', '협동', '위탁') else 'Nodata')
            else:
                row_el.append(ele.get_text().strip())
        data_list.append(row_el)
    return data_list
# assert False
def extract_operator(web_driver, ntis_keyword, eng_keyword):
    """Search NTIS for one keyword across three fields and return the rows.

    For each of the search fields 연구내용 (research content), 한글키워드
    (Korean keyword) and 영문키워드 (English keyword), the keyword is entered,
    every result page is scraped via list_extract/detail_extract, and the
    accumulated rows are deduplicated and cleaned with data_refiner.
    """
    total_data = list()
    point_list = ['연구내용', '한글키워드', '영문키워드']
    for point in point_list:
        if point == "연구내용":
            # First field: also set up the shared search filters
            # (summary checkbox, BT category, year range 2004-2016).
            sr_btn = web_driver.find_element_by_id("searchMoreBtn")
            web_driver.execute_script("arguments[0].click()", sr_btn)
            web_driver.find_element_by_id("pjSum0").click()
            sr_btn = web_driver.find_element_by_id("BT(생명공학기술)")
            web_driver.execute_script("arguments[0].click()", sr_btn)
            select_from = Select(web_driver.find_element_by_name("yrFrom_h"))
            select_from.select_by_value("2004")
            select_to = Select(web_driver.find_element_by_name("yrTo_h"))
            select_to.select_by_value("2016")
            web_driver.find_element_by_name("searchVO.pj_cont").send_keys(ntis_keyword)
        elif point == "한글키워드":
            web_driver.find_element_by_name("searchVO.pj_han_key").send_keys(ntis_keyword)
        elif point == "영문키워드":
            web_driver.find_element_by_name("searchVO.pj_eng_key").send_keys(eng_keyword)
        else:
            print('연구내용, 한글키워드, 영문키워드 모두 아닌 값이 발생했습니다.')
            sys.exit(1)
        # Submit the search via its JS handler.
        sr_btn = web_driver.find_element_by_xpath("//a[@href='javascript:goSearch();']")
        web_driver.execute_script("arguments[0].click()", sr_btn)
        bs_html = bs4.BeautifulSoup(web_driver.page_source, 'lxml')
        if bs_html.find('table', {'class': 'basic_list'}).tbody.tr.td.get_text().count('검색된 결과가 없습니다.') > 0:
            # No results for this field: report, reset the form, move on.
            # BUG FIX: the original used `point is not "영문키워드"` — identity
            # comparison against a string literal; use != for equality.
            if point != "영문키워드":
                print(ntis_keyword+'에서 '+point+'의 검색 결과가 없습니다.')
            else:
                print(eng_keyword+'에서 '+point+'의 검색 결과가 없습니다.')
            web_driver.find_element_by_id("searchMoreBtn").click()
            web_driver.find_element_by_name("searchVO.pj_cont").clear()
            web_driver.find_element_by_name("searchVO.pj_han_key").clear()
            web_driver.find_element_by_name("searchVO.pj_eng_key").clear()
            continue
        # Header looks like "total(section)"; derive the page count from the
        # total and the per-section count (10 results per page).
        tot_num = bs_html.find('h3', {'class': 't_head'}).span.get_text().split('(')[0]
        sec_num = bs_html.find('h3', {'class': 't_head'}).span.get_text().split('(')[1]
        page_num = (int(re.search('[0-9]*,{0,1}[0-9]+건', tot_num).group().replace(',', '').replace('건', ''))
                    - int(re.search('[0-9]*,{0,1}[0-9]+건', sec_num).group().replace(',', '').replace('건', ''))
                    - 1) // 10 + 1
        current_page = 1
        print(ntis_keyword+'의'+point+'는 '+str(page_num)+'페이지입니다.')
        while current_page <= page_num:
            print(point+'의 '+str(current_page)+'page 진행중')
            data_list = []
            bs_html = bs4.BeautifulSoup(web_driver.page_source, 'lxml')
            # Scrape the listing page, then enrich each row from its detail page.
            data_list = list_extract(bs_html, data_list)
            data_list = detail_extract(web_driver, data_list)
            total_data += data_list
            current_page += 1
            if page_num <= 1 or current_page > page_num:
                pass
            else:
                # Advance to the next result page via its pagination link.
                sr_btn = web_driver.find_element_by_link_text(str(current_page))
                web_driver.execute_script("arguments[0].click()", sr_btn)
        # Reset the search form before the next field.
        sr_btn = web_driver.find_element_by_id("searchMoreBtn")
        web_driver.execute_script("arguments[0].click()", sr_btn)
        web_driver.find_element_by_name("searchVO.pj_cont").clear()
        web_driver.find_element_by_name("searchVO.pj_han_key").clear()
        web_driver.find_element_by_name("searchVO.pj_eng_key").clear()
    total_data = data_refiner(total_data)
    sr_btn = web_driver.find_element_by_id("searchMoreBtn")
    web_driver.execute_script("arguments[0].click()", sr_btn)
    return total_data
def ntis_connect(web_driver, url, id='doldol1', pw='Fish5321671!'):
    """Open `url`, log in to NTIS through the login popup, return the driver."""
    # SECURITY NOTE(review): real credentials are hard-coded as parameter
    # defaults — they should come from configuration, not source code.
    # Navigate to the search page.
    web_driver.get(url)
    # web_driver.find_element_by_link_text("로그인").click()
    element=web_driver.find_element_by_link_text("로그인")
    web_driver.execute_script("arguments[0].click()",element)
    # Log in inside the popup window, then switch back to the main window.
    main_window=web_driver.window_handles[0]
    web_driver.switch_to.window(web_driver.window_handles[1])
    web_driver.find_element_by_name('userid').send_keys(id)
    web_driver.find_element_by_name('password').send_keys(pw)
    web_driver.find_element_by_class_name('btn_blue').click()
    web_driver.switch_to.window(main_window)
    # Return the logged-in selenium driver.
    return web_driver
def main():
    """Launch Chrome, log in to NTIS, then scrape and save every keyword."""
    web_driver = webdriver.Chrome('D:/scrapper/web_driver/chromedriver.exe')
    web_driver = ntis_connect(web_driver, url)
    for i in range(len(keyword_list)):
        # Searches the three fields for this keyword pair, with parsing,
        # text cleanup and dedup handled inside extract_operator.
        data_extracted = extract_operator(web_driver, keyword_list[i], eng_keywords[i])
        print('파일화')
        # One output file per keyword.
        data_save(keyword_list[i], data_extracted)
        # BUG FIX: removed a useless `i += 1` here — the for statement
        # rebinds i on every iteration, so the increment had no effect.
# break
##키워드별 저장
##최종중복검사 새 파일 생성
# Script entry point.
if __name__=='__main__':
    main()
66,941 | doldol1/Crawlaper | refs/heads/master | /chbio_scraper.py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait as EC
from collections import defaultdict
from seleniumrequests import Firefox
import bs4
import sys
import re
from bs4 import BeautifulSoup
import time
# url='http://qy1.sfda.gov.cn/datasearch/face3/base.jsp?'\
# 'tableId=68&tableName=TABLE68&'\
# 'title=%B9%FA%B2%FA%CC%D8%CA%E2%D3%C3%CD%BE%BB%AF%D7%B1%C6%B7&'\
# 'bcId=138009396676753955941050804482'
##################################
##커뮤니티에서 알려준 풀 문장
# Search-results URL for domestic special-purpose cosmetics on the CFDA
# data portal (qy1.sfda.gov.cn); the trailing curstart is the result offset
# and the page number is appended/advanced by chkURL below.
url='http://qy1.sfda.gov.cn/datasearch/face3/search.jsp?'\
    'tableId=68&bcId=138009396676753955941050804482&'\
    'tableName=TABLE68&viewtitleName=COLUMN787&'\
    'viewsubTitleName=COLUMN793%2CCOLUMN789&'\
    'tableView=%E5%9B%BD%E4%BA%A7%E7%89%B9%E6%AE'\
    '%8A%E7%94%A8%E9%80%94%E5%8C%96%E5%A6%86%E5'\
    '%93%81&curstart=1'
#####################################
# target_url='http://app1.sfda.gov.cn/datasearch/face3/base.jsp?'\
# 'tableId=30&tableName=TABLE30&'\
# 'title=%B9%FA%B2%FA%B1%A3%BD%A1%CA%B3%C6%B7&'\
# 'bcId=118103385532690845640177699192'
def chkURL(i, URL):
    """Return URL with its trailing page number advanced to `i`.

    If URL has no 'page=' parameter yet, '&page=1' is appended first.
    Assumes the URL currently ends with the previous page number (i - 1),
    which is how the scraping loop below calls it (incrementing by one).
    Values of i outside [1, 10000000) leave the page number untouched,
    matching the original if/elif ladder's behaviour.
    """
    # BUG FIX: the original tested `URL.count('page=') is 0` — an identity
    # comparison against an int literal; use equality instead.
    if URL.count('page=') == 0:
        URL = URL + '&page=1'
    if 1 <= i < 10000000:
        # The previous page number (i - 1) occupies len(str(i - 1)) trailing
        # characters: strip it and append the new one. This single expression
        # replaces the original seven-branch digit-count ladder.
        URL = URL[:-len(str(i - 1))] + str(i)
    return URL
# Walk all 2235 result pages, dumping every anchor's text to a UTF-8 file.
txt_file = open('chbio_cos.txt', 'w', encoding='utf-8')
j = 1
while (j <= 2235):
    # A fresh browser per page — presumably to dodge session throttling;
    # TODO confirm whether one long-lived driver would work.
    web_driver = Firefox()
    # BUG FIX: the POST payload dict was missing its closing brace, which
    # made this statement a syntax error.
    web_driver.request('POST', url, data={"tableId": "68", "State": "1",
                                          "bcId": "138009396676753955941050804482"})
    bs_tmp = BeautifulSoup(web_driver.page_source, 'lxml')
    the_list = bs_tmp.find_all('a')
    for i in the_list:
        print(i.get_text())
        txt_file.write(i.get_text() + '\n')
    j += 1
    url = chkURL(j, url)
    web_driver.quit()
    # Be polite to the server between pages.
    time.sleep(5)
# BUG FIX: the output file was never closed.
txt_file.close()
66,942 | doldol1/Crawlaper | refs/heads/master | /noun_parser.py | from konlpy.tag import Hannanum
import xlwt
import xlrd
# Absolute path of the source workbook to analyse (edit before running).
file_name='D:\scrapper\사회적기업\원본\\2000~2010년 사회적기업 뉴스 스크래핑.xls'
def parser():
    """Extract nouns from column 4 of every row of `file_name` with the
    Hannanum tagger and write them to noun_result.xls, rolling over to a
    new sheet every 65535 rows (the .xls per-sheet row limit)."""
    wt_sheetnum=1
    # Open the workbook to read.
    rd_workbook=xlrd.open_workbook(file_name)
    rd_worksheet=rd_workbook.sheet_by_index(0)
    # Workbook to write the extracted nouns into.
    wt_workbook=xlwt.Workbook(encoding='utf-8')
    wt_worksheet=wt_workbook.add_sheet('Sheet'+str(wt_sheetnum))
    # Total row/column counts of the source sheet.
    tot_rows=rd_worksheet.nrows
    tot_cols=rd_worksheet.ncols
    # Read cursor: row 0, column 4 (the article-text column).
    rd_num_row=0
    rd_num_col=4
    # Next output row to write.
    wt_row_contents=0
    # Output columns: noun index within the article, source-article label,
    # and the noun itself.
    wt_col_inpagenum=0
    wt_col_pagenum=1
    wt_col_contentnum=2
    # Per-cell values: in-article noun counter, article label, noun text.
    ct_inpage=1
    ct_page=str(rd_num_row+1)+'th page nouns'
    ct_content=''
    hannanum=Hannanum()
    print('분석할 엑셀 파일의 행 수는 '+str(tot_rows)+'이고 열 수는 '+str(tot_cols))
    while rd_num_row < tot_rows:
        ct_cell=rd_worksheet.cell_value(rd_num_row, rd_num_col)
        noun_list=hannanum.nouns(ct_cell)
        ct_inpage=1
        for ct_content in noun_list:
            print(str(wt_sheetnum)+'th Sheet, '+str(wt_row_contents)+'th row, '+str(ct_inpage)+'th noun is '+str(ct_content))
            if wt_row_contents > 65534:
                # Sheet full: start a new one and reset the write cursor.
                wt_sheetnum=wt_sheetnum+1
                wt_worksheet=wt_workbook.add_sheet('Sheet'+str(wt_sheetnum))
                wt_row_contents=0
            wt_worksheet.write(wt_row_contents, wt_col_inpagenum, str(ct_inpage))
            wt_worksheet.write(wt_row_contents, wt_col_pagenum, ct_page)
            wt_worksheet.write(wt_row_contents, wt_col_contentnum, ct_content)
            wt_row_contents=wt_row_contents+1
            ct_inpage=ct_inpage+1
        rd_num_row=rd_num_row+1
        ct_page=str(rd_num_row+1)+'th page nouns'
    wt_workbook.save('noun_result.xls')
def main():
    # Entry point: run the noun extraction over the configured workbook.
    parser()

if __name__ == '__main__':
    main()
66,943 | doldol1/Crawlaper | refs/heads/master | /project_saewol/Naver_crawler_partial.py | # from Urllist import Urllist
from bs4 import BeautifulSoup
import urllib.request
import urllib.parse
import urllib
import re
import xlsxwriter
import xlrd
import datetime
# Sample Naver news search URL for query 세월호 ("Sewol ferry"),
# 2014.04.14-2018.03.05, sorted by relevance.
target_url='https://search.naver.com/search.naver?&where=news&'\
           'query=%EC%84%B8%EC%9B%94%ED%98%B8&'\
           'sm=tab_pge&sort=0&photo=0&field=1&reporter_article=&pd=3&'\
           'ds=2014.04.14&de=2018.03.05&docid=&'\
           'nso=so:r,p:from20140414to20180305,a:t&mynews=1&cluster_rank=38&'\
           'start=1&refresh_start=0'
# Line 1: Naver news search base (required for every news query).
# Line 2: `query=` is the search term, percent-encoded.
# NOTE: cluster_rank at the URL tail grows with every search and is reset
# to 0 by Naver_crawler.url_setter.
class data_box:
    """Mutable record holding the scraped fields of one news article."""

    def __init__(self):
        # Every field starts as the 'default' placeholder until scraped.
        placeholder = 'default'
        self.news_subject = placeholder
        self.news_date = placeholder
        self.news_press = placeholder
        self.news_link = placeholder
        self.news_text = placeholder
class Naver_crawler:
    """Fetches Naver news search result pages and collects their parsed HTML."""

    def __init__(self):
        pass

    def url_setter(self, target_url):
        """Return target_url advanced to the next result page.

        Resets cluster_rank to 0 and bumps the start offset by 10
        (Naver paginates results ten at a time). The refresh_start
        parameter — which also contains 'start=' — is left untouched.
        """
        splited_url = target_url.split('&')
        for splited in range(len(splited_url)):
            if 'cluster_rank=' in splited_url[splited]:
                splited_url[splited] = 'cluster_rank=0'
            elif 'start=' in splited_url[splited]:
                if 'refresh_start' in splited_url[splited]:
                    continue
                else:
                    start_number = int(splited_url[splited].split('=')[1])
                    start_number += 10
                    splited_url[splited] = 'start=' + str(start_number)
        target_url = '&'.join(splited_url)
        return target_url

    def crawling(self, target_url, press_cookie=None):
        """Download every result page for target_url and return a list of
        BeautifulSoup documents, one per page.

        Assumes target_url contains ds= and de= date parameters (used only
        for progress messages). Stops at Naver's ~400-page result cap, when
        pagination disappears, or when the page number stops advancing.
        Retries indefinitely on connection errors.
        """
        # BUG FIXES vs the original: removed the unused `page_quit` local
        # and a dead bare `req_instance` expression after the loop.
        present_page = '1'
        last_page = ''
        source_stack = list()
        splited_url = target_url.split('&')
        for splited in range(len(splited_url)):
            if 'ds=' in splited_url[splited]:
                start_day = splited_url[splited].replace('ds=', '')
            elif 'de=' in splited_url[splited]:
                end_day = splited_url[splited].replace('de=', '')
        # Naver serves at most ~400 result pages; 410 is a safe ceiling.
        while int(present_page) < 410:
            try:
                if press_cookie is not None:
                    req_instance = urllib.request.Request(target_url, headers=press_cookie)
                else:
                    req_instance = urllib.request.Request(target_url)
                page_code = urllib.request.urlopen(req_instance)
            except urllib.error.URLError:
                print("Naver와 접속되지 않습니다. 다시 접속을 시도합니다.")
                continue
            bs_page = BeautifulSoup(page_code, 'lxml', from_encoding='utf-8')
            if bs_page.find('div', {'class': 'paging'}).find('strong') is None:
                # No highlighted page number -> no pagination -> done.
                print('총 페이지는 {0}입니다.'.format(present_page))
                break
            else:
                present_page = bs_page.find('div', {'class': 'paging'}).find('strong').get_text()
                if last_page == present_page:
                    # Page number did not advance: we are on the last page.
                    print(str(last_page)+'마지막.')
                    print('총 페이지는 {0}입니다.'.format(present_page))
                    break
                last_page = present_page
            source_stack.append(bs_page)
            print('{0}~{1}에 해당되는 {2}페이지의 소스를 저장했습니다.'.format(start_day, end_day, present_page))
            target_url = self.url_setter(target_url)
        return source_stack
class Naver_scraper:
    """Extracts article records (data_box) from crawled result-page soups."""
    def __init__(self):
        pass
    def scraping(self, source_stack):
        """Parse every result page in source_stack and return a list of
        data_box records (subject, date, press, link, body text)."""
        databox_list=list()
        datapage_num=1
        # for i in range(len(source_stack)):
        #     print(str(i)+'번째 Data를 추출합니다.')
        #     data_chunk=data_box()
        #     for data in source_stack[i].find('ul', {'class':'type01'}).find_all('dl'):
        for source in source_stack:
            print(str(datapage_num)+'page의 Data를 추출합니다.')
            for data in source.find('ul', {'class':'type01'}).find_all('dl'):
                data_chunk=data_box()
                data_chunk.news_subject=data.find('a').get_text()  # article title
                # Publication date: absolute (YYYY.MM.DD) or relative
                # ("N일 전"/"N분 전"/"N시간 전" = days/minutes/hours ago).
                data_chunk.news_date=re.search('([0-9]{4}.[0-9]{2}.[0-9]{2})|([0-9]{1,2}일 전)|([0-9]{1,2}분 전)|([0-9]{1,2}시간 전)',data.dd.get_text()).group()
                data_chunk.news_press=data.dd.find('span').get_text()
                data_chunk.news_link=data.find('dt').a['href']
                # Full body text is only fetchable when the article is hosted
                # on Naver News itself ("네이버뉴스" link present).
                if data.dd.a.get_text()=='네이버뉴스':
                    bs_tmp=BeautifulSoup(urllib.request.urlopen(data.dd.a['href']), 'lxml', from_encoding='utf-8')
                    if bs_tmp.find('div', {'id':'articleBodyContents'}):
                        data_chunk.news_text=bs_tmp.find('div', {'id':'articleBodyContents'}).get_text()
                    elif bs_tmp.find('div', {'id':'articeBody'}):
                        data_chunk.news_text=bs_tmp.find('div', {'id':'articeBody'}).get_text()
                else:
                    data_chunk.news_text='Because of irregular form, unable to scrape news.'
                # Strip the inline flash-workaround script Naver embeds in
                # article bodies, then normalise whitespace.
                data_chunk.news_text=data_chunk.news_text.replace('// flash ','')
                data_chunk.news_text=data_chunk.news_text.replace('오류를 우회하기 위한 함수 추가','')
                data_chunk.news_text=data_chunk.news_text.replace('function', '')
                data_chunk.news_text=data_chunk.news_text.replace('_flash_removeCallback() {}', '')
                data_chunk.news_text=data_chunk.news_text.strip()
                # data_chunk.news_text=data_chunk.news_text.replace(',' , ' ')  # not needed for the excel output path
                data_chunk.news_text=data_chunk.news_text.replace('\n' , '')
                data_chunk.news_text=data_chunk.news_text.replace('\t' , ' ')
                databox_list.append(data_chunk)
            # Progress dump: titles of everything collected so far.
            for tmp_data in databox_list:
                print(tmp_data.news_subject)
            datapage_num+=1
        return databox_list
class Naver_datasaver:
    """Persists scraped articles; the output file name is derived from the
    query term and date range embedded in the search URL."""

    def __init__(self, target_url):
        # Pull ds= (start date), de= (end date) and query= out of the URL.
        splited_url = target_url.split('&')
        for splited in range(len(splited_url)):
            if 'ds=' in splited_url[splited]:
                self.start_day = splited_url[splited].replace('ds=', '')
            elif 'de=' in splited_url[splited]:
                self.end_day = splited_url[splited].replace('de=', '')
            elif 'query=' in splited_url[splited]:
                self.query_name = splited_url[splited].replace('query=', '')
                # The query arrives percent-encoded; double-unquote is safe
                # (a second pass on already-decoded text is a no-op).
                self.query_name = urllib.parse.unquote(urllib.parse.unquote(self.query_name))
        self.saver_name = self.start_day + '-' + self.end_day + '년 ' + self.query_name + ' 뉴스 조사'
        print('저장될 파일의 이름은', self.saver_name, '입니다.')

    def save_excel(self, databox_list):
        """Write one article per row to '<saver_name>.xlsx'."""
        wt_workbook = xlsxwriter.Workbook(self.saver_name + '.xlsx', {'strings_to_urls': False})
        wt_worksheet = wt_workbook.add_worksheet()
        for i in range(len(databox_list)):
            wt_worksheet.write(i, 0, databox_list[i].news_subject)
            print(databox_list[i].news_subject)
            wt_worksheet.write(i, 1, databox_list[i].news_date)
            wt_worksheet.write(i, 2, databox_list[i].news_press)
            wt_worksheet.write(i, 3, databox_list[i].news_link)
            wt_worksheet.write(i, 4, databox_list[i].news_text)
        wt_workbook.close()

    def save_csv(self, databox_list):
        """Write one comma-separated line per article to '<saver_name>'."""
        print('저장을 시작합니다.')
        # BUG FIX: records were written without a trailing newline, so the
        # whole file ended up as one unreadable line; each record now ends
        # with '\n'. The file is also context-managed so it is closed even
        # if a write fails.
        with open(self.saver_name, 'w', encoding='utf-8') as file_stream:
            for data_chunk in databox_list:
                data_line = (data_chunk.news_subject + ',' + data_chunk.news_date + ','
                             + data_chunk.news_press + ',' + data_chunk.news_link + ','
                             + data_chunk.news_text)
                file_stream.write(data_line + '\n')
        return
def main():
    """Crawl, scrape and save every search URL listed in urllist.txt."""
    # list_instance=Urllist()
    crawler=Naver_crawler()
    scraper=Naver_scraper()
    # Restrict results to three specific press outlets via Naver's cookie.
    press_cookie={'Cookie':'news_office_checked=1032,1020,1028'}
    # One search URL per line.
    file_instance=open('urllist.txt', 'r')
    the_list=file_instance.read().split('\n')
    # the_list=Urllist.search_composer(target_url)  # if it were a static method
    for list_url in the_list:
        print(list_url)
        source_stack=crawler.crawling(list_url, press_cookie)
        databox_list=scraper.scraping(source_stack)
        the_saver=Naver_datasaver(list_url)
        print('파일 저장을 시작합니다.')
        the_saver.save_excel(databox_list)
    # for source in source_stack:
    #     news_count=bs_instance.find('div',{'class': 'title_desc all_my'}).span.get_text()
    #     print(re.search('[0-9]*,{0,1}[0-9]+건', news_count))

if __name__=='__main__':
    main()
66,944 | doldol1/Crawlaper | refs/heads/master | /Urlist.py | from bs4 import BeautifulSoup
import urllib.request
import datetime
import re
#이상한 검색어: asdkfdsl
# target_url='http://search.naver.com/search.naver?sm=tab_hty.top&where=news&'\
# 'query=asdkfdsl&'\
# 'oquery=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
# 'ie=utf8&tqi=TiH03spySDossvJNudwssssssvs-266552'
# https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=2000.01.01&de=2013.05.03&docid=&nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&mson=0&refresh_start=0&related=0
# https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&ds=2000.01.01&de=2013.05.03&docid=&nso=so%3Ar%2Cp%3Afrom20000101to20130503%2Ca%3Aall&mynews=1&mson=0&refresh_start=0&related=0
#해당 기간에 없는 검색어: 용팔이
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EC%9A%A9%ED%8C%94%EC%9D%B4&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&'\
'ds=2000.01.01&de=2000.01.02&docid=&'\
'nso=so%3Ar%2Cp%3Afrom20000101to20000102%2Ca%3Aall&mynews=0&'\
'mson=0&refresh_start=0&related=0'
#기능성식품 전 기간, 모든 옵션 미적용
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&'\
'ds=&de=&docid=&'\
'nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&'\
'mson=0&refresh_start=0&related=0'
class Urllist:
    """Builds a list of Naver news-search URLs whose per-URL hit counts fall
    between MIN_RESULT and MAX_RESULT, by recursively splitting or growing
    the searched date window."""
    def __init__(self):
        self.url_list=list()
        self.MIN_RESULT=3000  # lower bound on acceptable hits per URL
        self.MAX_RESULT=3900  # upper bound (Naver caps visible results near 4,000)
    def url_changer(self, target_url, start, end):
        """Return target_url with its ds/de/pd/nso query parameters rewritten
        to cover [start, end] (both datetime.date)."""
        splited_url=target_url.split('&')
        for splited in range(len(splited_url)):
            if 'ds=' in splited_url[splited]:
                splited_url[splited]="ds="+start.strftime("%Y.%m.%d")
            elif 'de=' in splited_url[splited]:
                splited_url[splited]="de="+end.strftime("%Y.%m.%d")
            elif 'pd=' in splited_url[splited]:
                splited_url[splited]="pd=3"
            elif 'nso=' in splited_url[splited]:
                # Bug fix: the original wrote `elif 'nso=':`, a constant truthy
                # string, so EVERY chunk not matched above entered this branch
                # and any parameter containing 'from' or 'all' (e.g. the query
                # term itself) got corrupted.
                if 'from' in splited_url[splited]:
                    tmp_nso=splited_url[splited].split('from')[0]
                    splited_url[splited]=tmp_nso+'from'+start.strftime("%Y%m%d")+'to'+end.strftime("%Y%m%d")+'%2Ca%3Aall'
                elif 'all' in splited_url[splited]:
                    splited_url[splited]=splited_url[splited].replace('all','')+'from'+start.strftime("%Y%m%d")+'to'+end.strftime("%Y%m%d")+'%2Ca%3Aall'
        reunited_url='&'.join(splited_url)
        return reunited_url
    def news_counter(self, bs_instance):
        """Return the total hit count (int) shown on a parsed result page."""
        news_count=bs_instance.find('div',{'class': 'title_desc all_my'}).span.get_text()
        news_count=int(re.search('[0-9]*,{0,1}[0-9]+건', news_count).group().replace(',','').replace('건', ''))
        return news_count
    def search_composer(self, target_url, url_list=None, start=datetime.date(2000,1,1), end=None):
        """Recursively probe date windows, appending a URL to url_list whenever
        its hit count lands inside [MIN_RESULT, MAX_RESULT]; returns url_list.

        Bug fix: `end` previously defaulted to `datetime.date.today()`, which
        is evaluated once at definition time, so a long-running process kept a
        stale "today"; it is now resolved per call.
        """
        if end is None:
            end=datetime.date.today()
        url_list=url_list if url_list is not None else self.url_list
        if end>datetime.date.today():
            end=datetime.date.today()
        composed_url=self.url_changer(target_url, start, end)
        html=urllib.request.urlopen(composed_url)
        bs_instance=BeautifulSoup(html, 'lxml', from_encoding='utf-8')
        pivot=end-start
        if bs_instance.find('div', {'class':'noresult_tab'}):  # no hits in this window
            if (start==datetime.date(2000,1,1)) & (end>=datetime.date.today()):  # no hits anywhere
                print('검색 결과값이 존재하지 않습니다.')
                return url_list
            else:  # no hits here only: grow the window and retry
                return self.search_composer(composed_url, url_list, start, start+pivot*1.5)
        else:
            news_count=self.news_counter(bs_instance)
            if self.MAX_RESULT<news_count:
                # too many hits: shrink the window
                return self.search_composer(composed_url, url_list, start, start+pivot*0.75)
            else:
                if self.MIN_RESULT>news_count:
                    if end==datetime.date.today():
                        url_list.append(target_url)
                        print('{0} to {1}: {2} data is searched, append'.format(start, end, news_count))
                        return url_list
                    elif end>datetime.date.today():
                        print('Error: Future news')
                        return None
                    else:
                        # too few hits: grow the window
                        return self.search_composer(composed_url, url_list, start, start+pivot*1.5)
                else:
                    url_list.append(target_url)
                    if end==datetime.date.today():
                        return url_list
                    elif end>datetime.date.today():
                        print('Error: Future news')
                        return None
                    else:
                        print('{0} to {1}: {2} data is searched, append'.format(start, end, news_count))
                        return self.search_composer(composed_url, url_list, end, end+pivot*1.5)
def main():
    """Entry point: collect the date-partitioned search URLs for the
    module-level target_url."""
    collector = Urllist()
    collected_urls = collector.search_composer(target_url)


if __name__ == '__main__':
    main()
'''
if bs_instance.find('span', {'class': 'result_num'}):
num_result=bs_instance.find('span', {'class': 'result_num'}).get_text()
num_result=int(re.search('[0-9]*,{0,1}[0-9]+건', num_result).group().replace(',','').replace('건', ''))
# print(num_result)
td=end-start
pivot=td#(3900/num_result)*td
#pivot이 13035.714일 때 1년
# print(pivot)
if MIN_RESULT > num_result: ##최저기준치보다 결과값이 적을 때
if datetime.date.today() < end:
print('마지막 자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', datetime.date.today(),'까지로 적당하므로, 리스트에 추가합니다.')
url_list.append(url_changer(composed_url, start, datetime.date.today()))
if url_list[-1] == url_list[-2]:
url_list.pop()
return url_list
else:
# print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지는 너무 짧습니다.')
return search_composer(composed_url, url_list, start, start+pivot*2)
elif MAX_RESULT < num_result: ##최고기준치보다 결과값이 많을 때
# print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지는 너무 깁니다.')
return search_composer(composed_url, url_list, start, start+pivot*0.75)
else:
print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지로 적당하므로, 리스트에 추가합니다.')
url_list.append(composed_url)
# search_composer(composed_url, url_list, start+pivot, end+pivot)
return search_composer(composed_url, url_list, start+datetime.timedelta(1)+pivot, end+pivot)
else:
if (start==datetime.date(2000,1,1)) & (end==datetime.date.today()):
print('검색 결과값이 존재하지 않습니다.')
else:
print('해당 페이지의 값이 존재하지 않습니다.')
return search_composer(composed_url, start, start+datetime.timedelta(365), url_list)
'''
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,945 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/models.py | from django.db import models
# Create your models here.
##############Value######################
##SearchCondition class
##pk: auto-generated primary key
##keyword: keyword of crawler
##ds: start date, DateField.
##de: end date, DateField.
##search_mode: condition flag, 0 is subject only search, 1is subject+body search, IntegerField.
##press_code: code of newspaper press, CharField. In Naver, each press searching condition have their code. Daily newspaper press codes are
##1032, 1005, 2312, 1020, 2385, 1021, 1081, 1022, 2268, 1023, 1025, 1028, 1469
## and each codes are matched press name below:
## gyunghyang, kookmin, naeil, donga, maeil, munhwa, seoul, saegyae, asiatoday, josun, joongang, hangyurae, hankook
##comment: press informations are sent to server through cooke: news_office_checked=1032, 1005, 2312, ... If cookie have no news_office_checked, server will searching all news without press condition.
##essential_word: essential_word is for detailed search-'+' should be written in front of essential word, CharField
##exact_word: exact_word is for detailed search-it should be written between "". CharField
##except_word: except_word is for detailed search-'-' should be written in front of excepting word, CharField
##SearchResult class
##pk: auto-generated primary key
##news_subject: subject of news, CharField
##news_date: date of news published, DateField.
##news_press: press company of news, CharField.
##news_url: url of news published, CharField.
##news_body: body of news, CharField.
#################################################
from django.db import models
class SearchCondition(models.Model):
    """One saved crawl request: keyword, date window, press codes and the
    detailed-search operators Naver supports (essential / exact / except)."""
    key_word=models.CharField(max_length=500)
    ds=models.DateField('start date')
    de=models.DateField('end date')
    press_codes=models.CharField(max_length=500)
    search_mode=models.IntegerField()
    essential_word=models.CharField(max_length=200)
    exact_word=models.CharField(max_length=200)
    except_word=models.CharField(max_length=200)
    def __init__(self, *args, **kwargs):
        # Bug fix: the original override accepted no arguments and never
        # called super().__init__(), which breaks every Django model
        # instantiation path (SearchCondition(...), QuerySet hydration,
        # forms, the admin).  Delegate first, then log.
        super().__init__(*args, **kwargs)
        print('SearchCondition instance created')
class SearchResult(models.Model):
    """One scraped article, linked to the SearchCondition that produced it;
    deleting the condition cascades to its results."""
    search_condition=models.ForeignKey(SearchCondition, on_delete=models.CASCADE)
    news_subject=models.CharField(max_length=1000)
    news_date=models.DateField('news published date')
    news_press=models.CharField(max_length=100)
    news_url=models.CharField(max_length=2000)
    news_body=models.CharField(max_length=100000)
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,946 | doldol1/Crawlaper | refs/heads/master | /refiner.py | #string refiner: remove special character
import re
import xlrd
import xlwt
import os
#file_name='D:\scrapper\노화\\2007~2008년 노화 뉴스 스크래핑.xls'
def refiner(file_name):
    """Strip special characters from columns 4+ of an .xls workbook and save
    the result next to it as <name>_ref.xls.

    Reads file_name with xlrd, keeps only parentheses, dots, Hangul,
    alphanumerics and whitespace in each cell, and writes with xlwt.
    """
    # sheet number used only in the output sheet's name
    wt_sheetnum=1
    # source workbook/sheet
    rd_workbook=xlrd.open_workbook(file_name)
    rd_worksheet=rd_workbook.sheet_by_index(0)
    # destination workbook/sheet
    wt_workbook=xlwt.Workbook(encoding='utf-8')
    wt_worksheet=wt_workbook.add_sheet('Sheet'+str(wt_sheetnum))
    # total rows/cols of the source sheet
    tot_rows=rd_worksheet.nrows
    tot_cols=rd_worksheet.ncols  # NOTE(review): unused
    # read cursor; data of interest starts at column 4 (body-text columns)
    rd_row=0
    rd_col=4
    # write cursor, mirrors the read cursor
    wt_row=0
    wt_col=4
    while rd_row < tot_rows:
        while True:
            try:
                cell=rd_worksheet.cell_value(rd_row, rd_col)
            except:
                # Reading past the last column raises; used here as the
                # row terminator.  NOTE(review): bare except also hides any
                # other failure — narrow to IndexError if possible.
                rd_col=4
                wt_col=4
                break
            if cell:
                # keep only (, ), ., Hangul, digits, ASCII letters, whitespace
                wt_worksheet.write(wt_row, wt_col, re.sub("[^().가-힣0-9a-zA-Z\\s]","",cell))
                rd_col+=1
                wt_col+=1
            else:
                # an empty cell also ends the row; reset cursors to column 4
                rd_col=4
                wt_col=4
                break
        wt_row+=1
        rd_row+=1
    file_name=file_name.replace('.xls','')+'_ref.xls'
    wt_workbook.save(file_name)
def main():
    """Batch entry point: refine every workbook in the hard-coded folder."""
    file_list=os.listdir('D:\scrapper\삶의 질')
    print(file_list)
    for file_name in file_list:
        refiner('D:\scrapper\삶의 질\\'+file_name)
    # NOTE(review): this re-refines a file the loop above already processed
    # (if present in the directory) — confirm whether it is intentional.
    refiner('D:\scrapper\삶의 질\\2016년 7월~2017년 5월 17일 \'삶의 질\' 뉴스 스크래핑.xls')
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,947 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/Urlmake_server.py | import urllib.parse
from datetime import datetime
def url_make(condition):
    """Build a Naver news search URL from a SearchCondition-like object.

    condition must expose key_word, exact_word, essential_word, except_word
    (strings) and ds / de (datetime.date) attributes.  The URL is assembled
    by rewriting the query parameters of a known-good sample URL.
    Returns the rewritten URL string (also printed for tracing).
    """
    sample_url='https://search.naver.com/search.naver?&where=news&'\
    'query=%EC%84%B8%EC%9B%94%ED%98%B8&'\
    'sm=tab_pge&sort=0&photo=0&field=1&reporter_article=&pd=3&'\
    'ds=2014.04.14&de=2018.03.05&docid=&'\
    'nso=so:r,p:from20140414to20180305,a:t&mynews=1&cluster_rank=38&'\
    'start=1&refresh_start=0'
    splited_url=sample_url.split('&')
    dateDS_instance=condition.ds
    dateDE_instance=condition.de
    for splited in range(len(splited_url)):
        if 'query=' in splited_url[splited]:
            # keyword + "exact phrase" + +essential + -except, URL-quoted
            full_query=urllib.parse.quote(condition.key_word)+'+"'+urllib.parse.quote(condition.exact_word)+'"+'+urllib.parse.quote('+'+condition.essential_word)+'+-'+urllib.parse.quote(condition.except_word)
            splited_url[splited]="query="+full_query
        elif 'ds=' in splited_url[splited]:
            splited_url[splited]="ds="+dateDS_instance.strftime("%Y.%m.%d")
        elif 'de=' in splited_url[splited]:
            splited_url[splited]="de="+dateDE_instance.strftime("%Y.%m.%d")
        elif 'pd=' in splited_url[splited]:
            splited_url[splited]="pd=3"
        elif 'nso=' in splited_url[splited]:
            # Bug fix: the original wrote `elif 'nso=':`, a constant truthy
            # string, so every remaining chunk fell into this branch and any
            # parameter containing 'from' or 'all' could be corrupted.
            if 'from' in splited_url[splited]:
                tmp_nso=splited_url[splited].split('from')[0]
                splited_url[splited]=tmp_nso+'from'+dateDS_instance.strftime("%Y%m%d")+'to'+dateDE_instance.strftime("%Y%m%d")+'%2Ca%3Aall'
            elif 'all' in splited_url[splited]:
                splited_url[splited]=splited_url[splited].replace('all','')+'from'+dateDS_instance.strftime("%Y%m%d")+'to'+dateDE_instance.strftime("%Y%m%d")+'%2Ca%3Aall'
    reunited_url='&'.join(splited_url)
    print(reunited_url)
    return reunited_url
66,948 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/apps.py | from django.apps import AppConfig
class NaverScraperConfig(AppConfig):
    """Django application configuration for the naver_scraper app."""
    name = 'naver_scraper'
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,949 | doldol1/Crawlaper | refs/heads/master | /scrapper_ver2.2_distributed.py | from bs4 import BeautifulSoup
import urllib.request
import xlwt
import datetime
import re
#-*- coding: utf-8 -*-
##네이버 검색 url쿼리 규칙
# exist: 언론사 코드가 들어가는 곳
# ie: 인코딩인 듯, 확인 필요
# query: 검색어
# sm: title.basic이면 제목에서만, all.basic이면 제목+본문 곳에서
# startDate, endDate: 시작 날자와 끝 날자, 만약 아무 것도 안들어갔다면 startDate는 1960년 1월 1일, endDate는 가장 최근의 날자를 가리킨다.
#주소 추가
# OR_URLS=['http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C0%CF%C0%DA%B8%AE&sm=title.basic&pd=4&'\
# 'startDate=1960-01-01&endDate=2009-12-31', 'http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C0%CF%C0%DA%B8%AE&sm=title.basic&'\
# 'pd=4&startDate=2010-01-01&endDate=2012-12-31','http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C0%CF%C0%DA%B8%AE&sm=title.basic&pd=4&'\
# 'startDate=2013-01-01&endDate=2015-12-31', 'http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C0%CF%C0%DA%B8%AE&sm=title.basic&pd=4&'\
# 'startDate=2016-01-01&endDate=2017-06-19', 'http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%BD%C7%BE%F7&sm=title.basic&pd=4&'\
# 'startDate=1960-01-01&endDate=2011-12-31', 'http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%BD%C7%BE%F7&sm=title.basic&pd=4&'\
# 'startDate=2012-01-01&endDate=2017-06-19']
# s='http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C7%D8%BE%E7%BD%C9%C3%FE%BC%F6&'\
# 'sm=all.basic&pd=4&startDate=2017-01-01&endDate=2017-06-28'
# OR_URLS=['http://news.naver.com/main/search/search.nhn?'\
# 'rcnews=exist%3A032%3A005%3A086%3A020%3A021%3A081%3A022%3A023%3A025%'\
# '3A028%3A038%3A469%3A&refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&'\
# 'ie=MS949&detail=0&rcsection=&query=%C7%D8%BE%E7%BD%C9%C3%FE%BC%F6&'\
# 'sm=all.basic&pd=4&startDate=2017-01-01&endDate=2017-06-28']
################리스트 자동 생성기#########################
#베이스url
#http://news.naver.com/main/search/search.nhn
target_url='http://news.naver.com/main/search/search.nhn?rcnews=exist%3A032%3A005%3A086%'\
'3A020%3A021%3A081%3A022%3A023%3A025%3A028%3A038%3A469%3A&'\
'refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&ie=MS949&detail=0&'\
'rcsection=&query=%B1%D4%C1%A6&sm=all.basic&pd=1&startDate=&endDate='
# target_url='http://news.naver.com/main/search/search.nhn?rcnews=exist%3A032%3A005%3A086%'\
# '3A020%3A021%3A081%3A022%3A023%3A025%3A028%3A038%3A469%3A&'\
# 'refresh=&so=rel.dsc&stPhoto=&stPaper=&stRelease=&ie=MS949&detail=0&'\
# 'rcsection=&query=%B5%E5%B7%A1%B0%EF&sm=all.basic&pd=1&startDate=&endDate='
MIN_RESULT=3000#최소한 이정도는 나올 수 있도록
MAX_RESULT=3900#결과가 더 나올 수 있으므로, 넉넉하게 여유 잡아
def url_changer(target_url, start, end):
    """Return target_url with its trailing startDate/endDate query
    parameters replaced by the ISO forms of start and end (datetime.date)."""
    parts = target_url.split('&')
    parts[-2] = 'startDate=' + start.isoformat()
    parts[-1] = 'endDate=' + end.isoformat()
    return '&'.join(parts)
def search_composer(target_url, url_list, start=datetime.date(2000,1,1), end=None):
    """Recursively split/grow the [start, end] window until each probed URL
    yields between MIN_RESULT and MAX_RESULT hits, collecting them in
    url_list; returns url_list when the window reaches today.

    Bug fixes versus the original:
    - `end` defaulted to `datetime.date.today()` evaluated at import time;
      it is now resolved per call.
    - the final recursive call passed arguments as (url, start, end,
      url_list), shifting url_list into the `end` slot and crashing.
    - `url_list[-2]` raised IndexError when only one URL had been collected.
    """
    if end is None:
        end=datetime.date.today()
    composed_url=url_changer(target_url, start, end)
    html=urllib.request.urlopen(composed_url)
    bs_instance=BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    if bs_instance.find('span', {'class': 'result_num'}):
        num_result=bs_instance.find('span', {'class': 'result_num'}).get_text()
        num_result=int(re.search('[0-9]*,{0,1}[0-9]+건', num_result).group().replace(',','').replace('건', ''))
        td=end-start
        pivot=td  # step used to widen/narrow the window
        if MIN_RESULT > num_result:  # too few results in this window
            if datetime.date.today() < end:
                print('마지막 자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', datetime.date.today(),'까지로 적당하므로, 리스트에 추가합니다.')
                url_list.append(url_changer(composed_url, start, datetime.date.today()))
                # guard: with fewer than two URLs the [-2] lookup would raise
                if len(url_list) >= 2 and url_list[-1] == url_list[-2]:
                    url_list.pop()
                return url_list
            else:
                return search_composer(composed_url, url_list, start, start+pivot*2)
        elif MAX_RESULT < num_result:  # too many results: shrink the window
            return search_composer(composed_url, url_list, start, start+pivot*0.75)
        else:
            print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지로 적당하므로, 리스트에 추가합니다.')
            url_list.append(composed_url)
            return search_composer(composed_url, url_list, start+datetime.timedelta(1)+pivot, end+pivot)
    else:
        if (start==datetime.date(2000,1,1)) & (end==datetime.date.today()):
            print('검색 결과값이 존재하지 않습니다.')
        else:
            print('해당 페이지의 값이 존재하지 않습니다.')
            # Bug fix: argument order corrected (url_list is 2nd positional).
            return search_composer(composed_url, url_list, start, start+datetime.timedelta(365))
#######################################################
# 출력 파일 명
OUTPUT_FILE_NAME = ''
# 스크래핑 함수
def get_text(OR_URLS, OUTPUT_FILE_NAME):
    """Scrape every search URL in OR_URLS into its own .xls workbook.

    Each URL's result pages are walked five times (t = 1..5: subject, press,
    time, article URL, body), one column per pass, via term_extractor().
    Page sources are cached in source_stack during the first pass so passes
    2-5 reuse them instead of re-downloading.
    """
    for OR_URL in OR_URLS:
        excel_file=xlwt.Workbook(encoding='utf-8')
        excel_sheet=excel_file.add_sheet('Sheet1')
        print(OR_URL)
        t=0
        source_stack=list()  # per-URL cache of parsed result pages
        while(t <5):
            URL=OR_URL  # reset the paged URL for this pass
            text=''  # NOTE(review): unused
            dup=''  # previous page's pagination marker, for end detection
            num=0  # row cursor for this pass
            t=t+1  # pass/column index (1..5)
            OUTPUT_FILE_NAME=set_stp(t=t)
            print(str(OUTPUT_FILE_NAME)+' started')
            i=1  # result page number
            while i < 401:  # Naver shows at most 400 result pages
                source=''  # NOTE(review): unused
                URL=chkURL(i=i, URL=URL)
                # opening the URL is failure-prone, hence the try
                try:
                    # NOTE(review): `t is 1` is an identity test against an
                    # int literal; should be `t == 1`.
                    if(t is 1):
                        source_code_from_URL = urllib.request.urlopen(URL)
                        soup = BeautifulSoup(source_code_from_URL, 'lxml', from_encoding='utf-8')
                        source_stack.append(soup)
                    else:
                        soup =source_stack[i-1]
                except urllib.error.URLError as e:
                    print(e.reason)
                    break
                # stop when the highlighted page number repeats (= last page)
                tmp2=soup.find('div', {"class" : "paging"}).select("strong")
                if(dup==tmp2):
                    print('end')
                    break
                else:
                    dup=tmp2
                # extract this pass's field; num is NOT an exact row count
                # outside term_extractor
                num=term_extractor(t=t, soup=soup, num=num, excel_sheet=excel_sheet)
                if(t==5):
                    print(i,'번째 페이지 완료')
                i=i+1
            print('Process end. i='+str(i)+'\n')
        # name the output after the URL's trailing date-range parameters
        excel_name=OR_URL.split('&')
        excel_file.save(excel_name[-2]+'부터'+excel_name[-1]+'.xls')
        print(OR_URL)
#진행현황 네이밍
def set_stp(t):
    """Map the pass index t (1-5) to a human-readable stage name; any other
    value logs an error and returns 'error'."""
    stage_names = {
        1: 'Subject',
        2: 'Press',
        3: 'Time',
        4: 'News URL',
        5: 'Contents',
    }
    if t in stage_names:
        return stage_names[t]
    print('Try error: Filing')
    return 'error'
#항에서 data 추출
def term_extractor(t, soup, num, excel_sheet) :
    """Extract one field (selected by t) from every article entry on the
    parsed result page `soup` and write it into excel_sheet starting at row
    num; returns the next free row index.

    t=1 subject (col 0), t=2 press (col 1), t=3 date (col 2),
    t=4 article URL (col 3), t=5 article body (cols 4+, written in
    30,000-character chunks to stay inside the .xls cell-size limit).
    """
    if(t==1):
        for item in soup.find_all('div', {"class" : "ct"}):
            tmp1=item.select(".tit")
            excel_sheet.write(num, 0, tmp1[0].text)
            num=num+1
    elif(t==2):
        for item in soup.find_all('div', {"class" : "info"}):
            tmp1=item.select(".press")
            excel_sheet.write(num, 1, tmp1[0].text)
            num=num+1
    elif(t==3):
        for item in soup.find_all('div', {"class" : "info"}):
            tmp1=item.select(".time")
            # relative stamps ('3일전', '2시간전', ...) become YYYY.MM.DD
            ref_time=chk_time(tmp1[0].text)
            excel_sheet.write(num, 2, ref_time)
            num=num+1
    elif(t==4):
        for item in soup.find_all('div', {'class':'ct'}):
            '''
            tmp1=str(item).split()
            tmp1[2]=tmp1[2].replace("href=", "")
            tmp1[2]=tmp1[2].strip('"')
            '''
            # pull the first href attribute out of the raw tag text
            tmp1=str(item).split()
            splited_item=[s for s in tmp1 if "href" in s]
            splited_item[0]=splited_item[0].replace("href=", "")
            body_URL=splited_item[0].strip('"')
            body_URL=body_URL.replace("amp;",'')
            # plain text, not a HYPERLINK formula, so the cell stays copyable
            excel_sheet.write(num, 3, body_URL)
            #excel_sheet.write(num, 3, xlwt.Formula('HYPERLINK("%s")' % body_URL))
            num=num+1
    elif(t==5):  # article body; some very minor presses cannot be fetched
        for item in soup.find_all('div', {'class':'info'}):
            try:
                tmp1=str(item).split()
                splited_item=[s for s in tmp1 if "href" in s]
                splited_item[0]=splited_item[0].replace("href=", "")
                body_URL=splited_item[0].strip('"')
                body_URL=body_URL.replace("amp;",'')
                news_body_URL=urllib.request.urlopen(body_URL)
                body= BeautifulSoup(news_body_URL, 'lxml', from_encoding='utf-8')
            except:
                # NOTE(review): bare except; after this message body_URL/body
                # may still hold the previous iteration's values.
                print('본문을 가져올 URL에 문제가 있어 공란으로 처리합니다.')
            try:
                # NOTE(review): `is '#'` is an identity test against a str
                # literal (interning-dependent); should be `== '#'`.
                if body_URL is '#' :
                    body_st=""
                elif body.find('div', id="articleBodyContents"):
                    # regular Naver news article container
                    body_st=body.find('div', id="articleBodyContents").text
                    body_st=body_st.strip()
                    body_st=body_st.replace("// flash 오류를 우회하기 위한 함수 추가", "")
                    body_st=body_st.strip()
                    body_st=body_st.replace("function _flash_removeCallback() {}", "")
                    body_st=body_st.strip()
                elif body.find('div', id='articeBody'):
                    # entertainment-section articles use a different container id
                    body_st=body.find('div', id="articeBody").text
                    body_st=body_st.strip()
                else:
                    body_st='해당 뉴스는 스포츠/연애 부문 언론사의 본문이며, 자료를 가져올 수 없습니다.'
                # keep only (, ), ., ',', Hangul, digits, letters, whitespace
                body_st=re.sub("[^().,가-힣0-9a-zA-Z\\s]","",body_st)
                # split across columns in 30,000-char chunks (cell limit)
                content_length=len(body_st)//30000
                content_ed=len(body_st)+1
                while(content_length >= 0):
                    content_st=content_length*30000
                    excel_sheet.write(num, 4+content_length, body_st[content_st:content_ed])
                    content_ed=content_st
                    content_length=content_length-1
            except:
                print("parsing exception")
                body_st='해당 뉴스는 자료를 가져올 수 없습니다.'
            num=num+1
    else:
        print('Try error: Scrapping')
    return num
#시간 변환기
def chk_time(tmp1):
    """Normalize a Naver relative timestamp ('N일전', 'N시간전', 'N분전') to
    an absolute 'YYYY.MM.DD' date string; any other value passes through
    stripped of surrounding whitespace.

    Bug fix: the original branched on `tmp1.count(...) is not 0`, an identity
    comparison against an int literal — implementation-dependent and a
    SyntaxWarning on modern CPython.  Plain substring tests are used instead;
    the date formatting is unchanged (zero-padded, dot-separated).
    """
    tmp1 = tmp1.strip()
    if '일전' in tmp1:  # "N days ago"
        print(tmp1)
        delta_days = -int(tmp1.replace('일전', ''))
        then = datetime.datetime.now() + datetime.timedelta(delta_days)
        tmp1 = then.strftime('%Y.%m.%d')
    elif '시간전' in tmp1:  # "N hours ago"
        print(tmp1)
        delta_hours = -int(tmp1.replace('시간전', ''))
        then = datetime.datetime.now() + datetime.timedelta(hours=delta_hours)
        tmp1 = then.strftime('%Y.%m.%d')
    elif '분전' in tmp1:  # "N minutes ago" -> today
        print(tmp1)
        tmp1 = datetime.datetime.now().strftime('%Y.%m.%d')
    return tmp1
#URL을 자동으로 변경시킨다.
def chkURL(i, URL):
    """Return URL with its trailing page= parameter set to page number i.

    Naver result URLs carry the page number as the last query parameter;
    '&page=1' is appended first when no page parameter exists, then the
    trailing number is rewritten.

    Bug fix / simplification: the original tested `URL.count('page=') is 0`
    (identity comparison against an int literal) and rewrote the number with
    a 40-line digit-width ladder that only worked for sequential page
    increments; a single anchored regex substitution handles any i.
    """
    if 'page=' not in URL:
        URL = URL + '&page=1'
    # the page number is always the last thing on the URL, so anchor at $
    return re.sub(r'page=\d+$', 'page=' + str(i), URL)
# 메인 함수
def main():
    """Entry point: build the per-date-range search URL list, then scrape
    each URL into its own Excel workbook."""
    OR_URLS=list()
    OR_URLS=search_composer(target_url, OR_URLS)
    print('url 추출 작업 완료')
    # OUTPUT_FILE_NAME starts as the module-level '' placeholder; get_text
    # rebinds it per scraping pass via set_stp().
    get_text(OR_URLS=OR_URLS, OUTPUT_FILE_NAME=OUTPUT_FILE_NAME)
if __name__ == '__main__':
    main()
66,950 | doldol1/Crawlaper | refs/heads/master | /naver_scraper_ver0.1.py | #다시 만들어야 된다.. 젠장...
#변경된 스크래퍼는 GET방식으로 쿼리문이 비교적 간단해짐.. 여전히 4,000개 검색이 한계치임
#cluster_rank라는 요상한 쿼리가 생겼는데... hop의 횟수를 확인하는 파라미터인듯 하다... 만약 계속적인 정보 수집시 문제가 될 수 있으므로 0으로 초기화하는 것이 좋을 듯
#페이지 이동과 끝
from bs4 import BeautifulSoup
import urllib.request
import xlwt
import datetime
import re
'''
https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=&de=&docid=&nso=&mynews=1&mson=0&refresh_start=0&related=0
https://search.naver.com/search.naver?ie=utf8&where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&sm=tab_pge&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=&de=&docid=&nso=so:r,p:all,a:all&mynews=1&cluster_rank=52&start=1&refresh_start=0
https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=&de=&docid=&nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&mson=0&refresh_start=0&related=0
https://search.naver.com/search.naver?ie=utf8&where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&sm=tab_pge&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=&de=&docid=&nso=so:r,p:all,a:all&mynews=1&cluster_rank=39&start=11&refresh_start=0
'''
#이상한 검색어: asdkfdsl
# target_url='http://search.naver.com/search.naver?sm=tab_hty.top&where=news&'\
# 'query=asdkfdsl&'\
# 'oquery=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
# 'ie=utf8&tqi=TiH03spySDossvJNudwssssssvs-266552'
#해당 기간에 없는 검색어: 용팔이
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EC%9A%A9%ED%8C%94%EC%9D%B4&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&'\
'ds=2000.01.01&de=2000.01.02&docid=&'\
'nso=so%3Ar%2Cp%3Afrom20000101to20000102%2Ca%3Aall&mynews=0&'\
'mson=0&refresh_start=0&related=0'
#기능성식품 전 기간, 모든 옵션 미적용
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&'\
'ds=&de=&docid=&'\
'nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&'\
'mson=0&refresh_start=0&related=0'
MIN_RESULT=3000#최소한 이정도는 나올 수 있도록
MAX_RESULT=3900#결과가 더 나올 수 있으므로, 넉넉하게 여유 잡아
def url_changer(target_url, start, end):
    """Return target_url with its ds= / de= query parameters set to start /
    end (both datetime.date, formatted YYYY.MM.DD); other parameters pass
    through unchanged.
    """
    splited_url=target_url.split('&')
    for splited in range(len(splited_url)):
        if 'ds=' in splited_url[splited]:
            # Bug fix: the original used `==` here and in the de= branch —
            # a comparison with no effect — so the dates were never written
            # and the URL was returned unmodified.
            splited_url[splited]='ds='+start.strftime("%Y.%m.%d")
            print(start.strftime("%Y.%m.%d"))
        elif 'de=' in splited_url[splited]:
            splited_url[splited]='de='+end.strftime("%Y.%m.%d")
            print(end.strftime("%Y.%m.%d"))
    reunited_url='&'.join(splited_url)
    return reunited_url
def search_composer(target_url, url_list, start=datetime.date(2000,1,1), end=None):
    """Probe the [start, end] window for results (work in progress: the
    window-splitting algorithm of the later scraper version is not
    implemented here yet).

    Bug fixes: `end` defaulted to `datetime.date.today()` evaluated at
    import time (now resolved per call), and the recursive call passed
    arguments as (url, start, end, url_list), shifting url_list into the
    wrong positional slot.
    """
    if end is None:
        end=datetime.date.today()
    composed_url=url_changer(target_url, start, end)
    html=urllib.request.urlopen(composed_url)
    bs_instance=BeautifulSoup(html, 'lxml', from_encoding='utf-8')
    if bs_instance.find('div', {'class':'noresult_tab'}):  # no hits in window
        if (start==datetime.date(2000,1,1)) & (end==datetime.date.today()):  # no hits anywhere
            print('검색 결과값이 존재하지 않습니다.')
        else:  # no hits in this window only: slide forward one year
            # Bug fix: argument order corrected (url_list is 2nd positional).
            return search_composer(composed_url, url_list, start, start+datetime.timedelta(365))
    else:  # results exist; recursive partitioning still to be written
        print('it soom!')
'''
if bs_instance.find('span', {'class': 'result_num'}):
num_result=bs_instance.find('span', {'class': 'result_num'}).get_text()
num_result=int(re.search('[0-9]*,{0,1}[0-9]+건', num_result).group().replace(',','').replace('건', ''))
# print(num_result)
td=end-start
pivot=td#(3900/num_result)*td
#pivot이 13035.714일 때 1년
# print(pivot)
if MIN_RESULT > num_result: ##최저기준치보다 결과값이 적을 때
if datetime.date.today() < end:
print('마지막 자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', datetime.date.today(),'까지로 적당하므로, 리스트에 추가합니다.')
url_list.append(url_changer(composed_url, start, datetime.date.today()))
if url_list[-1] == url_list[-2]:
url_list.pop()
return url_list
else:
# print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지는 너무 짧습니다.')
return search_composer(composed_url, url_list, start, start+pivot*2)
elif MAX_RESULT < num_result: ##최고기준치보다 결과값이 많을 때
# print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지는 너무 깁니다.')
return search_composer(composed_url, url_list, start, start+pivot*0.75)
else:
print('자료의 개수는',num_result,'개 이고',start.isoformat(),'부터', end.isoformat(),'까지로 적당하므로, 리스트에 추가합니다.')
url_list.append(composed_url)
# search_composer(composed_url, url_list, start+pivot, end+pivot)
return search_composer(composed_url, url_list, start+datetime.timedelta(1)+pivot, end+pivot)
else:
if (start==datetime.date(2000,1,1)) & (end==datetime.date.today()):
print('검색 결과값이 존재하지 않습니다.')
else:
print('해당 페이지의 값이 존재하지 않습니다.')
return search_composer(composed_url, start, start+datetime.timedelta(365), url_list)
'''
# 메인 함수
# 메인 함수
def main():
    """Entry point (work in progress)."""
    OR_URLS=list()
    OR_URLS=search_composer(target_url, OR_URLS)
    print('url 추출 작업 완료')
    # NOTE(review): get_text and OUTPUT_FILE_NAME are not defined anywhere
    # in this module (they exist in scrapper_ver2.2_distributed.py), so
    # reaching this line raises NameError — import them or drop the call.
    get_text(OR_URLS=OR_URLS, OUTPUT_FILE_NAME=OUTPUT_FILE_NAME)
if __name__ == '__main__':
    main()
66,951 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/admin.py | from django.contrib import admin
from .models import SearchCondition, SearchResult
# Register your models here.
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(SearchCondition)
admin.site.register(SearchResult)
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,952 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/urls.py | from django.urls import path
from . import views
# URL namespace used by reverse() / {% url %} lookups
app_name='naver_scraper'
urlpatterns=[
    path('', views.index, name='index'),  # landing page
    path('crawling/', views.crawling, name='naver_crawling'),  # run a crawl
]
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,953 | doldol1/Crawlaper | refs/heads/master | /file_integrate.py | import xlsxwriter
import xlrd
import os
def filter(file_names):
    """Merge the first sheet of every Excel workbook named in file_names
    (resolved against the current working directory) into a single
    integrated_file.xlsx, stacking the rows in order.

    NOTE(review): the name shadows the builtin `filter`; kept unchanged for
    existing callers.
    """
    wt_app_sheetnum=1  # kept from the original (unused)
    # destination workbook; strings_to_urls=False keeps long URLs as text
    wt_app_workbook=xlsxwriter.Workbook('integrated_file.xlsx', {'strings_to_urls':False})
    wt_app_worksheet=wt_app_workbook.add_worksheet()
    # write cursor into the merged sheet
    wt_row=0
    wt_col=0
    for file_name in file_names:
        # Bug fix: the original tested `if 'xls' or 'xlsx' in file_name:`,
        # which is always true ('xls' is a truthy literal), so every file in
        # the directory — Excel or not — was fed to xlrd and could crash it.
        if file_name.endswith(('.xls', '.xlsx')):
            ex_path=os.getcwd()+'\\'+file_name
            print(ex_path)
            # source workbook, first sheet only
            rd_workbook=xlrd.open_workbook(ex_path)
            rd_worksheet=rd_workbook.sheet_by_index(0)
            tot_rows=rd_worksheet.nrows
            for row in range(0, tot_rows):
                for value in rd_worksheet.row_values(row):
                    wt_app_worksheet.write(wt_row, wt_col, value)
                    wt_col=wt_col+1
                wt_row+=1
                wt_col=0
    wt_app_workbook.close()
def main():
    """Integrate every Excel file found in the current working directory."""
    filter(os.listdir(os.getcwd()))
# Script entry point.
if __name__ == '__main__':
    main()
66,954 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/Urllist_server.py | from bs4 import BeautifulSoup
import urllib.request
import requests
import datetime
import re
import http.cookiejar
#이상한 검색어: asdkfdsl
# target_url='http://search.naver.com/search.naver?sm=tab_hty.top&where=news&'\
# 'query=asdkfdsl&'\
# 'oquery=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
# 'ie=utf8&tqi=TiH03spySDossvJNudwssssssvs-266552'
# https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&ds=2000.01.01&de=2013.05.03&docid=&nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&mson=0&refresh_start=0&related=0
# https://search.naver.com/search.naver?where=news&query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&ds=2000.01.01&de=2013.05.03&docid=&nso=so%3Ar%2Cp%3Afrom20000101to20130503%2Ca%3Aall&mynews=1&mson=0&refresh_start=0&related=0
#해당 기간에 없는 검색어: 용팔이
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EC%9A%A9%ED%8C%94%EC%9D%B4&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=3&'\
'ds=2000.01.01&de=2000.01.02&docid=&'\
'nso=so%3Ar%2Cp%3Afrom20000101to20000102%2Ca%3Aall&mynews=0&'\
'mson=0&refresh_start=0&related=0'
#기능성식품 전 기간, 모든 옵션 미적용
target_url='https://search.naver.com/search.naver?where=news&'\
'query=%EA%B8%B0%EB%8A%A5%EC%84%B1%EC%8B%9D%ED%92%88&'\
'ie=utf8&sm=tab_opt&sort=0&photo=0&field=0&reporter_article=&pd=0&'\
'ds=&de=&docid=&'\
'nso=so%3Ar%2Cp%3Aall%2Ca%3Aall&mynews=1&'\
'mson=0&refresh_start=0&related=0'
#############값의 입력(언론사를 제외하고) ###############
## ds: 시작일
## de: 종료일
## field: 값이 0이면 검색어가 제목 or 내용에 포함, 값이 1이면 제목만 포함
## 언론사: 쿠키의 news_office_checked=에 1032,1005,2312,1020,2385,1021,1081,1022,2268,1023,1025,1028,1469
## (경향, 국민, 내일, 동아, 매일, 문화, 서울, 세계, 아시아투데이, 조선, 중앙, 한겨레, 한국 순)값이 들어가면 메이저 주간 언론사
## 쿠키에 news_office_checked값을 넣지 않으면 모든 언론사
## 상세검색
## 상세검색 옵션을 사용해도 검색창에 직접 입력하는 것과 같은 효과를 내며 ""안에 있는 단어는 정확한 단어, +옆의 단어는 반드시 포함하는 단어, -는 제외하는 단어로 사용된다. 띄어쓰기는->'+', '+'->'%2B' '-'->'-'로 나타낸다.
##값은 몇 가지로 나뉜다, 제목, 날자, 언론사, URL, 본문
##제목: 기사의 제목(str)
##날자: 기사의 날자(str)
##언론사: 기사를 기재한 언론사(str)
##URL: 기사가 있는 url(str)
##본문: 기사 본문(str)
###########################################
class Urllist:
    """Builds a list of Naver news search URLs, recursively splitting the
    date range so each URL covers a slice with a manageable hit count."""

    def __init__(self):
        # Target window per URL: keep each date slice between MIN_RESULT
        # and MAX_RESULT hits so paging stays tractable.
        self.MIN_RESULT = 2500  # lowered from 3000 to reduce recursion depth
        self.MAX_RESULT = 3900  # head-room: the count may still grow a bit

    def url_changer(self, target_url, start, end):
        """Return *target_url* with its date fields rewritten to [start, end].

        Rewrites the ds/de/pd query fields and the from/to range embedded
        in the ``nso`` field; all other fields are left untouched.
        """
        splited_url = target_url.split('&')
        for splited in range(len(splited_url)):
            if 'ds=' in splited_url[splited]:
                splited_url[splited] = "ds="+start.strftime("%Y.%m.%d")
            elif 'de=' in splited_url[splited]:
                splited_url[splited] = "de="+end.strftime("%Y.%m.%d")
            elif 'pd=' in splited_url[splited]:
                splited_url[splited] = "pd=3"  # 3 = custom date-range mode
            elif 'nso=' in splited_url[splited]:
                # BUG FIX: was `elif 'nso=':`, which is always truthy, so
                # the from/to rewrite also mangled unrelated fields that
                # happened to contain 'from' or 'all' (e.g. the query).
                if 'from' in splited_url[splited]:
                    tmp_nso = splited_url[splited].split('from')[0]
                    splited_url[splited] = tmp_nso+'from'+start.strftime("%Y%m%d")+'to'+end.strftime("%Y%m%d")+'%2Ca%3Aall'
                elif 'all' in splited_url[splited]:
                    splited_url[splited] = splited_url[splited].replace('all', '')+'from'+start.strftime("%Y%m%d")+'to'+end.strftime("%Y%m%d")+'%2Ca%3Aall'
        return '&'.join(splited_url)

    def news_counter(self, bs_instance):
        """Extract the total hit count (the 'NNN건' text) from a result page."""
        news_count = bs_instance.find('div', {'class': 'title_desc all_my'}).span.get_text()
        news_count = int(re.search('[0-9]*,{0,1}[0-9]+건', news_count).group().replace(',', '').replace('건', ''))
        return news_count

    def search_composer(self, target_url, press_codes, url_list=None, start=datetime.date(2000,1,1), end=datetime.date.today()):
        """Recursively split [start, end] into slices whose hit count lies
        in [MIN_RESULT, MAX_RESULT] and append one URL per slice.

        NOTE(review): the default for ``end`` is evaluated once at class
        definition time, so a long-running process gets a stale "today".
        Returns the accumulated url_list, or None on a 'future news' error.
        """
        url_list = url_list if url_list is not None else list()
        if end > datetime.date.today():
            end = datetime.date.today()
        composed_url = self.url_changer(target_url, start, end)
        html = None
        if press_codes == 'daily_press':
            # Restrict to the major daily papers (Naver press ids).
            press_cookie = {'Cookie': 'news_office_checked=1032,1005,2312,1020,2385,1021,1081,1022,2268,1023,1025,1028,1469'}
        else:
            press_cookie = {'Cookie': 'news_office_checked='}
        while html is None:  # retry until Naver answers
            try:
                req_instance = urllib.request.Request(composed_url, headers=press_cookie)
                html = urllib.request.urlopen(req_instance)
            except urllib.error.URLError as e:
                print(e)
                print("Naver와 접속되지 않습니다. 다시 접속을 시도합니다.")
                continue
        bs_instance = BeautifulSoup(html, 'lxml', from_encoding='utf-8')
        pivot = end-start
        if bs_instance.find('div', {'class':'noresult_tab'}):  # empty slice
            if (start==datetime.date(2000,1,1)) & (end>=datetime.date.today()):
                # Nothing in the whole search period.
                print('검색 결과값이 존재하지 않습니다.')
                return url_list
            else:
                # Nothing in this slice only: widen it and retry.
                return self.search_composer(composed_url, press_codes, url_list, start, start+pivot*1.5)
        else:
            news_count = self.news_counter(bs_instance)
            if (start==datetime.date(2000,1,1)) & (end>=datetime.date.today()):
                print('총',news_count,'개의 뉴스가 있습니다.')
            if self.MAX_RESULT < news_count:
                # Too many hits: shrink the slice.
                print('뉴스가 '+str(news_count)+'개로 지나치게 많으니, 범위를 줄입니다.')
                return self.search_composer(composed_url, press_codes, url_list, start, start+pivot*0.7)
            elif self.MIN_RESULT > news_count:
                if end == datetime.date.today():
                    url_list.append(composed_url)
                    print("{0} to {1}: {2} data is searched, append. I know it's not enough size, but it is the last result.".format(start, end, news_count))
                    return url_list
                elif end > datetime.date.today():
                    print('Error: Future news')
                    return None
                else:
                    # Too few hits: grow the slice.
                    print('뉴스가 '+str(news_count)+'개로 지나치게 적으니, 범위를 늘립니다.')
                    return self.search_composer(composed_url, press_codes, url_list, start, start+pivot*1.25)
            else:
                url_list.append(composed_url)
                if end == datetime.date.today():
                    print('{0} to {1}: {2} data is searched, append. And this is the last'.format(start, end, news_count))
                    return url_list
                elif end > datetime.date.today():
                    print('Error: Future news')
                    return None
                else:
                    print('{0} to {1}: {2} data is searched, append'.format(start, end, news_count))
                    return self.search_composer(composed_url, press_codes, url_list, end+datetime.timedelta(days=1), end+datetime.timedelta(days=1)+pivot*1.5)
def main():
    """Compose the search URLs for target_url and write them to
    urllist.txt, comma-separated."""
    listed = Urllist()
    # BUG FIX: the required press_codes argument was missing (TypeError);
    # anything other than 'daily_press' selects all presses.
    the_list = listed.search_composer(target_url, 'all')
    # BUG FIX: original did `file_instance=('urllist.txt', 'w')`, which
    # builds a tuple instead of opening a file; use open() + with.
    with open('urllist.txt', 'w') as file_instance:
        for url in the_list or []:  # search_composer may return None
            file_instance.write(url)
            file_instance.write(',')
# Script entry point.
if __name__=='__main__':
    main()
66,955 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/migrations/0001_initial.py | # Generated by Django 2.0 on 2018-02-24 08:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for naver_scraper.

    Creates SearchCondition (the saved search parameters) and SearchResult
    (one scraped article, FK to the condition that produced it).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='SearchCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ds', models.DateField(verbose_name='start date')),
                ('de', models.DateField(verbose_name='end date')),
                ('search_mode', models.IntegerField()),
                ('press_codes', models.CharField(max_length=500)),
                ('essential_word', models.CharField(max_length=200)),
                ('exact_word', models.CharField(max_length=200)),
                ('except_word', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='SearchResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('news_subject', models.CharField(max_length=1000)),
                ('news_date', models.DateField(verbose_name='news published date')),
                ('news_press', models.CharField(max_length=100)),
                ('news_url', models.CharField(max_length=2000)),
                ('news_body', models.CharField(max_length=100000)),
                # Deleting a SearchCondition cascades to its results.
                ('search_condition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='naver_scraper.SearchCondition')),
            ],
        ),
    ]
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
66,956 | doldol1/Crawlaper | refs/heads/master | /all_scraper/naver_scraper/views.py | from django.http import HttpResponse
from django.shortcuts import render
from datetime import datetime
from .models import SearchCondition
from .Urllist_server import Urllist
from .file_integrate_server import filter
from .Naver_crawler_server import Naver_crawler
from .Naver_crawler_server import Naver_scraper
from .Naver_crawler_server import Naver_datasaver
import Urlmake_server
def index(request):
    """Render the search-form landing page."""
    return render(request, 'naver_scraper/index.html')
def crawling(request):
    """Build a SearchCondition from the POSTed form and compose the search URL.

    NOTE(review): this view looks unfinished — the crawler/scraper/saver
    objects are created but never used, and the condition is never saved.
    """
    condition = SearchCondition()
    condition.key_word = request.POST['key_word']
    condition.essential_word = request.POST['essential_word']
    condition.exact_word = request.POST['exact_word']
    condition.except_word = request.POST['except_word']
    # Default the date range to 2000-01-01 .. today when fields are blank.
    if request.POST['ds']:
        condition.ds = datetime.strptime(request.POST['ds'], "%Y-%m-%d")
    else:
        condition.ds = datetime.strptime('2000-01-01', "%Y-%m-%d")
    if request.POST['de']:
        condition.de = datetime.strptime(request.POST['de'], "%Y-%m-%d")
    else:
        condition.de = datetime.today()
    condition.press_codes = request.POST['press_codes']
    condition.search_mode = request.POST['search_mode']
    # BUG FIX: `url_make` was an undefined name (NameError); it presumably
    # lives in the imported Urlmake_server module — TODO confirm the name.
    gend_url = Urlmake_server.url_make(condition)
    list_instance = Urllist()
    crawler = Naver_crawler()
    scraper = Naver_scraper()
    saver = Naver_datasaver()
    # BUG FIX: `condtion` was a typo for `condition` (NameError at runtime).
    list_instance.url_changer(gend_url, condition.ds, condition.de)
    return HttpResponse(gend_url)
| {"/all_scraper/naver_scraper/admin.py": ["/all_scraper/naver_scraper/models.py"], "/all_scraper/naver_scraper/views.py": ["/all_scraper/naver_scraper/models.py", "/all_scraper/naver_scraper/Urllist_server.py"]} |
67,018 | Karan-S-Mittal/DataFlair-News-Aggregator | refs/heads/master | /news/models.py | from django.db import models
# Create your models here.
# Scrape data coming from websites
# The posts will contain images, urls and titles
class Headline(models.Model):
    """One scraped news post: title, optional thumbnail image and link."""
    title = models.CharField(max_length=300)
    image = models.URLField(max_length=1000, blank=True)  # image may be absent
    url = models.URLField(max_length=500)
    def __str__(self):
        # Human-readable label, e.g. in the admin list view.
        return self.title
| {"/news/admin.py": ["/news/models.py"]} |
67,019 | Karan-S-Mittal/DataFlair-News-Aggregator | refs/heads/master | /news/urls.py | from django.urls import path
from news.views import scrape, news_list
# Routes: /scrape/ triggers a fresh crawl, '' lists stored headlines.
urlpatterns = [
    path('scrape/', scrape, name="scrape"),
    path('', news_list, name="home"),
]
67,020 | Karan-S-Mittal/DataFlair-News-Aggregator | refs/heads/master | /news/admin.py | from django.contrib import admin
from news.models import Headline
# Register your models here.
# Make Headline manageable from the Django admin.
admin.site.register(Headline)
| {"/news/admin.py": ["/news/models.py"]} |
67,021 | prostomusa/Factory | refs/heads/main | /poll/views.py | from django.shortcuts import render
from rest_framework import status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.decorators import api_view, permission_classes
from rest_framework import generics
from django.http import HttpResponse, JsonResponse, HttpRequest
from .serializer import *
from .models import *
import json
from datetime import datetime
#from account.api.serializers import RegistrationSerializer
# Create your views here.
class PollsCreateView(generics.CreateAPIView):
    """Generic POST endpoint that creates an Answer via its serializer."""
    serializer_class = AnswerQuestionSerializer
@api_view(['GET', ])
def polls_list(request: HttpRequest) -> HttpResponse:
    """Return every poll as a list of {id, name, description, questions}."""
    polls = Polls.objects.all()
    if request.method == "GET":
        list_polls = []
        for poll_obj in polls:
            data = PollsSerializer(poll_obj).data
            list_polls.append({
                'ID Опроса': data['id'],
                'Название опроса': data['name'],
                'Описание': data['description'],
                'Вопросы': data['poll'],
            })
        # json round-trip normalizes serializer OrderedDicts to plain dicts
        return Response(json.loads(json.dumps(list_polls)))
@api_view(['GET'])
def get_question(request: HttpRequest, id_poll: int) -> HttpResponse:
    """Return poll *id_poll*'s questions as [{'question': …, 'answer': ''}]."""
    try:
        polls = Polls.objects.get(id=id_poll)
    except Polls.DoesNotExist:
        return JsonResponse({'Опрос':'Опроса с таким ID не существует'})
    if request.method == "GET":
        payload = []
        for item in polls.pl.all():
            print(item.question)  # debug trace kept from the original
            payload.append({'question': item.question, 'answer': ""})
        return Response(json.loads(json.dumps(payload)))
@api_view(['POST'])
def answer_question(request: HttpRequest,
                    id_poll: int, id_user: int) -> HttpResponse:
    """Record a user's answers for a poll; each user may answer once.

    Expects request.data to be a list of {'question': …, 'answer': …}.
    """
    try:
        polls = Polls.objects.get(id=id_poll)
    except Polls.DoesNotExist:
        return JsonResponse({'Опрос':'Опроса с таким ID не существует'})
    # BUG FIX: the original fetched Profile with `polls=polls` and then
    # re-checked the same condition — so an existing user always hit the
    # "already answered" branch, and a brand-new user's profile was created
    # via the exception path. get_or_create + an explicit membership test
    # expresses the intent directly.
    useranswer, _created = Profile.objects.get_or_create(id=id_user)
    if useranswer.polls.filter(id=id_poll).exists():
        return Response('Вы уже проходили этот опрос')
    useranswer.polls.add(polls)
    if request.method == "POST":
        for i in request.data:
            quest = Question.objects.get(poll=polls, question=i['question'])
            temp = Answer(question=quest, answer=i['answer'])
            temp.save()
            serializer = AnswerQuestionSerializer(temp, data=i)
            if serializer.is_valid():
                useranswer.answer.add(temp)
            else:
                # Roll back this submission on invalid input.
                useranswer.polls.remove(polls)
                temp.delete()
                return Response('Вы неправильно ввели данные')
        polls.date_end = datetime.now()
        polls.save()
        return Response("Спасибо за прохождение опроса")
    # Unreachable under @api_view(['POST']); kept for parity with the file.
    return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def get_polls(request: HttpRequest, id_user: int) -> HttpResponse:
    """Return every poll the user answered, with question/answer pairs.

    NOTE(review): pairing relies on `useranswer.answer.all()` being ordered
    exactly like the questions across all polls (cursor `t` runs over the
    global answer list) — fragile; verify ordering guarantees before reuse.
    """
    try:
        useranswer = Profile.objects.get(id=id_user)
    except Profile.DoesNotExist:
        return Response('User с таким ID не существует')
    if request.method == "GET":
        mina = useranswer.polls.all()
        datas = []      # one entry per answered poll
        datas1 = []     # question/answer pairs for the current poll
        answers = []
        k = 0           # index into datas
        t = 0           # running cursor into the flat answers list
        for i in mina:
            tr = useranswer.polls.get(id=i.id)
            datas.append({'ID': i.id, 'Начало': i.date_start, 'Окончание': i.date_end, 'Название опроса': i.name})
            question = tr.pl.all()
            answers = useranswer.answer.all()
            if len(question) < 1:
                datas[k]['Ваши вопросы и ответы'] = []
            else:
                for l in range(len(question)):
                    datas1.append({'Вопрос': question[l].question, 'Ответ': answers[t].answer})
                    t += 1
                # copy so the reset below does not clear the stored list
                datas[k]['Ваши вопросы и ответы'] = datas1[:]
                datas1 = []
            k += 1
        return Response(datas)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def polls_create(request: HttpRequest) -> HttpResponse:
    """Create a poll from the POSTed name/description (auth required)."""
    if request.method == "POST":
        # (removed an unused local `dicti` from the original)
        serializer = PollsCreateSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response('Вы создали опрос с названием - {}'.format(request.data['name']))
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE', 'PUT'])
@permission_classes([IsAuthenticated])
def poll(request: HttpRequest, id_poll: int) -> HttpResponse:
    """Delete a poll, or partially update it via PUT."""
    try:
        target = Polls.objects.get(id=id_poll)
    except Polls.DoesNotExist:
        return JsonResponse({'Опрос':'Опроса с таким ID не существует'})
    if request.method == 'DELETE':
        name = target.name
        target.delete()
        return Response({'Опрос с названием {} был удален'.format(name)})
    if request.method == 'PUT':
        serializer = PollsSerializer(target, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def question_create(request: HttpRequest, id_poll: int) -> HttpResponse:
    """Add a batch of questions (list of {'question','t_question'}) to a poll.

    NOTE(review): each Question is saved *before* serializer validation;
    invalid items are silently skipped but remain in the database — confirm
    whether that is intended.
    """
    try:
        polls = Polls.objects.get(id=id_poll)
    except Polls.DoesNotExist:
        return JsonResponse({'Опрос':'Опроса с таким ID не существует'})
    if request.method == "POST":
        data = []  # question texts successfully created
        for i in request.data:
            print(i['question'])
            temp = Question(poll=polls, question=i['question'], t_question=i['t_question'])
            temp.save()
            serializer = QuestionsSerializer(temp, data=i)
            if serializer.is_valid():
                serializer.save()
                data.append(i['question'])
        return Response(json.loads(json.dumps(data)))
    # Unreachable under @api_view(['POST']); `serializer` may be unbound here.
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['DELETE', 'PUT'])
@permission_classes([IsAuthenticated])
def question(request: HttpRequest, id_question: int) -> HttpResponse:
    """Delete a question, or partially update it via PUT."""
    try:
        target = Question.objects.get(id=id_question)
    except Question.DoesNotExist:
        return JsonResponse({'Вопроса':'Вопроса с таким ID не существует'})
    if request.method == "DELETE":
        name = target.question
        target.delete()
        return Response({'Вопрос - ({0}) был удален из опроса'.format(name)})
    if request.method == "PUT":
        serializer = QuestionsSerializer(target, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,022 | prostomusa/Factory | refs/heads/main | /poll/serializer.py | from rest_framework import serializers
from .models import *
class QuestionDisplaySerializer(serializers.ModelSerializer):
    """Read serializer exposing the human-readable question type."""
    # get_t_question_display() maps the integer choice to its label.
    type_of_question = serializers.CharField(source='get_t_question_display')
    class Meta:
        model = Question
        fields = ['id', 'question', 'type_of_question']
class QuestionsSerializer(serializers.ModelSerializer):
    """Write serializer used when creating/updating a Question."""
    class Meta:
        model = Question
        fields = ['question', 't_question']
class AnswerQuestionSerializer(serializers.ModelSerializer):
    """Serializer for one answer; 'question' is passed as plain text."""
    question = serializers.CharField()  # leftover note: was source='ans'
    class Meta:
        model = Answer
        fields = ['question', 'answer']
class PollsCreateSerializer(serializers.ModelSerializer):
    """Minimal serializer for creating a poll (name + description)."""
    class Meta:
        model = Polls
        fields = ['id', 'name', 'description']
class PollsSerializer(serializers.ModelSerializer):
    """Full poll representation including its nested questions."""
    # 'pl' is the related_name of Question.poll; rendered as 'poll'.
    poll = QuestionDisplaySerializer(source='pl', many=True)
    class Meta:
        model = Polls
        fields = ['id', 'name', 'description', 'poll']
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,023 | prostomusa/Factory | refs/heads/main | /poll/migrations/0002_auto_20210214_2229.py | # Generated by Django 2.2.10 on 2021-02-14 22:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Polls.date_end nullable (set only on completion)."""
    dependencies = [
        ('poll', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='polls',
            name='date_end',
            field=models.DateTimeField(null=True),
        ),
    ]
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,024 | prostomusa/Factory | refs/heads/main | /poll/models.py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """Issue a DRF auth token automatically for every newly created user."""
    if created:
        Token.objects.create(user=instance)
# Create your models here.
class Polls(models.Model):
    """A poll: name, description and its active period."""
    name = models.CharField(max_length=50)
    date_start = models.DateTimeField(auto_now_add=True)  # set on creation
    date_end = models.DateTimeField(null=True)  # set when a user completes it
    description = models.CharField(max_length=150)
class Question(models.Model):
    """One question of a poll; text must be globally unique."""
    poll = models.ForeignKey(Polls, on_delete = models.CASCADE, related_name='pl')
    question = models.CharField(max_length=50, unique=True)
    # 1=free text, 2=single choice, 3=multiple choice
    type_questions = (
        (1, "Ответ текстом"),
        (2, "Ответ с одним вариантом ответа"),
        (3, "Ответ с несколькими вариантами ответа"),
    )
    t_question = models.IntegerField(choices=type_questions)
class Answer(models.Model):
    """A single stored answer to one question."""
    question = models.ForeignKey(Question, on_delete = models.CASCADE, related_name='qs')
    answer = models.CharField(max_length=50)
class Profile(models.Model):
    """Per-user record of which polls were taken and which answers given."""
    polls = models.ManyToManyField(Polls)
    answer = models.ManyToManyField(Answer)
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,025 | prostomusa/Factory | refs/heads/main | /poll/migrations/0001_initial.py | # Generated by Django 2.2.10 on 2021-02-14 20:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the poll app
    (Answer, Polls, Question, Profile plus the Answer->Question FK)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Polls',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('date_start', models.DateTimeField(auto_now_add=True)),
                ('date_end', models.DateTimeField()),
                ('description', models.CharField(max_length=150)),
            ],
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.CharField(max_length=50)),
                ('t_question', models.IntegerField(choices=[(1, 'Ответ текстом'), (2, 'Ответ с одним вариантом ответа'), (3, 'Ответ с несколькими вариантами ответа')])),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pl', to='poll.Polls')),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer', models.ManyToManyField(to='poll.Answer')),
                ('polls', models.ManyToManyField(to='poll.Polls')),
            ],
        ),
        # Added separately because Question is created after Answer.
        migrations.AddField(
            model_name='answer',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='qs', to='poll.Question'),
        ),
    ]
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,026 | prostomusa/Factory | refs/heads/main | /poll/urls.py | from django.urls import re_path, path, include
from .views import *
from rest_framework.authtoken.views import obtain_auth_token
# REST routes for the poll app; token auth obtained via /login.
urlpatterns = [
    path('poll/answer/<int:id_poll>/<int:id_user>', answer_question),
    path('poll/<int:id_poll>/get_question', get_question),
    path('poll/question/<int:id_question>', question),
    path('poll/<int:id_poll>/question', question_create),
    path('poll/user/<int:id_user>', get_polls),
    path('poll/<int:id_poll>/', poll),
    path('poll/create/', polls_create),
    path('poll/list/', polls_list),
    path('login', obtain_auth_token, name="login"),
    #path('profile/create/', ProfileCreateView.as_view()),
    #path('client/create/', ClientCreateView.as_view())
]
| {"/poll/views.py": ["/poll/serializer.py", "/poll/models.py"], "/poll/serializer.py": ["/poll/models.py"], "/poll/urls.py": ["/poll/views.py"]} |
67,029 | mariabenitocst/oPDF | refs/heads/master | /PotentialProf.py | """ This file creates the potential and cumulative density profile templates for a given DM file"""
from oPDF import *
from myutils import *
import h5py
import os
import sys
from scipy.stats import chi2
# plt.ion()
# set units if needed
# --- units and input -------------------------------------------------------
hubble = 0.73
Globals.set_units(1e10 * hubble, hubble, 1.)
#set to 1e10Msun, kpc, km/s with the current h
DMfile = oPDFdir + '/../../data/B4DM.hdf5'
# real parameters, for comparison with analytical profile:
M0 = 183.8 # Spherical-overdensity Mass M0
C0 = 15.07 # Rv0/Rs0
nbin = 100 # do not change this, unless you change TemplateData.h as well.
npart = 0 # number of particles to use. 0 means FullSample.
# --- load particles and rescale their mass for the sub-selection -----------
FullSample = Tracer(DMfile, rmin=0, rmax=500)
Sample = FullSample.copy(0, npart)
Sample.mP = FullSample.mP*FullSample.nP / \
    Sample.nP # rescale particle mass to account for selection
FullSample.clean()
# --- radial binning and binned density/potential profiles ------------------
xbin = np.logspace(np.log10(0.1), np.log10(500), nbin)
xcen = xbin / np.sqrt(xbin[1]/xbin[0])  # geometric bin centres
vol = np.diff(np.hstack([0., xbin])**3) * np.pi*4/3
countM, tmp = np.histogram(Sample.data['r'], np.hstack([0., xbin])) # dM
countR, tmp = np.histogram(
    Sample.data['r'], np.hstack([0., xbin]), weights=1. /Sample.data['r'])#dM/R
density = countM * Sample.mP/vol
density_err = np.sqrt(countM) * Sample.mP/vol  # Poisson error
pot = countM.cumsum()/xbin+countR.sum() - \
    countR.cumsum() # M(<r)/r+\int_r^rmax dM/r
pot *= Globals.units.Const.G * Sample.mP
density_cum = countM.cumsum() / xbin**3/(4*np.pi/3)*Sample.mP
# pad with bin 0
xbin = np.hstack([0., xbin])
pot = np.hstack([countR.sum() * Globals.units.Const.G*Sample.mP, pot])
density_cum = np.hstack([density_cum[0], density_cum])
# --- compare with the analytical NFW profile -------------------------------
halo = Halo()
halo.set_param([M0, C0])
potNFW = -halo.pot(xbin)
# iref=-1
iref = np.abs(xbin - halo.Rs).argmin()  # anchor the two curves at Rs
plt.plot(xbin, pot - pot[iref] +potNFW[iref], 'gx')
plt.plot(xbin, potNFW, 'k')
plt.loglog()
plt.xlabel('R')
plt.ylabel(r'$\psi$')
plt.legend(('Data', 'NFW analytical'))
# plt.savefig('DensityProf.eps') #rasterize=True, dpi=300
# --- dump the template arrays to paste into C/TemplateData.h ---------------
print('Profile template to be added to C/TemplateData.h:')
print('R')
print(','.join(['{:f}'.format(i) for i in xbin]))
print('Pot')
print(','.join(['{:f}'.format(i) for i in pot]))
print('AvDensity')
print(','.join(['{:g}'.format(i) for i in density_cum]))
# Now recompile and try the newly added template
TMPid = 1 # change to id of the newly added template
xnew = np.logspace(-1, 3, 50)
tmphalo0 = Halo(halotype=HaloTypes.TMPPotScaleRScale, TMPid=TMPid)
tmphalo0.set_param([1, 1])
potNew0 = -tmphalo0.pot(xnew)
tmphalo = Halo(halotype=HaloTypes.TMPMC, TMPid=TMPid)
tmphalo.set_param([M0, C0])
potNew = -tmphalo.pot(xnew)
tmphalo2 = Halo(halotype=HaloTypes.TMPMC, TMPid=TMPid)
tmphalo2.set_param([2 * M0, C0])
potNew2 = -tmphalo2.pot(xnew)
plt.figure()
plt.plot(xnew, potNew, 'r-')
plt.plot(xnew, potNew2, 'g-')
plt.plot(xbin, pot, 'ko')
plt.plot(xnew, potNew0, 'b-', linewidth=6, alpha=0.3)
plt.loglog()
plt.legend(('Template(M0,c0)', 'Template(2M0,c0)', 'Data', 'Template0'))
plt.show()
# finalize
Sample.clean()
# np.savetxt('B4density.cen_mstbnd.dat',np.array([xcen*hubble,density/hubble**2,
# density_err/hubble**2]).T, header='R/(kpc/h), rho/(1e10*h^2*Msun/kpc),
# rho_err')
| {"/PotentialProf.py": ["/oPDF.py"], "/FitNFWProfML.py": ["/oPDF.py"]} |
67,030 | mariabenitocst/oPDF | refs/heads/master | /oPDF.py | """ Python interface to the C code for oPDF modelling.
It wraps the C functions into python classes with ctypes.
"""
from math import *
import numpy as np
import ctypes
import os
from myutils import Chi2Sig, AD2Sig, density_of_points, get_extent, NamedValues, NamedEnum
# from myutils import fmin_gsl
# from scipy.optimize import fmin, fmin_powell
import matplotlib.pyplot as plt
#from iminuit import Minuit
#from iminuit.frontends import ConsoleFrontend
from scipy.optimize import newton, brentq, fmin, curve_fit
# import copy
# Resolve the package directory so the shared library can be found even
# when the module is imported from elsewhere.
oPDFdir = os.path.dirname(__file__)
if oPDFdir == '':
    oPDFdir = '.'
#=======================load the library===============================
lib = ctypes.CDLL(oPDFdir +"/liboPDF.so")
#=======================prototype the library==========================
#==globals.h
class global_tol(ctypes.Structure):
    """ctypes mirror of the C tolerance struct (numerical precision knobs).
    Field order/types must match the C side; do not reorder."""
    _fields_ = [('bin', ctypes.c_double),
                ('bin_abs', ctypes.c_double),
                ('rel', ctypes.c_double)]
class global_cosm(ctypes.Structure):
    """ctypes mirror of the C cosmology struct (matter / lambda densities)."""
    _fields_ = [('OmegaM0', ctypes.c_double),
                ('OmegaL0', ctypes.c_double)]
class global_const(ctypes.Structure):
    """ctypes mirror of the C constants struct (G and H0 in internal units)."""
    _fields_ = [('G', ctypes.c_double),
                ('H0', ctypes.c_double)]
class global_units(ctypes.Structure):
    """ctypes mirror of the C unit-system struct; Const is derived from it."""
    _fields_ = [('MassInMsunh', ctypes.c_double),
                ('LengthInKpch', ctypes.c_double),
                ('VelInKms', ctypes.c_double),
                ('Const', global_const)]
# C prototype: void set_units(double MassInMsunh, double LengthInKpch, double VelInKms)
lib.set_units.restype = None
lib.set_units.argtypes = [ctypes.c_double, ctypes.c_double, ctypes.c_double]
class globals_t(ctypes.Structure):
    ''' global variables of the module. It controls numerical precision, internal units, and cosmology.
    The numerical precision for orbit integration is controlled by Globals.tol.rel, which defaults to 1e-3
    (Globals.tol.rel=1e-2 should already be sufficient for likelihood inference and phase calculations).'''
    # Field layout mirrors the C Globals struct; do not reorder.
    _fields_ = [('tol', global_tol),
                ('cosmology', global_cosm),
                ('units', global_units)]
    def set_defaults(self):
        '''set default global parameters,
        including precision, cosmology and units'''
        lib.default_global_pars()
    def set_units(self, MassInMsunh=1.e10, LengthInKpch=1., VelInKms=1.):
        '''set system of units.
    specify Mass in Msun/h, Length in kpc/h, Velocity in km/s.

    :example:

      If you want to use (1e10Msun, kpc, km/s) as units, and you adopt :math:`h=0.73` in your model, then you can set the units like below

      >>> h=0.73
      >>> Globals.set_units(1e10*h,h,1)

      That is, to set them to (1e10h Msun/h, h kpc/h, km/s).

    .. note::

       - The user should only use Globals.set_units() to change the units, which automatically updates several interal constants related to units. Never try to change the internal unit variables (e.g., Globals.units.MassInMsunh) manually.
       - To avoid inconsistency with units of previously loaded tracers, you must do it immediately after importing the :module:`oPDF` module if you need to call :func:`set_units`.'''
        lib.set_units(MassInMsunh, LengthInKpch, VelInKms)
    def get_units(self):
        '''query the units and print them; returns the three unit scales.'''
        print('Mass :', self.units.MassInMsunh, 'Msun/h')
        print('Length:', self.units.LengthInKpch, 'kpc/h')
        print('Vel :', self.units.VelInKms, 'km/s')
        return (self.units.MassInMsunh, self.units.LengthInKpch, self.units.VelInKms)
class _NamedEnum(NamedEnum):
    # just an alias of NamedEnum
    # it is here just because sphinx does not auto-doc instances of imported classes,
    # so we make it a native class by aliasing it.
    pass
VirTypes = _NamedEnum('TH C200 B200')
'''Collection of virial definitions. It contains
- ``VirTH``: the spherical collapse prediction (i.e, Bryan & Norman 98 fitting).
- ``VirB200``: the 200 times mean density deifinition.
- ``VirC200``: the 200 times critical density definition.
'''
Globals = globals_t.in_dll(lib, 'Globals')
''' Collection of global variables of the module, of class :class:`globals_t`. It controls numerical precision, internal units, and cosmology.
'''
#===halo.h
MaxNPar = 10  # maximum number of profile parameters supported by the C side
Param_t = ctypes.c_double *MaxNPar  # fixed-size C double array for parameters
class Halo_t(ctypes.Structure):
    """ctypes mirror of the C Halo struct; field order/types must match the
    C header exactly, do not reorder."""
    _fields_ = [('pars', Param_t),  # parameter values
                ('scales', Param_t),
                # parameter scales. real parameters are set by
                # pars*scales
                ('z', ctypes.c_double),
                ('M', ctypes.c_double),
                ('c', ctypes.c_double),
                ('Rv', ctypes.c_double),
                ('Pots', ctypes.c_double),
                #-4*pi*G*rhos*rs^2, the potential at r=0
                ('Rs', ctypes.c_double),
                ('Rhos', ctypes.c_double),
                ('Ms', ctypes.c_double),  # 4*pi*rs^3*rhos
                ('RScale', ctypes.c_double),
                # for TMP profile, Rs/Rs0
                ('PotScale', ctypes.c_double),
                #for TMP profile, Pots/Pots0
                ('K', ctypes.c_double),
                #M=KR, for isothermal profile, where K=Vc^2/G.
                ('IsForbidden', ctypes.c_int),
                # whether the halo parameters are forbidden(e.g,
                # negative mass)
                ('virtype', ctypes.c_int),
                ('type', ctypes.c_int)
                ]
# ctypes prototypes for the halo-related C functions (halo.h).
Halo_p = ctypes.POINTER(Halo_t)
lib.halo_set_type.restype = None
lib.halo_set_type.argtypes = [
    ctypes.c_int, ctypes.c_int, ctypes.c_double, Param_t, Halo_p, ctypes.c_int]
lib.halo_set_param.restype = None
lib.halo_set_param.argtypes = [Param_t, Halo_p]
lib.halo_mass.restype = ctypes.c_double
lib.halo_mass.argtypes = [ctypes.c_double, Halo_p]
lib.halo_pot.restype = ctypes.c_double
lib.halo_pot.argtypes = [ctypes.c_double, Halo_p]
lib.isNFW.restype = ctypes.c_int
lib.isNFW.argtypes = [ctypes.c_int]
HaloTypes = _NamedEnum(
'NFWMC NFWPotsRs NFWRhosRs TMPMC TMPPotScaleRScale CoreRhosRs CorePotsRs PointM IsothermalK')
''' Collection of halo types. It contains
- ``NFWMC``: NFW halo parametrized by :math:`(M,c)`
- ``NFWRhosRs``: NFW, :math:`(\\rho_s,r_s)`
- ``NFWPotsRs``: NFW, (:math:`|\\psi_s|, r_s`\ ), with :math:`|\\psi_s|=4\\pi G\\rho_s r_s^2`\ .
- ``CorePotsRs``: Cored Generalized NFW Potential (inner density slope=0), parametrized by (:math:`|\\psi_s|,r_s`\ )
- ``CoreRhosRs``: Cored GNFW, :math:`(\\rho_s,r_s)`
- ``TMPMC``: Template profile, :math:`(M,c)` parametrization
- ``TMPPotScaleRScale``: Template, :math:`\\psi_s/\\psi_{s0}, r_s/r_{s0}`
- ``PointM``: Point Mass at r=0
- ``IsothermalK``: Isothermal halo, :math:`M(r)=Kr`
'''
class Halo(Halo_t):
    '''a general halo describing the potential. It has the following properties

    :ivar pars: raw parameter values. do not change them manually, use :func:`set_param` to set them.
    :ivar scales: parameter scales. use :func:`set_type` to set them.
    :ivar virtype: virial definition. One of :const:`VirTypes`.
    :ivar type: parametrization type. One of :const:`HaloTypes`.

    Depending on the type of the halo, some of the following properties may be calculated during :func:`set_param`:

    :ivar M: mass
    :ivar c: concentration
    :ivar Rv: virial radius
    :ivar Pots: :math:`\\psi_s=4\\pi G\\rho_s r_s^2`.
    :ivar Rhos: scale density for NFW
    :ivar Rs: scale radius
    :ivar RScale: :math:`r_s/r_{s0}` for TMP profile
    :ivar PotScale: :math:`\\psi_s/\\psi_{s0}` for TMP profile
    '''

    def __init__(self, halotype=HaloTypes.NFWMC, virtype=VirTypes.C200, redshift=0., scales=None, TMPid=-1):
        '''
        :initializer:
          define a halo by specifying the parametrization, virial definition and redshift of halo

        :param halotype: halo parametrization, one of the :const:`HaloTypes` members
        :param virtype: virial definition, one of the :const:`VirTypes` members
        :param redshift: redshift of halo
        :param scales: scales of halo parameters, array-like, of the same shape as parameters. default to all-ones if None. physical parameters will be params*scales
        :param TMPid: template id. only required when halotype is of template type
        '''
        Halo_t.__init__(self)
        self.set_type(halotype, virtype, redshift, scales, TMPid)

    def set_type(self, halotype=HaloTypes.NFWMC, virtype=VirTypes.C200, redshift=0., scales=None, TMPid=-1):
        '''set the parametrization, virial definition and redshift of halo

        :param halotype: halo parametrization, one of the :const:`HaloTypes` members
        :param virtype: virial definition, one of the :const:`VirTypes` members
        :param redshift: redshift of halo
        :param scales: scales of halo parameters, array-like, of the same shape as parameters. default to ones if not specified. physical parameters will be params*scales'''
        if scales is None:
            scales = np.ones(MaxNPar)
        lib.halo_set_type(halotype.value, virtype.value,
                          redshift, Param_t(*scales), ctypes.byref(self), TMPid)

    def set_param(self, pars=(1., 1.)):
        '''set the parameters of the halo and update the derived quantities.

        :param pars: parameters describing the halo (in units of the scales).'''
        # default changed from a mutable list to an equivalent tuple;
        # Param_t(*pars) accepts any sequence.
        lib.halo_set_param(Param_t(*pars), ctypes.byref(self))

    def mass(self, r):
        '''cumulative mass profile M(<r).

        :param r: array-like or float, the radius'''
        try:
            # fast path: r is a scalar convertible to c_double
            return lib.halo_mass(r, ctypes.byref(self))
        except (TypeError, ctypes.ArgumentError):
            # r is array-like: the C call rejects it, evaluate element-wise.
            # (was a bare except, which also swallowed KeyboardInterrupt etc.)
            return np.array([lib.halo_mass(x, ctypes.byref(self)) for x in r])

    def pot(self, r):
        '''potential at radius r.

        :param r: array-like or float, the radius'''
        try:
            return lib.halo_pot(r, ctypes.byref(self))
        except (TypeError, ctypes.ArgumentError):
            return np.array([lib.halo_pot(x, ctypes.byref(self)) for x in r])

    def get_current_TMPid(self):
        '''get the id of the template currently loaded in the system.
        this func can be used to check whether the loaded template
        is the template of the current halo, just in case the template does not match'''
        # the C function is declared with argtypes=[] (takes no arguments);
        # the previous code passed byref(self), which was silently ignored.
        return lib.get_current_TMPid()

    def isNFW(self):
        ''' return True if halo is NFW, False if not '''
        return bool(lib.isNFW(self.type))

    def vr_inv(self, r, E, L):
        ''' inverse of radial velocity, 1/v_r, for a particle at r with binding energy E and angular momentum L.
        Returns 0 where the particle cannot reach r (vr^2<=0).'''
        vr2 = 2 * (-E - 0.5 * (L / r)**2 - self.pot(r))
        # suppress the expected divide/invalid warnings for forbidden radii;
        # those entries are overwritten with 0 below.
        with np.errstate(divide='ignore', invalid='ignore'):
            ivr = 1. / np.sqrt(vr2)
        if np.ndim(vr2) == 0:
            if vr2 <= 0:
                ivr = 0.
        else:
            ivr[vr2 <= 0] = 0.
        return ivr
#==tracer.h
class Particle_t(ctypes.Structure):
    # Mirror of the C Particle_t struct: one tracer particle.
    # Field order and types must match the C definition.
    _fields_ = [('haloid', ctypes.c_int),  # id of the host halo
                ('subid', ctypes.c_int),   # id of the subhalo
                #('strmid', ctypes.c_int),
                ('flag', ctypes.c_int),    # selection flag; flag==0 particles are removed by squeeze()
                ('w', ctypes.c_double),    # weight: particle mass in units of the mean particle mass
                ('r', ctypes.c_double),    # radius
                ('K', ctypes.c_double),  # kinetic energy
                ('L2', ctypes.c_double),  # L^2
                ('L', ctypes.c_double),  # angular momentum
                ('x', ctypes.c_double * 3),  # position
                ('v', ctypes.c_double * 3),  # velocity
                ('E', ctypes.c_double),      # energy
                ('T', ctypes.c_double),      # radial period
                ('vr', ctypes.c_double),     # radial velocity
                ('theta', ctypes.c_double),  # phase angle along the orbit
                ('rlim', ctypes.c_double * 2)  # peri- and apo-center radii
                ]
Particle_p = ctypes.POINTER(Particle_t)
class Tracer_t(ctypes.Structure):
    # Forward declaration: fields are assigned below because the struct
    # refers to itself (Views is a pointer to Tracer_t).
    pass
Tracer_p = ctypes.POINTER(Tracer_t)
Tracer_t._fields_ = [('lnL', ctypes.c_double),  # likelihood/TS from the last evaluation
                     ('nP', ctypes.c_int),      # number of particles
                     ('mP', ctypes.c_double),   # average particle mass
                     ('P', Particle_p),         # C array of particles
                     ('nbin_r', ctypes.c_int),  # number of radial bins
                     ('FlagRLogBin', ctypes.c_int),  # whether radial bins are logarithmic
                     ('RadialCount', ctypes.POINTER(
                         ctypes.c_double)),     # counts in radial bins
                     ('rmin', ctypes.c_double),  # lower radial cut
                     ('rmax', ctypes.c_double),  # upper radial cut
                     ('proxybin', ctypes.c_double * 2),  # proxy range of this view
                     ('halo', Halo),            # the attached potential
                     ('nView', ctypes.c_int),   # number of sub-views
                     ('ViewType', ctypes.c_char),  # proxy used to create the views
                     ('Views', Tracer_p)        # array of sub-views
                     ]  # : Tracer fields
# ctypes prototypes for the tracer-manipulation C functions (tracer.h).
lib.load_tracer_particles.restype = None
lib.load_tracer_particles.argtypes = [
    ctypes.c_char_p, ctypes.c_char_p, Tracer_p, ctypes.c_int]
lib.cut_tracer_particles.restype = None
lib.cut_tracer_particles.argtypes = [
    Tracer_p, ctypes.c_double, ctypes.c_double]
lib.shuffle_tracer_particles.restype = None
lib.shuffle_tracer_particles.argtypes = [ctypes.c_ulong, Tracer_p]
lib.squeeze_tracer_particles.restype = None
lib.squeeze_tracer_particles.argtypes = [Tracer_p]
lib.free_tracer_particles.restype = None
lib.free_tracer_particles.argtypes = [Tracer_p]
lib.print_tracer_particle.restype = None
lib.print_tracer_particle.argtypes = [Tracer_p, ctypes.c_int]
lib.resample_tracer_particles.restype = None
lib.resample_tracer_particles.argtypes = [ctypes.c_ulong, Tracer_p, Tracer_p]
lib.copy_tracer_particles.restype = None
lib.copy_tracer_particles.argtypes = [
    ctypes.c_int, ctypes.c_int, Tracer_p, Tracer_p]
lib.count_tracer_radial.restype = None
lib.count_tracer_radial.argtypes = [Tracer_p, ctypes.c_int, ctypes.c_int]
lib.free_tracer_rcounts.restype = None
lib.free_tracer_rcounts.argtypes = [Tracer_p]
# in-place sorting of the particle array by various properties
lib.sort_part_flag.restype = None
lib.sort_part_flag.argtypes = [Particle_p, ctypes.c_int]
lib.sort_part_E.restype = None
lib.sort_part_E.argtypes = [Particle_p, ctypes.c_int]
lib.sort_part_L.restype = None
lib.sort_part_L.argtypes = [Particle_p, ctypes.c_int]
lib.sort_part_R.restype = None
lib.sort_part_R.argtypes = [Particle_p, ctypes.c_int]
lib.create_tracer_views.restype = None
lib.create_tracer_views.argtypes = [Tracer_p, ctypes.c_int, ctypes.c_char]
lib.free_tracer_views.restype = None
lib.free_tracer_views.argtypes = [Tracer_p]
lib.create_nested_views.restype = None
lib.create_nested_views.argtypes = [
    ctypes.POINTER(ctypes.c_int), ctypes.c_char_p, Tracer_p];
lib.free_tracer.restype = None
lib.free_tracer.argtypes = [Tracer_p]
lib.tracer_set_energy.restype = None
lib.tracer_set_energy.argtypes = [Tracer_p]
lib.tracer_set_orbits.restype = None
lib.tracer_set_orbits.argtypes = [Tracer_p, ctypes.c_int]
#==nfw.h
lib.NFW_like.restype = ctypes.c_double
lib.NFW_like.argtypes = [Param_t, Tracer_p]
#==template.h
lib.get_current_TMPid.restype = ctypes.c_int
lib.get_current_TMPid.argtypes = []
#==models.h
class NamedValuesEst(NamedValues):
    '''A NamedValues entry that additionally records whether the
    estimator needs particle phase angles (default True).'''

    def __init__(self, value, name):
        super().__init__(value, name)
        self.need_phase = True
class NamedEstimators(object):
    '''Container exposing one :class:`NamedValuesEst` attribute per
    whitespace-separated name in the input string.'''

    def __init__(self, names):
        for index, label in enumerate(names.split()):
            setattr(self, label, NamedValuesEst(index, label))
Estimators = NamedEstimators('RBinLike AD MeanPhase MeanPhaseRaw')
''' Collection of dynamical estimators. It contains
- ``RBinLike``: binned radial likelihood.
Use ``RBinLike.nbin`` (``integer``) and ``RBinLike.logscale`` (``True`` or ``False``) to control the number and scale of bins.
Since the purpose of the binning is purely to suppress shot noise, a larger number of bins is generally better, as long as it is not too noisy. On the other hand, when the likelihood contours appear too irregular, one should try reducing the number of radial bins to ensure the irregularities are not caused by shot noise. In our analysis, we have adopted 30 bins for an ideal sample of 1000 particles, and 50 bins for :math:`10^6` particles in a realistic halo, although a bin number as low as 5 could still work.
- ``AD``: Anderson-Darling distance.
- ``MeanPhaseRaw``: Normalized mean phase deviation :math:`\\bar{\\Theta}=(\\bar{\\theta}-0.5)/\\sigma_{\\theta}`\ , to be compared to a standard normal variable.
- ``MeanPhase``: :math:`\\bar{\\Theta}^2`\ , to be compared to a chi-square variable.'''
Estimators.RBinLike.need_phase = False
Estimators.RBinLike.nbin = 20 # : The number of radial bins.
Estimators.RBinLike.logscale = True
# ctypes prototypes for the likelihood/fitting C functions (models.h).
lib.alloc_integration_space.restype = None
lib.alloc_integration_space.argtypes = []
lib.free_integration_space.restype = None
lib.free_integration_space.argtypes = []
lib.like_eval.restype = ctypes.c_double
lib.like_eval.argtypes = [ctypes.c_int, Tracer_p]
lib.nested_views_like.restype = ctypes.c_double
lib.nested_views_like.argtypes = [
    ctypes.c_int, Tracer_p, ctypes.c_int, ctypes.c_int]
lib.DynFit.restype = ctypes.c_int
lib.DynFit.argtypes = [ctypes.POINTER(ctypes.c_double), ctypes.c_int,
                       ctypes.c_double, ctypes.c_double, ctypes.c_int, ctypes.c_int, ctypes.c_int, Tracer_p]
lib.predict_radial_count.restype = None
lib.predict_radial_count.argtypes = [
    ctypes.POINTER(ctypes.c_double), ctypes.c_int, ctypes.c_int, Tracer_p]
def _lib_open():
    '''initialize the library: reset the global settings to their defaults
    and allocate the C-side integration workspace.'''
    Globals.set_defaults()
    lib.alloc_integration_space()
def _lib_close():
    '''finalize the library: free the C-side integration workspace.'''
    lib.free_integration_space()
class Tracer(Tracer_t):
''' Tracer: a population of tracer particles.
:ivar halo: the halo (potential, type :class:`Halo`) for the tracer.
:ivar lnL: likelihood or distance for the sample from the previous likelihood calculation, depending on estimator.
:ivar nP: number of particles.
:ivar mP: average particle mass.
:ivar data: particle data, numpy record array format. It includes the following fields:
('haloid', 'subid', 'flag', 'w', 'r', 'K', 'L2', 'L', 'x', 'v', 'E', 'T', 'vr', 'theta', 'rlim')
.. note::
- The `w` field is the particle mass in units of the average particle mass. These are all ones if no particle mass is given in the datafile.
- the `haloid` and `subid` fields are only filled if you have `SubID` and `HaloID` datasets in the datafile when loading.
- The `E`,`theta` and `rlim` fields are the energy, phase-angle, and radial limits (peri and apo-center distances) of the orbits.These depend on the potential, and are only filled when you have done some calculation in a halo, or have filled them explicitly with :py:func:`set_phase`.
.. note::
The following members are provided for information, but do not manually assign to them. use :py:func:`radial_count` and :py:func:`radial_cut` to set them.
:ivar nbin_r: number of radial bins.
:ivar FlagRLogBin: whether radial binning is in logspace.
:ivar RadialCount: counts in radial bins.
:ivar rmin: lower radial cut.
:ivar rmax: upper radial cut.
'''
def __init__(self, datafile=None, grpname='/', rmin=None, rmax=None, shuffle=True, AddHubbleFlow=False):
'''
:Initializer:
loading a tracer from a datafile. grpname specifies the path to the dataset inside the hdf5 file, in case the file contains multiple datasets.
optionally, can apply radial cut given by rmin and rmax
if AddHubbleFlow=True, then also add hubble flow to the loaded velocity (only support redshift 0 data now).
.. note::
The datafile should contain physical positions and velocities of the tracer particles, relative to the center of the halo.
By default, the tracer particles will be shuffled after loading, for easy creation of subsamples by copying later.
To keep the original ordering of particles, set shuffle=False'''
Tracer_t.__init__(self)
self._pointer = ctypes.byref(self)
self.nP = 0
self.nView = 0
self.halo = Halo()
if datafile != None:
self.load(datafile, grpname, AddHubbleFlow)
self.radial_cut(rmin, rmax)
if shuffle:
self.shuffle()
    def __enter__(self):
        # context-manager support: `with Tracer(...) as t:` guarantees clean()
        return self

    def __exit__(self, type, value, traceback):
        # release the C-allocated particle memory on context exit
        self.clean()
    def __update_array(self):
        '''this should be called whenever the pointer self.P has changed,
        to re-point the numpy array at the new memory.

        The record array self.data shares memory with the C particle array:
        no copy is made, so writes through self.data are seen by the C side.'''
        Parr = (Particle_t*self.nP).from_address(
            ctypes.addressof(self.P.contents))  # ctypes struct array over the C buffer
        self.data = np.frombuffer(Parr, np.dtype(Parr))[0]  # zero-copy numpy record view
    def load(self, datafile, grpname='/', AddHubbleFlow=False):
        '''load particles from datafile.
        If the datafile contains multiple datasets, `grpname` can be further used to specify the path of the dataset in the hdf5 file.

        Any previously loaded particles are freed first. rmin/rmax are reset
        to the extremes of the loaded radii.'''
        if self.nP > 0:
            # free the old C-side particle array before loading a new one
            lib.free_tracer(self._pointer)
        lib.load_tracer_particles(
            datafile.encode('utf-8'), grpname.encode('utf-8'), self._pointer, AddHubbleFlow)
        self.__update_array()
        self.rmin = self.data['r'].min()
        self.rmax = self.data['r'].max()

    # def fill(x,v):
        #'''load particles from array. To be implemented.'''
        # FIXME.
        # pass
    def clean(self):
        '''release the C-allocated memory for the tracer.

        Call this explicitly (or use the tracer as a context manager).
        It is deliberately NOT defined as __del__: the garbage collector
        may never call __del__, so relying on it would leak C memory.'''
        lib.free_tracer(self._pointer)
def copy(self, offset=0, n=0):
'''create a subsample by copying n particles starting from offset.
if n==0, then copy all the particles starting from offset.
Only particles and their radial limits are copied. The halo, RadialCounts and Views are not copied into the new sample.
return the subsample'''
newsample = Tracer()
lib.copy_tracer_particles(
int(offset), int(n), newsample._pointer, self._pointer)
newsample.__update_array()
return newsample
def select(self, flags):
'''select particles according to flags array, into a new sample.
.. note::
- Same as :func:`copy`, only particles and their radial limits are copied. The halo, RadialCounts and Views are not copied into the new sample.
- When doing dynamical tests, one should avoid distorting the radial distribution with any radial selection. One can still apply radial cuts, but should only do this with the :func:`radial_cut` function. So never use :func:`select` on data['r'].'''
sample = self.copy(0, 0)
sample.data['flag'] = flags
sample.squeeze() # __update_array() is called automatically
return sample
    def squeeze(self):
        '''remove P.flag==0 (data['flag']==0) particles in place,
        then refresh the numpy view of the (moved) particle array.'''
        lib.squeeze_tracer_particles(self._pointer)
        self.__update_array()
    def shuffle(self, seed=100):
        '''shuffle particles randomly (in the C-side array).

        :param seed: optional, seeds the random number generator for the shuffle'''
        lib.shuffle_tracer_particles(ctypes.c_ulong(seed), self._pointer)
def sort(self, proxy, offset=0, n=0):
'''sort the particles according to proxy
proxy can be 'E','L','r' or 'flag'.
offset, n: optional, sort n particles starting from offset.
n=0 means sort all particles starting from offset.'''
if n == 0:
n = self.nP -offset
sort_func = {'flag': lib.sort_part_flag, 'E':
lib.sort_part_E, 'L': lib.sort_part_L, 'r': lib.sort_part_R}
sort_func[proxy](
ctypes.byref(Particle_t.from_buffer(self.P[offset])), n)
def resample(self, seed=100):
''' create a bootstrap sample (sampling with replacement) of the same size from tracer
return the new sample'''
newsample = Tracer()
lib.resample_tracer_particles(seed, newsample._pointer, self._pointer)
newsample.__update_array()
return newsample
def radial_cut(self, rmin=None, rmax=None):
'''cut the tracer with bounds [rmin, rmax].
if only rmin or rmax is given, the other bound is not changed.
.. note::
This function not only selects particles within (rmin,rmax), but also sets the radial boundary for the dynamical model, so that only dyanmical consistency inside the selected radial range is checked. So always use this function if you want to change radial cuts. This function is automatically called when initializing a :class:`Tracer` with rmin/rmax.'''
if not ((rmin is None) and (rmax is None)):
if rmin is None:
rmin = self.rmin
if rmax is None:
rmax = self.rmax
lib.cut_tracer_particles(self._pointer, rmin, rmax)
self.__update_array()
    def radial_count(self, nbin=10, logscale=True):
        '''bin the particles radially, to be used for radial likelihood calculation.
        The histogram will be recorded in Tracer.RadialCount[].

        :param nbin: number of radial bins.
        :param logscale: whether the bins are logarithmic in radius.

        .. note::
             This function is automatically called by the relevant likelihood functions such as :func:`likelihood`,
             :func:`dyn_fit`, :func:`scan_confidence` when the estimator is :const:`Estimators` ``.RBinLike``. In these cases,
             `nbin` and `logscale` will be determined according to :attr:`Estimators.RBinLike.nbin` and :attr:`Estimators.RBinLike.logscale`.
             So usually you do not need to call this function explicitly.'''
        lib.count_tracer_radial(self._pointer, nbin, logscale)
    def predict_radial_count(self, nbin=100, logscale=True):
        '''predict radial counts according to oPDF.
        :func:`set_phase` must have been called prior to calling this.

        :param nbin: number of radial bins to predict.
        :param logscale: whether the bins are logarithmic in radius.
        :return: predicted counts, a float64 array of length nbin
            (filled in place by the C library).'''
        n = np.empty(nbin, dtype='f8')
        lib.predict_radial_count(
            n.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), nbin, logscale, self._pointer)
        # lib.predict_radial_count(n.ctypes.data, nbin) //also work?
        return n
def gen_bin(self, bintype, nbin=30,logscale=True, equalcount=False):
'''return bin edges. divide into nbin bins, with nbin+1 edges.'''
proxy = self.data[bintype]
n = nbin +1
if equalcount:
x = np.array(np.percentile(proxy, list(np.linspace(0, 100, n))))
else:
if logscale:
x = np.logspace(np.log10(proxy[proxy >0].min()), np.log10(proxy.max()), n)
else:
x = np.linspace(proxy.min(), proxy.max(), n)
return x
    def create_views(self, n=10, proxy='L'):
        '''sort the particles according to proxy,
        and divide into n equal-size subsamples sequentially.
        these subsamples do not copy the particle data,
        but only point to the corresponding segments of data in the parent sample,
        so they are called views, and can be accessed through Tracer.Views[i] from
        the parent sample.

        the energy needs to have been set before calling if proxy is 'E'.'''
        lib.create_tracer_views(self._pointer, n, proxy)
    def destroy_views(self):
        '''erase any views from a tracer (frees the C-side view structures).'''
        lib.free_tracer_views(self._pointer)
    def print_data(self, i=0):
        '''print information of particle i, both from the python view and
        from the C side, as a consistency check of the shared memory.'''
        print('---from python---')
        print(self.nP, '%0x' % ctypes.addressof(self.P.contents))
        print(self.P[i].x[0], self.P[i].r)
        print(self.data[i]['x'][0], self.data[i]['r'])
        print('--- from C ---')
        lib.print_tracer_particle(self._pointer, i)
        print('=================')
    def NFW_like(self, pars=[1, 1]):
        '''NFW log-likelihood. the halo should have been set to one of the NFW types before calling this.

        :param pars: the parameters to be passed to the halo.
        :return: log(likelihood)'''
        return lib.NFW_like(Param_t(*pars), self._pointer)
    def set_energy(self):
        '''determine particle energy in the attached halo
        (fills the E field of each particle on the C side).'''
        lib.tracer_set_energy(self._pointer)
    def set_orbits(self, set_phase=True):
        '''prepare particle orbits inside the attached halo.

        :param set_phase: whether to calculate the phase-angle of each particle.
            phase angle is needed by AD and MeanPhase estimators, but not by RBinLike;
            see estimator.need_phase for each estimator.
        '''
        lib.tracer_set_orbits(self._pointer, set_phase)
    def set_phase(self, pars, need_theta=True):
        '''prepare the phases for phase-related calculations such as like_eval or phase_density.

        Sets the halo parameters, then the particle energies, then the orbits
        (the order matters: each step depends on the previous one).'''
        self.halo.set_param(pars)
        self.set_energy()
        self.set_orbits(need_theta)
    def like_eval(self, estimator):
        '''evaluate likelihood or TS with the given estimator.

        .. note::
            This is a low-level function. One has to call :func:`set_phase` before calling this.
            Use :func:`likelihood` which combines :func:`like_eval` and :func:`set_phase` automatically, unless you do want to call them separately.

        :param estimator: estimator to use for the likelihood or TS calculation.
        :returns: the likelihood (if estimator= :const:`Estimators` ``.RBin``) or TS value (:math:`\\bar{\\Theta}`\ for :const:`Estimators` ``.MeanPhaseRaw``, :math:`\\bar{\\Theta}^2`\ for :const:`Estimators` ``.MeanPhase``, or AD distance for :const:`Estimators` ``.AD``).
        '''
        return lib.like_eval(estimator.value, self._pointer)
def likelihood(self, pars=[1, 1], estimator=Estimators.MeanPhaseRaw, auto_rbin=True):
'''calculate likelihood or test-statistic.
it automatically prepares orbits and then calculates the likelihood or TS
(i.e., it combines :func:`set_phase` and :func:`like_eval`).
:param pars: parameter values
:param estimator: estimator to use for the likelihood or TS calculation.
:param auto_rbin: whether to prepare radial bins automatically. Only relevant if you are using Estimators.RBin. default to True. set to false if you have prepared the bins manually (by calling :func:`radial_count`).
:returns: the likelihood (if estimator= :const:`Estimators` ``.RBin``) or TS value (:math:`\\bar{\\Theta}`\ for :const:`Estimators` ``.MeanPhaseRaw``, :math:`\\bar{\\Theta}^2`\ for :const:`Estimators` ``.MeanPhase``, or AD distance for :const:`Estimators` ``.AD``).
'''
if auto_rbin and estimator == Estimators.RBinLike:
self.radial_count(estimator.nbin, estimator.logscale)
self.set_phase(pars, estimator.need_phase)
return self.like_eval(estimator)
def create_nested_views(self, viewtypes='EL', nbins=[10, 10]):
'''create nested views, i.e., create views according to first proxy,
then create sub-views for each view according to the second proxy and so on.
viewtypes can be one, two or more proxies, e.g, 'E','EL','LEr'.
len(nbins) must match len(viewtypes).
the energy need to be set before calling if creating E views'''
try:
nbins = list(nbins)
except:
nbins = [nbins]
lib.create_nested_views((ctypes.c_int *(len(nbins)+1))(
*nbins), ctypes.c_char_p(viewtypes.encode('utf-8')), self._pointer)
def nested_views_like(self, estimator=Estimators.AD):
'''evaluate likelihood in at the deepest views, and return the sum of them.
The likelihood for each view is also availabel in Views[i].lnL'''
nbin_r = 0
logscale = False
if estimator == Estimators.RBinLike:
nbin_r = estimator.nbin
logscale = estimator.logscale
return lib.nested_views_like(estimator.value, self._pointer, nbin_r, logscale)
    def scan_like(self, estimator, x, y):
        '''scan a likelihood surface.

        x,y are the vectors specifying the binning along x and y dimensions.
        return the likelihood value z on grids, to be used for contour plots as

        >>> contour(x,y,z)
        '''
        if estimator == Estimators.RBinLike:
            # bin once here; auto_rbin is disabled below to avoid rebinning
            self.radial_count(estimator.nbin, estimator.logscale)
        # m (x) is the outer loop and c (y) the inner one, so the Fortran-order
        # reshape yields shape (len(y), len(x)) as expected by contour(x,y,z)
        z = [self.likelihood([m, c], estimator, False) for m in x for c in y]
        return np.array(z).reshape([len(y), len(x)], order="F")
def scan_confidence(self, estimator, x0, ngrids=[10, 10], dx=[0.5, 0.5], logscale=False, maxlike=None):
'''scan significance levels around parameter value x0.
it scans ngrids linear bins from x0-dx to x0+dx if logscale=False,
or ngrids log bins from log10(x0)-dx to log10(x0)+dx if logscale=True.
If maxlike is given, it is interpreted as the global maximum log-likelihood, and is used to determine significance for RBinLike estimator;
otherwise the maximum likelihood is automatically scanned for RBinLike.
:returns: [x,y,sig,like]
x,y: the scanned grid points, vectors.
sig: the significance on the grids, of shape [len(x),len(y)]
like: the likelihood or figure of merits on the grids. same shape as sig.
'''
if logscale:
xl = np.log10(x0) -dx
xu = np.log10(x0) +dx
x = np.logspace(xl[0], xu[0], ngrids[0])
y = np.logspace(xl[1], xu[1], ngrids[1])
else:
xl = x0 -dx
xu = x0 +dx
x = np.linspace(xl[0], xu[0], ngrids[0])
y = np.linspace(xl[1], xu[1], ngrids[1])
like = self.scan_like(estimator, x, y)
if estimator == Estimators.RBinLike:
if maxlike is None:
xfit, maxlike, status = self.dyn_fit(estimator, x0)
sig = Chi2Sig(2 *(maxlike-like), 2)
elif estimator == Estimators.AD:
sig = AD2Sig(like)
elif estimator == Estimators.MeanPhase:
sig = np.sqrt(like)
elif estimator == Estimators.MeanPhaseRaw:
sig = np.abs(like)
return x, y, sig, like
    def TSprof(self, pars, proxy='L', nbin=100, estimator=Estimators.MeanPhaseRaw):
        '''calculate the likelihood inside equal-count bins of proxy.

        :return: the loglike or f.o.m. for the estimator in each bin, and the bin edges.

        proxy and nbin can also be of len>1; in that case, use
        self.Views[i].Views[j].lnL and self.Views[i].Views[j].proxybin
        to get the likelihood and bins in each node'''
        self.halo.set_param(pars)
        self.set_energy()
        if proxy != 'r':  # r-view orbits will be updated internally.
            self.set_orbits()
        self.create_nested_views(proxy, nbin)
        self.nested_views_like(estimator)
        # collect the per-view likelihoods computed above
        ts = [self.Views[i].lnL for i in range(self.nView)]
        ts = np.array(ts)
        # equal-count edges matching the view boundaries
        x = self.gen_bin(proxy, nbin, equalcount=True)
        return ts, x
    def plot_TSprof(self, pars, proxy='L', nbin=100, estimator=Estimators.MeanPhaseRaw, xtype='percent-phys', linestyle='r-'):
        '''plot the TS profile in equal-count bins of proxy.

        :param xtype: can be one of 'percent', 'phys', and 'percent-phys'.

          when xtype='percent', plot the x-axis with percents.

          if xtype='phys', plot x-axis with physical values.

          if xtype='percent-phys', plot xaxis in percent scale but label with physical values.

        :return: the matplotlib line handle of the profile.
        '''
        ts, x = self.TSprof(pars, proxy, nbin, estimator)
        # bin centers in percentile space
        percents = (np.arange(nbin) + 0.5)/nbin*100
        xmid = (x[:-1] + x[1:])/2
        if xtype == 'percent':
            h, = plt.plot(percents, ts, linestyle)
            plt.xlabel('Percentile in ' + proxy)
        if xtype == 'percent-phys':
            h, = plt.plot(percents, ts, linestyle)
            # label every third percentile tick with log10 of the physical edge
            plt.xticks(percents[1::3], ['%.1f' % np.log10(a) for a in x][1::3])
            plt.xlabel(r'$\log(' + proxy+')$')
        if xtype == 'phys':
            h, = plt.plot(np.log10(xmid), ts, linestyle)
            plt.xlabel(r'$\log(%s)$' % proxy)
        plt.ylabel(estimator)
        if estimator == Estimators.MeanPhaseRaw:
            # reference line at zero deviation
            plt.plot(plt.xlim(), [0, 0], 'k--')
        return h
def TSprofCum(self, pars, proxy='r', bins=100, estimator=Estimators.AD):
'''cumulative TS profile.
reuturn bin edges, ts, counts'''
self.halo.set_param(pars)
self.set_energy()
self.sort(proxy)
if proxy == 'E':
self.data[...] = self.data[-1::-1] # reverse the order
n, bins = np.histogram(self.data[proxy], bins)
if proxy == 'E':
n = n[-1::-1].cumsum()
else:
n = n.cumsum()
ts = []
if proxy != 'r':
self.set_orbits(True)
for i, x in enumerate(bins[1:]):
if n[i] == 0:
y = np.nan
else:
with self.copy(0, n[i]) as s:
if proxy != 'r':
y = s.like_eval(pars, estimator)
else:
s.rmax = x
s.set_orbits()
y = s.like_eval(pars, estimator)
ts.append(y)
if proxy == 'E':
ts = ts[-1::-1]
return np.array(bins), np.array(ts), np.array(n)
    def solve_meanphase(self, x0, y0=0., verbose=0):
        '''find the halo parameter x at which MeanPhaseRaw(x)=y0.

        :param x0: the initial value of x to start the search.
        :param y0: the mean phase value, so that MeanPhaseRaw(x)=y0.
        :param verbose: 0 or 1, whether to print additional convergence information.
        :return: (x,y,flag).

          x: solution

          y: MeanPhaseRaw(x)-y0

          success flag:
            0: fail to converge;
            1: successfully found solution;
            2: failed to find solution for MeanPhaseRaw(x)-y0=0, but found minimum for (MeanPhaseRaw(x)-y0)**2.

        .. note::
            The tracer halo type must be set before invoking this function.
        '''
        # residual as a function of the (single) halo parameter
        likeraw = lambda m: self.likelihood(
            [m], Estimators.MeanPhaseRaw, False)-y0
        # same residual in log-parameter space, for the root finder
        likeln = lambda x: likeraw(np.exp(x))
        # squared residual, for the fallback minimization
        like = lambda m: likeraw(m)**2
        # initialize the interval bracketing the root
        d = 2.
        a = [x0 *d, x0]
        f = [likeraw(x) for x in a]
        if abs(f[1]) < abs(f[0]):
            # make a[0] the endpoint closer to the root, so the expansion
            # below moves in the right direction
            f = f[::-1]
            a = a[::-1]
            d = 1. /d
        niter = 0
        # expand the interval geometrically until the residual changes sign
        while f[0]*f[1] > 0 and niter <10:
            a[1] = a[0]
            a[0] *= d  # push a[0] toward root
            f[1] = f[0]
            f[0] = likeraw(a[0])
            niter += 1
        if niter >= 10:
            if verbose > 0:
                print("Failed to bracket root after %d iterations; now search for func minimum instead." % niter)
            result = fmin(like, x0, full_output=True, disp=verbose)
            success = 2  # retreat to fmin
            if result[4] > 0:
                success = 0  # failed to converge
            return result[0][0], result[1], success
        # find the root in log space with Brent's method
        # print a,f, np.log(a), [likeln(np.log(a[0])), likeln(np.log(a[1]))]
        x = brentq(likeln, np.log(a[0]), np.log(a[1]), rtol=1e-4)
        x0 = np.sqrt(a[0] *a[1])  # geometric mean of the bracket (unused; kept for the newton alternative below)
        # x=newton(likeln, np.log(x0),tol=1e-4)
        y = likeln(x)
        x = np.exp(x)
        # x=brentq(likeraw, a[0],a[1], rtol=1e-2)
        # x=newton(likeraw, x0,tol=1e-4)
        # y=likeraw(x)
        success = abs(y) <1e-2
        return x, y, success
    def mark_phase_mass(self, m0=100., verbose=0):
        ''' estimate mass :math:`M(<r_c)` using the "PhaseMark" method.

        :param m0: the initial guess of the mass, optional.
        :param verbose: whether to print diagnostic information, default no.
        :return: r,m,ml,mu,flag,flagl, flagu

          r: the characteristic radius of the tracer in between rlim

          m: the mass of the halo contained inside r

          ml: 1-sigma lower bound on m

          mu: 1-sigma upper bound on m

          flag: whether m and r have converged (0:no; 1:yes; 2: no solution to the phase equation, but closest point found).

          flagl: whether ml has converged

          flagu: whether mu has converged
        '''
        rmed = np.median(self.data['r'])
        # step 1: point-mass fit gives the enclosed mass m
        self.halo.set_type(HaloTypes.PointM)
        result = self.solve_meanphase(m0, verbose=verbose)
        flag1 = result[-1]
        if verbose > 0:
            print(result)
        m = result[0]
        # +-1 sigma bounds from MeanPhaseRaw = -1 and +1
        ml, _, flagl = self.solve_meanphase(m, -1., verbose=verbose)
        mu, _, flagu = self.solve_meanphase(m, 1., verbose=verbose)
        # step 2: isothermal fit gives K=M(r)/r, hence r=m/k
        self.halo.set_type(HaloTypes.IsothermalK)
        result = self.solve_meanphase(m0 /rmed, verbose=verbose)
        flag2 = result[-1]
        if verbose > 0:
            print(result)
        k = result[0]
        # kl=self.solve_meanphase(k, -1.)[0]
        # ku=self.solve_meanphase(k, 1.)[0]
        r = m /k
        # rl,ru=rlim
        # rl=max(ml/ku,rlim[0])
        # ru=min(mu/kl,rlim[1])
        # bitwise AND of the two integer flags: 0 if either fit failed
        flag = flag1 &flag2
        if flag > 0:
            flag = max(flag1, flag2)  # in case any of them equal to 2
        return r, m, ml, mu, flag, flagl, flagu
def phase_mass_bin(self, xlim, proxy='r', m0=100., verbose=0):
''' estimate mass :math:`M(<r_c)` for tracer with property "proxy" selected in between xlim, using the "PhaseMark" method.
:param xlim: the range of property to select the tracer.
:param proxy: the property to select the tracer, 'r' or 'L'
:param m0: the initial guess of the mass, optional.
:param verbose: whether to print diagnostic information, default no.
:return: r,m,ml,mu,flag,flagl, flagu
r: the characteristic radius of the tracer in between rlim
m: the mass of the halo contained inside r
ml: 1-sigma lower bound on m
mu: 1-sigma upper bound on m
flag: whether m and r have converged (0:no; 1:yes; 2: no solution to the phase equation, but closest point found).
flagl: whether ml has converged
flagu: whether mu has converged
'''
if proxy in 'rR':
S = self.copy()
S.radial_cut(xlim[0], xlim[1])
else:
S = self.select((self.data[proxy] >xlim[0])&(self.data[proxy]<xlim[1]))
out = S.mark_phase_mass(m0, verbose)
S.clean()
return out
    def phase_mark_fit(self, par0=[1, 1], nbin=2, proxy='r', equalcount=True):
        '''fit halo potential with phase mark. The halo of the tracer need to be initialized to the desired type before fitting.

        :param par0: initial parameter values. len(par0) also gives the number of free parameters.
        :param nbin: number of bins. if nbin<len(par0), then the number of bins is set to len(par0) to avoid overfitting.
        :param proxy: the tracer property used to bin the sample into subsamples. 'r' or 'L'.
        :param equalcount: whether to use equalcount bins (each subsample has equal number of particles) or logarithmic bins in proxy.
        :return:
          par: best-fit parameter

          Cov: covariance matrix of the parameters

          data: phase-mark data, array of shape [nbin, 7]. each column is the fitting result [r,m,ml,mu,flag,flagl, flagu] to one bin, with (r,m) giving the radius and mass, (ml,mu) giving the lower and upper bound on mass, (flag, flagl, flagu) specifying whether the fit converged for mass and its lower and upper bounds (0:no; 1:yes; 2: no solution to the phase equation, but closest point found).

        .. warning::
            side effect: the nested model function mutates self.halo's
            parameters during the fit, so self.halo is left at the last
            parameter values tried by curve_fit.

        .. note::
            if the code complains about curve_fit() keyword error, you need to upgrade your scipy to version 0.15.1 or newer.
        '''
        # Never allow fewer bins than free parameters (avoids overfitting).
        nbin = max(nbin, len(par0))
        x = self.gen_bin(proxy, nbin, equalcount=equalcount)
        # One PhaseMark estimate [r, m, ml, mu, flag, flagl, flagu] per bin.
        data = np.array([self.phase_mass_bin(x[[i, i+1]], proxy)
                         for i in range(len(x)-1)])
        # Keep only the bins whose mass estimate fully converged (flag==1).
        flag = (data[:, 4] == 1)
        # Symmetrize the asymmetric 1-sigma bounds into one error bar per bin.
        sig1 = data[:, 3] - data[:, 1]
        sig2 = data[:, 1] - data[:, 2]
        sig = (sig1 + sig2)/2

        def halomass(r, *pars):
            # Model: enclosed halo mass at radius r for the trial parameters.
            self.halo.set_param(pars)
            return self.halo.mass(r)
        par, Cov = curve_fit(halomass, data[flag, 0], data[flag, 1], sigma=sig[
                             flag], p0=par0, absolute_sigma=1)  # need scipy version >0.15.1
        return par, Cov, data
def NFW_fit(self, x0=[1, 1], minuittol=1):
''' to fit an NFW density PDF with maximum likelihood.
.. note::
You need the `iminuit <https://pypi.python.org/pypi/iminuit>`_ python package before you can use this function. If you don't have that, you need to comment out the `iminuit` related imports in the header of `oPDF.py`.
:param x0: initial value of halo parameters. the interpretation of them depends on the halotype and scales of the tracer's halo. see Tracer.halo of :class:`Tracer` and halo.type, halo.scales of :class:`halo`.
:param minuittol: tolerance of minuit to consider convergence. Convergence is defined when the estimated distance to minimum edm<1e-4*minuittol*0.5
:return: results will be printed on screen.
also return minuit result and the minuit minimizer.
Please consult the `iminuit <https://pypi.python.org/pypi/iminuit>`_ documentation for the `iminuit` outputs.
.. note::
This is only intended for fitting the Dark Matter density profile to get the NFW parameters.
The tracer particle mass should have been properly assigned or adjusted,
so that mP*number_density=physical_density.
If you have sampled n particles from the full sample of n0 particles,
remember to adjust the mP of the sample to be mP0*n0/n, so that total mass is conserved.
'''
if not self.halo.isNFW():
print("Error: not an NFW halo. use Tracer.halo.set_type() to set halo type to NFW before NFW_fit()")
like = lambda m, c: -self.NFW_like([m, c])
# profilelikelihood ratio error-def: chi-square1
m = Minuit(like, m=x0[0], c=x0[1], print_level=0, pedantic=False,
error_m=0.1, error_c=0.1, errordef=0.5, frontend=ConsoleFrontend())
m.tol = minuittol # default convergence edm<1e-4*tol*errordef, but we do not need that high accuracy
result = m.migrad()
m.print_fmin()
m.print_matrix()
return result, m
def dyn_fit(self, estimator=Estimators.RBinLike, x0=[1, 1], xtol=1e-3, ftol_abs=0.01, maxiter=500, verbose=0):
'''
dynamical fit with the given estimator
Parameters
estimator(Estimator): estimator to use. select one from :data:`Estimators`.
x0(array-like): initial parameter values
xtol: tolerance in x to consider convergence
ftol_abs: tolerance in function values to consider convergence.
convergence is reached when both dx<xtol and df<ftol_abs between subsequent steps in the search.
maxiter: maximum number of iterations
verbose: whether to print during each step.
Returns
[x, fval, status_success]
x(array): the best fit parameter
fval(float): log-likelihood or fig of merit, depending on estimator
status_success(bool): whether the search converged successfully, 1 if yes, 0 if no.
'''
if estimator == Estimators.RBinLike:
self.radial_count(estimator.nbin, estimator.logscale)
status_success = lib.DynFit(
Param_t(*x0), len(x0), xtol, ftol_abs, maxiter, verbose, estimator.value, self._pointer)
x = np.array(self.halo.pars[:len(x0)])
return x, self.lnL, status_success
def phase_density(self, proxy='E', bins=100, method='hist', logscale=False, weight=False, return_data=False):
'''estimate density in proxy-theta space'''
if logscale:
f = self.data[proxy] >0
data = np.array(
(np.log10(self.data[proxy][f]), self.data['theta'][f]))
w = self.data['w'][f]
else:
data = np.array((self.data[proxy], self.data['theta']))
w = self.data['w']
if weight:
X, Y, Z = density_of_points(
data, bins=bins, method=method, weights=w)
else:
X, Y, Z = density_of_points(data, bins=bins, method=method)
extent = get_extent(X, Y)
if return_data:
return X, Y, Z, extent, data
else:
return X, Y, Z, extent
def phase_image(self, pars, proxy, bins=30, logscale=True):
'''plot an image of the particle distribution in proxy-theta space
:param pars: parameters specifying the potential
:param proxy: proxy to use for the proxy-theta plot, 'E' or 'L'.
:param bins: binning in proxy. if an integer, create the input number of bins. If a list of two arrays [xbins, ybins], use the array as the bins.
:param logscale: True or False, whether to bin in logscale or not when bins is an integer.'''
self.set_phase(pars)
# determine the phase-angles with the real halo
# parameters x0
X, Y, Z, extent = self.phase_density(
proxy, bins=bins, logscale=logscale)
plt.imshow(Z, extent=extent)
plt.axis('tight')
if logscale:
plt.xlabel(r'$\log(' + proxy+')$')
else:
plt.xlabel(r'$' + proxy+'$')
plt.ylabel(r'$\theta$')
# def gfmin_like(self, estimator, x0=[1,1], xtol=1e-3, ftolabs=0.01):
# like=lambda x: lib.like_to_chi2(self.freeze_and_like(x, estimator), estimator) #the real likelihood prob
# return fmin_gsl(like, x0, xtol=xtol, ftolabs=ftolabs, maxiter=500,
# full_output=True)
# def gfmin_dist(self, estimator, x0=[1,1], xtol=1e-3, ftolabs=0.01):
# like=lambda x: -self.freeze_and_like(x, estimator) #distance
# return fmin_gsl(like, x0, xtol=xtol, ftolabs=ftolabs, maxiter=500,
# full_output=True)
_lib_open()  # allocate integration space in the C library

if __name__ == "__main__":
    # Smoke-test / demo of the basic workflow on the bundled mock halo.
    datafile = oPDFdir + '/data/mockhalo.hdf5'
    FullSample = Tracer(datafile=datafile)
    Sample = FullSample.copy(0, 1000)  # work on the first 1000 particles
    FullSample.print_data(10)
    Sample.print_data(10)
    # Sample.data[1]['r']=2
    Sample.radial_cut(1, 1000)
    Sample.print_data(1)
    Sample.sort('L')
    Sample.print_data(1)
    # NFW halo parametrized by (M, c), with the given mass/concentration scales.
    Sample.halo.set_type(HaloTypes.NFWMC, scales=[183.5017, 16.1560])
    Sample.radial_count(10)
    result = Sample.dyn_fit(Estimators.RBinLike, verbose=1)
    print(Sample.likelihood([1, 1], Estimators.MeanPhase))
    # FullSample.clean()
    # Sample.clean()

    # good practice: use with!
    # with Tracer(datafile) as FullSample:
    # with FullSample.copy() as Sample:
    # FullSample.print_data()
    # Sample.print_data()
    # Sample.data[1]['r']=2
    # Sample.radial_cut(rmin=10)
    # Sample.print_data()
#_lib_close()
| {"/PotentialProf.py": ["/oPDF.py"], "/FitNFWProfML.py": ["/oPDF.py"]} |
67,031 | mariabenitocst/oPDF | refs/heads/master | /FitNFWProfML.py | import matplotlib
# matplotlib.use('Agg')
from oPDF import *
from myutils import *
import h5py
import os
import sys
from scipy.stats import chi2
plt.ion()
# Maximum-likelihood NFW profile fits for haloes A2..E2, each using a
# subsample of npart particles.
npart = int(1e6)
for halo in 'ABCDE':
    with Tracer(halo + '2') as F:
        with F.copy(0, npart) as S:
            # Rescale particle mass so mP*number_density still gives the
            # physical density after subsampling (total mass conserved).
            S.mP *= float(F.nP) / S.nP
            result, m = S.minuit_NFWlike()
            print('Halo' + halo, m.values['m'], m.errors['m'], m.values['c'], m.errors['c'])
| {"/PotentialProf.py": ["/oPDF.py"], "/FitNFWProfML.py": ["/oPDF.py"]} |
67,034 | cds-snc/domain-scan | refs/heads/master | /gatherers/censys.py | import csv
import json
import logging
import os
from typing import List
from google.cloud import bigquery
from google.oauth2 import service_account
import google.api_core.exceptions
from gatherers.gathererabc import Gatherer
from utils import utils
# Options:
#
# --timeout: Override the 10 minute job timeout (specify in seconds).
# --cache: Use locally cached export data instead of hitting BigQuery.
# Gathers hostnames from Censys.io via the Google BigQuery API.
#
# Before using this, you need to:
#
# * create a Project in Google Cloud, and an associated service account
# with access to create new jobs/queries and get their results.
# * give Censys.io this Google Cloud service account to grant access to.
#
# For details on concepts, and how to test access in the web console:
#
# * https://support.censys.io/google-bigquery/bigquery-introduction
# * https://support.censys.io/google-bigquery/adding-censys-datasets-to-bigquery
#
# Note that the web console access is based on access given to a Google account,
# but BigQuery API access via this script depends on access given to
# Google Cloud *service account* credentials.
# Defaults to 10 minute timeout.
# Bug fix: this was 60 * 60 * 10 (10 hours), contradicting both this comment
# and the "--timeout: Override the 10 minute job timeout" option doc above.
default_timeout = 60 * 10
class Gatherer(Gatherer):

    def gather(self):
        """Yield hostnames matching the configured suffixes from Censys'
        BigQuery certificate dataset, caching the raw export as CSV.

        Requires BigQuery service-account credentials via the
        BIGQUERY_CREDENTIALS or BIGQUERY_CREDENTIALS_PATH env vars.
        Exits the process on missing credentials or query failure.
        """
        credentials = load_credentials()
        if credentials is None:
            logging.warning("No BigQuery credentials provided.")
            logging.warning("Set BIGQUERY_CREDENTIALS or BIGQUERY_CREDENTIALS_PATH environment variables.")
            exit(1)

        # When using this form of instantiation, the client won't pull
        # the project_id out of the creds, has to be set explicitly.
        client = bigquery.Client(
            project=credentials.project_id,
            credentials=credentials
        )

        # Allow override of default timeout (in seconds).
        timeout = int(self.options.get("timeout", default_timeout))

        query = query_for(self.suffixes)
        logging.debug("Censys query:\n%s\n" % query)

        # Plan to store in cache/censys/export.csv.
        download_path = utils.cache_path(
            "export", "censys", ext="csv", cache_dir=self.cache_dir)

        # Reuse of cached data can be turned on with --cache.
        cache = self.options.get("cache", False)
        if (cache is True) and os.path.exists(download_path):
            logging.warning("Using cached download data.")

        # By default, fetch new data from the BigQuery API and write it
        # to the expected download location.
        else:
            utils.mkdir_p(os.path.dirname(download_path))

            logging.warning("Kicking off SQL query job.")

            rows = None
            try:
                # Executes query and loads all results into memory.
                query_job = client.query(query)
                iterator = query_job.result(timeout=timeout)
                rows = list(iterator)
            except google.api_core.exceptions.Forbidden:
                logging.warning("Access denied to Censys' BigQuery tables.")
            except Exception:  # narrowed from bare except: (was catching SystemExit/KeyboardInterrupt too)
                logging.warning(utils.format_last_exception())
                logging.warning("Error talking to BigQuery, aborting.")

            # Bug fix: previously the error branches only logged "aborting"
            # and then fell through to iterate over rows=None, crashing with
            # a TypeError. Actually abort here.
            if rows is None:
                exit(1)

            logging.warning("Caching results of SQL query.")

            # Write the rows out as they were returned (dupes and all), to be
            # de-duped by the central gathering script.
            with open(download_path, 'w', newline='') as download_file:
                download_writer = csv.writer(download_file)
                download_writer.writerow(["Domain"])  # will be skipped on read
                for row in rows:
                    for domain in (row['common_name'] + row['dns_names']):
                        download_writer.writerow([domain])

        # Whether we downloaded it fresh or not, read from the cached data.
        for domain in utils.load_domains(download_path):
            if domain:
                yield domain
# Constructs the query to run in BigQuery, against Censys'
# certificate datasets, for one or more suffixes.
#
# Example query:
#
# SELECT
# parsed.subject.common_name,
# parsed.extensions.subject_alt_name.dns_names
# FROM
# `censys-io.certificates_public.certificates`,
# UNNEST(parsed.subject.common_name) AS common_names,
# UNNEST(parsed.extensions.subject_alt_name.dns_names) AS sans
# WHERE
# (common_names LIKE "%.gov"
# OR sans LIKE "%.gov")
# OR (common_names LIKE "%.fed.us"
# OR sans LIKE "%.fed.us");
def query_for(suffixes: List[str]) -> str:
    """Build the BigQuery SQL that selects every certificate common name and
    SAN matching any of the given domain suffixes from Censys' public
    certificate dataset."""

    # One (common_names OR sans) LIKE clause per suffix.
    def suffix_query(suffix):
        return "\n".join([
            "(common_names LIKE \"%%%s\"" % suffix,
            " OR sans LIKE \"%%%s\")" % suffix,
        ])

    where = "\n  OR ".join(suffix_query(suffix) for suffix in suffixes)

    return "\n".join([
        "SELECT",
        " parsed.subject.common_name,",
        " parsed.extensions.subject_alt_name.dns_names",
        "FROM",
        " `censys-io.certificates_public.certificates`,",
        " UNNEST(parsed.subject.common_name) AS common_names,",
        " UNNEST(parsed.extensions.subject_alt_name.dns_names) AS sans",
        "WHERE",
        " %s" % where,
    ])
def get_credentials_from_env_var_or_file(env_var: str="",
                                         env_file_var: str="") -> str:
    """Return credential text from `env_var` directly if set; otherwise read
    it from the file named by `env_file_var`. Returns None if neither is set."""
    value = os.environ.get(env_var)
    if value is not None:
        return value
    path = os.environ.get(env_file_var)
    if path is None:
        return None
    with open(path) as handle:
        return handle.read()
# Load BigQuery credentials from either a JSON string, or
# a JSON file. Passed in via environment variables either way.
def load_credentials():
    """Build Google service-account credentials from the environment.

    Reads credential JSON either directly from BIGQUERY_CREDENTIALS or from
    the file named by BIGQUERY_CREDENTIALS_PATH; returns None if neither
    environment variable is set.
    """
    raw = get_credentials_from_env_var_or_file(
        env_var="BIGQUERY_CREDENTIALS",
        env_file_var="BIGQUERY_CREDENTIALS_PATH")
    if raw is None:
        return None
    info = json.loads(raw)
    return service_account.Credentials.from_service_account_info(info)
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,035 | cds-snc/domain-scan | refs/heads/master | /scanners/csp.py | import logging
import requests
from scanners import utils
###
# CSP Scanner - check the presence of CSP headers
#
# Set a default number of workers for a particular scan type.
# Overridden by a --workers flag.
workers = 2
# default to a custom user agent, can be overridden
user_agent = "github.com/18f/domain-scan, csp.py"
def init_domain(domain, environment, options):
    """Pre-scan check for the CSP scanner.

    Skips domains that cached pshtt data marks as dead or as pure external
    redirectors, and resolves the URL the scan should fetch.
    Returns False to skip the domain, or {'url': url} on success.
    """
    cache_dir = options.get("_", {}).get("cache_dir", "./cache")

    # If we have data from pshtt, skip if it's not a live domain.
    if utils.domain_not_live(domain, cache_dir=cache_dir):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return False

    # If we have data from pshtt, skip if it's just a redirector.
    if utils.domain_is_redirect(domain, cache_dir=cache_dir):
        logging.debug("\tSkipping, domain seen as just an external redirector during inspection.")
        return False

    # requests needs a URL, not just a domain.
    if domain.startswith('http://') or domain.startswith('https://'):
        url = domain
    else:
        # Prefer the canonical endpoint recorded by pshtt. (Fix: the lookup
        # was previously performed twice — once in the condition and once for
        # the value; fetch it once.) Otherwise, well, ssl should work.
        canonical = utils.domain_canonical(domain, cache_dir=cache_dir)
        url = canonical if canonical else 'https://' + domain

    return {'url': url}
def scan(domain, environment, options):
    """Fetch the domain's (canonical) URL and report whether it serves a
    Content-Security-Policy header.

    Returns {'csp_set': bool}.
    """
    logging.debug("CSP Check called with options: %s" % options)

    url = environment.get("url", domain)
    logging.debug("URL: %s", url)

    # Identify ourselves per the module-level user_agent setting, and bound
    # the request so a hung server can't stall the scan indefinitely
    # (fix: previously no timeout was set, so requests could block forever).
    response = requests.get(url, headers={"User-Agent": user_agent}, timeout=30)

    # requests' headers mapping is case-insensitive, so this matches any
    # capitalization of the header name.
    csp_set = "content-security-policy" in response.headers

    logging.warning("Complete!")

    return {
        'csp_set': csp_set
    }
# Required CSV row conversion function. Usually one row, can be more.
#
# Run locally.
def to_rows(data):
    """Flatten one scan result dict into CSV rows: a single one-cell row."""
    return [[data['csp_set']]]
# CSV headers for each row of data. Referenced locally.
headers = ["CSP Set for domain"]
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,036 | cds-snc/domain-scan | refs/heads/master | /tests/test_scan_utils.py | import os
import sys
from collections import namedtuple
from pathlib import Path
from .context import utils, scanners # noqa
from utils import scan_utils
from scanners import analytics, noop
import pytest
# Minimal stand-in for a scanner module: just the `workers` attribute that
# determine_scan_workers() inspects.
MockScanner = namedtuple("MockScanner", ["workers"])


@pytest.mark.parametrize("names,expected", [
    (
        ["noop"],
        ([noop])
    )
])
def test_build_scanner_list(names, expected):
    # The returned list should contain the imported scanner modules themselves.
    assert scan_utils.build_scanner_list(names) == expected


@pytest.mark.xfail(raises=ImportError)
@pytest.mark.parametrize("names", [
    ["asdf"],
    ["missing_scanner"],
])
def test_build_scan_lists_import_error(names):
    # Unknown scanner names should surface as ImportError rather than be
    # silently skipped.
    scan_utils.build_scanner_list(names)
@pytest.mark.parametrize("arg,suffix,expected", [
(
"whatever",
None,
["whatever"]
),
(
Path(Path(__file__).parent, "data", "domains.csv"),
None,
["achp.gov", "acus.gov"]
),
(
Path(Path(__file__).parent, "data",
"domains_no_suffixes.csv"),
".gov",
["achp.gov", "acus.gov"]
),
(
Path(Path(__file__).parent, "data",
"domains_no_suffixes.csv"),
"gov",
["achp.gov", "acus.gov"]
),
])
def test_domains_from(arg, suffix, expected):
result = list(scan_utils.domains_from(arg, domain_suffix=suffix))
assert result == expected
@pytest.mark.parametrize("arg,suffix,expected", [
(
Path(Path(__file__).parent, "data",
"domains_no_suffixes.tab"),
"gov",
["achp.gov", "acus.gov"]
),
])
@pytest.mark.xfail(raises=TypeError)
def test_domains_from_type_error(arg, suffix, expected):
list(scan_utils.domains_from(arg, domain_suffix=suffix))
@pytest.mark.parametrize("domains,cache,expected", [
(
"whatever.gov",
"cache",
"whatever.gov"
),
(
"tests/data/domains.csv",
"cache",
Path(os.path.curdir, "tests/data/domains.csv").resolve()
),
])
def test_handle_domains_argument(domains, cache, expected):
result = scan_utils.handle_domains_argument(domains, cache)
assert result == expected
@pytest.mark.xfail(raises=IOError)
def test_handle_domains_argument_io_error():
scan_utils.handle_domains_argument("http://thing.notarealtld", "./cache")
@pytest.mark.xfail(raises=FileNotFoundError)
def test_handle_domains_argument_fnf_error():
scan_utils.handle_domains_argument("notarealfile.csv", "./cache")
@pytest.mark.parametrize("scans,opts,args,correct_opts, correct_unknown", [
(
[noop],
{},
["--noop-delay", "4"],
{"noop_delay": 4},
[],
),
(
[noop, analytics],
{"something": "else"},
["--noop-delay", "4", "--analytics", "tests/data/domains.csv"],
{
"analytics_domains": ["achp.gov", "acus.gov"],
"noop_delay": 4,
"something": "else"
},
[],
),
])
def test_handle_scanner_arguments(scans, opts, args, correct_opts, correct_unknown):
# This only handles a basic case and makes sure it's handed off correctly;
# tests for the scanner argument parsers themselves should be in the tests
# for those scanners.
opts, unknown = scan_utils.handle_scanner_arguments(scans, opts, args)
assert opts == correct_opts
assert unknown == correct_unknown
@pytest.mark.parametrize("scanner,options,w_default,w_max,expected", [
    # A scanner-declared worker count wins when it is under the cap.
    (
        MockScanner(workers=23),
        {},
        5,
        100,
        23,
    ),
    # --serial forces a single worker regardless of the scanner's setting.
    (
        MockScanner(workers=23),
        {"serial": True},
        5,
        100,
        1,
    ),
    # No `workers` attribute (plain tuple): fall back to the default,
    # clamped to the maximum.
    (
        (1, 2),
        {"serial": False},
        5,
        4,
        4,
    ),
    (
        (1, 2),
        {"serial": False},
        3,
        4,
        3,
    ),
])
def test_determine_scan_workers(scanner, options, w_default, w_max, expected):
    result = scan_utils.determine_scan_workers(scanner, options, w_default,
                                               w_max)
    assert result == expected
@pytest.mark.parametrize("args,expected", [
(
"./scan 18f.gsa.gov --scan=analytics --analytics=http://us.ie/de.csv",
(
{
"domains": "18f.gsa.gov",
"cache": False,
"debug": False,
"lambda": False,
"meta": False,
"scan": "analytics",
"serial": False,
"sort": False,
"output": "./",
"_": {
"cache_dir": "./cache",
"report_dir": "./",
"results_dir": "./results"
}
},
["--analytics=http://us.ie/de.csv"]
)
),
(
"./scan tests/data/domains.csv --scan=noopabc",
(
{
"domains": "tests/data/domains.csv",
"cache": False,
"debug": False,
"lambda": False,
"meta": False,
"scan": "noopabc",
"serial": False,
"sort": False,
"output": "./",
"_": {
"cache_dir": "./cache",
"report_dir": "./",
"results_dir": "./results"
}
},
[]
)
),
])
def test_options(monkeypatch, args, expected):
monkeypatch.setattr(sys, "argv", args.split(" "))
result = scan_utils.options()
assert result == expected
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,037 | cds-snc/domain-scan | refs/heads/master | /tests/test_runner.py | import csv
import pytest
from runner import runner
class MockScanner:
    # Only the `headers` attribute is consulted by runner.write_rows.
    headers = ['field_a', 'field_b']


@pytest.fixture
def output_file(tmpdir):
    # Per-test temporary CSV path (pytest's tmpdir fixture is test-scoped).
    return str(tmpdir.join('output.csv'))
def test_write_rows(output_file):
    # Two result rows for one domain: each should come back prefixed with the
    # domain and base_domain columns.
    data = [
        ['value 1', 'value 2'],
        ['value 3', 'value 4'],
    ]
    with open(output_file, 'w') as output_file_obj:
        csv_writer = csv.writer(output_file_obj)
        runner.write_rows(data, 'foo.gov', 'foo.gov', MockScanner(), csv_writer)
    # Read the CSV back and verify the prepended columns and row order.
    with open(output_file, 'r') as file_object:
        reader = csv.DictReader(file_object, ['domain', 'base_domain', 'A', 'B'])
        result = [dict(row) for row in reader]
    assert result == [
        {'domain': 'foo.gov', 'base_domain': 'foo.gov', 'A': 'value 1', 'B': 'value 2'},
        {'domain': 'foo.gov', 'base_domain': 'foo.gov', 'A': 'value 3', 'B': 'value 4'},
    ]
def test_write_rows_no_data(output_file):
with open(output_file, 'w') as output_file_obj:
csv_writer = csv.writer(output_file_obj)
runner.write_rows(None, 'foo.gov', 'foo.gov', MockScanner(), csv_writer)
with open(output_file, 'r') as file_object:
reader = csv.DictReader(file_object, ['domain', 'base_domain', 'A', 'B'])
result = [dict(row) for row in reader]
assert result == [
{'domain': 'foo.gov', 'base_domain': 'foo.gov', 'A': '', 'B': ''}
]
def test_write_rows_with_meta(output_file):
data = [
['value 1', 'value 2'],
]
meta = {
'errors': ['error1'],
'start_time': 1521990106,
'end_time': 1521990206,
'duration': 100
}
with open(output_file, 'w') as output_file_obj:
csv_writer = csv.writer(output_file_obj)
runner.write_rows(data, 'foo.gov', 'foo.gov', MockScanner(), csv_writer, meta=meta)
with open(output_file, 'r') as file_object:
fields = ['domain', 'base_domain', 'A', 'B', 'errors', 'start_time', 'end_time', 'duration']
reader = csv.DictReader(file_object, fields)
result = [dict(row) for row in reader]
assert result == [{
'domain': 'foo.gov',
'base_domain': 'foo.gov',
'A': 'value 1',
'B': 'value 2',
'errors': 'error1',
'start_time': '2018-03-25T15:01:46Z',
'end_time': '2018-03-25T15:03:26Z',
'duration': '100.000000'
}]
def test_rows_with_lambda_meta(output_file):
data = [
['value 1', 'value 2'],
]
meta = {
'errors': ['error1'],
'start_time': 1521990106,
'end_time': 1521990206,
'duration': 100,
'lambda': {
'request_id': 1,
'log_group_name': 'group',
'log_stream_name': 'stream',
'start_time': 1521990107,
'end_time': 1521990205,
'memory_limit': 100,
'measured_duration': 98,
}
}
with open(output_file, 'w') as output_file_obj:
csv_writer = csv.writer(output_file_obj)
runner.write_rows(data, 'foo.gov', 'foo.gov', MockScanner(), csv_writer, meta=meta)
with open(output_file, 'r') as file_object:
fields = ['domain', 'base_domain', 'A', 'B', 'errors', 'start_time', 'end_time', 'duration',
'request_id', 'log_group_name', 'log_stream_name', 'lambda_start_time', 'lambda_end_time',
'memory_limit', 'measured_duration']
reader = csv.DictReader(file_object, fields)
result = [dict(row) for row in reader]
assert result == [{
'domain': 'foo.gov',
'base_domain': 'foo.gov',
'A': 'value 1',
'B': 'value 2',
'errors': 'error1',
'start_time': '2018-03-25T15:01:46Z',
'end_time': '2018-03-25T15:03:26Z',
'duration': '100.000000',
'lambda_end_time': '2018-03-25T15:03:25Z',
'lambda_start_time': '2018-03-25T15:01:47Z',
'log_group_name': 'group',
'log_stream_name': 'stream',
'measured_duration': '98.000000',
'memory_limit': '100',
'request_id': '1',
}]
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,038 | cds-snc/domain-scan | refs/heads/master | /tests/test_utils.py | import argparse
import sys
import pytest
from .context import utils # noqa
from utils import utils as subutils
from utils import scan_utils
def get_default_values(parser):
    """Map each option's dest to its default value, pulled from the parser
    itself so the tests track parser changes without manual bookkeeping.
    Unwraps the single-element list defaults of nargs==1 options."""
    defaults = {}
    for action in parser._get_optional_actions():
        is_flag = action.nargs == 0 and action.const is True
        if is_flag and action.default is not None:
            defaults[action.dest] = action.default
        elif action.nargs == 1 and action.default is not None:
            defaults[action.dest] = action.default[0]
    return defaults
def get_args_with_mandatory_values(parser):
    """Dest names of the parser's options that take a value
    (nargs of '?', '+', or 1), in declaration order."""
    return [action.dest
            for action in parser._get_optional_actions()
            if action.nargs in ("?", "+", 1)]
# Derive the expected option defaults from the parsers themselves, so these
# expectations don't need manual updates when a parser default changes.
gather_default_values = get_default_values(
    subutils.build_gather_options_parser([]))
gather_args_with_mandatory_values = get_args_with_mandatory_values(
    subutils.build_gather_options_parser([]))
scan_default_values = get_default_values(
    scan_utils.build_scan_options_parser())
scan_args_with_mandatory_values = get_args_with_mandatory_values(
    scan_utils.build_scan_options_parser())

# Expected contents of the parsed options' "_" sub-dict (shared working
# directories) for both the gather and scan commands.
default_underscore_both = {
    "cache_dir": "./cache",
    "report_dir": "./",
    "results_dir": "./results",
}

default_underscore_scan = {
    **default_underscore_both
}

default_underscore_gather = {
    **default_underscore_both,
}
@pytest.mark.parametrize("args,expected", [
(
"gather dap --dap=someurl --suffix=.gov",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=gov",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=gov,",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=,gov",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=.gov,.gov.uk",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov", ".gov.uk"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=.gov,gov.uk",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov", ".gov.uk"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"gather dap --dap=someurl --suffix=gov,gov.uk",
{
"gatherers": ["dap"],
"dap": "someurl",
"suffix": [".gov", ".gov.uk"],
**gather_default_values,
"_": {**default_underscore_gather},
}
),
(
"".join([
"gather dap --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv"]),
{
"gatherers": ["dap"],
**gather_default_values,
"_": {**default_underscore_gather},
"suffix": [".gov"],
"dap": "https://analytics.usa.gov/data/live/sites-extended.csv",
}
),
(
"".join([
"./gather censys,dap,private --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
" --private=/path/to/private-research.csv --parents=",
"https://github.com/GSA/data/raw/master/dotgov-domains/current-federal.csv",
]),
{
'gatherers': ['censys', 'dap', 'private'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
'private': '/path/to/private-research.csv',
'parents': 'https://github.com/GSA/data/raw/master/dotgov-domains/current-federal.csv',
}
),
(
"".join([
"./gather censys,dap,private --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
" --private=/path/to/private-research.csv --parents=",
"https://github.com/GSA/data/raw/master/dotgov-domains/current-federal.csv",
]),
{
'gatherers': ['censys', 'dap', 'private'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
'private': '/path/to/private-research.csv',
'parents': 'https://github.com/GSA/data/raw/master/dotgov-domains/current-federal.csv',
}
),
(
"".join([
"./gather dap --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
" --ignore-www",
]),
{
'gatherers': ['dap'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
'ignore_www': True,
}
),
(
"".join([
"./gather dap --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
" --include-parents",
]),
{
'gatherers': ['dap'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
'include_parents': True,
}
),
(
"".join([
"./gather dap --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
]),
{
'gatherers': ['dap'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
}
),
(
"".join([
"./gather dap --suffix=.gov --dap=",
"https://analytics.usa.gov/data/live/sites-extended.csv",
" --debug",
]),
{
'gatherers': ['dap'],
**gather_default_values,
"_": {**default_underscore_gather},
'suffix': ['.gov'],
'dap': 'https://analytics.usa.gov/data/live/sites-extended.csv',
'debug': True,
}
),
])
def test_options_for_gather(monkeypatch, args, expected):
monkeypatch.setattr(sys, "argv", args.split(" "))
result = subutils.options_for_gather()
assert result == expected
@pytest.mark.parametrize("args", [
"./gather --yo --suffix=.gov",
"./gather , --suffix=.gov",
])
@pytest.mark.xfail(raises=argparse.ArgumentTypeError)
def test_options_for_gather_no_gatherer(monkeypatch, args):
monkeypatch.setattr(sys, "argv", args.split(" "))
subutils.options_for_gather()
@pytest.mark.parametrize("args", [
"./gather --help",
"./gather -h",
])
def test_options_for_gather_help(monkeypatch, capsys, args):
monkeypatch.setattr(sys, "argv", args.split(" "))
# Handling exception here instead of with decorator because we want to
# examine the console output.
with pytest.raises(SystemExit) as exc:
subutils.options_for_gather()
assert exc.typename == "SystemExit"
out, err = capsys.readouterr()
assert out.startswith("usage: gather GATHERERS")
@pytest.mark.parametrize("args", [
"./gather censys --suffix",
"./gather dap,censys --dap --suffix=.gov",
"./gather dap --dap --suffix=.gov",
"./gather dap --dap=something.url --suffix=.gov --parents",
])
@pytest.mark.xfail(raises=argparse.ArgumentError)
def test_options_for_gather_missing_arg_parameter(monkeypatch, args):
monkeypatch.setattr(sys, "argv", args.split(" "))
subutils.options_for_gather()
@pytest.mark.parametrize("args", [
    "./gather censys --a11y_config=file.json --suffix=.gov",
    "./gather dap --dap=file.json --suffix=.gov --cache",
    "./gather dap --dap=file.json --suffix=.gov --timeout=10",
])
@pytest.mark.xfail(raises=argparse.ArgumentTypeError)
def test_options_for_gather_arg_mismatch(monkeypatch, args):
    """Args that don't apply to the chosen gatherers (per the test name)
    should raise ArgumentTypeError."""
    monkeypatch.setattr(sys, "argv", args.split(" "))
    subutils.options_for_gather()
@pytest.mark.parametrize("arg", gather_args_with_mandatory_values)
@pytest.mark.xfail(raises=argparse.ArgumentError)
def test_options_for_gather_missing_mandatory(monkeypatch, arg):
    """Mandatory-value flags given bare (or with an empty value) should error."""
    command = "./gather censys --suffix=.gov --%s" % arg.replace("_", "-")
    monkeypatch.setattr(sys, "argv", command.split(" "))
    subutils.options_for_gather()
    # NOTE(review): with xfail(raises=...), the call above raises and ends
    # the test, so the empty-value variant below never actually executes.
    command = "./gather censys --suffix=.gov --%s=" % arg.replace("_", "-")
    monkeypatch.setattr(sys, "argv", command.split(" "))
    subutils.options_for_gather()
def test_options_for_scan_no_target(monkeypatch, capsys):
    """Omitting the required domains positional arg should exit with an error."""
    # Handling exception here instead of with decorator because for some reason
    # even our enhanced ArgumentParser generates SystemExit for this error
    # instead of a more specific exception.
    command = "./scan --scan=a11y"
    monkeypatch.setattr(sys, "argv", command.split(" "))
    with pytest.raises(SystemExit) as exc:
        scan_utils.options()
    assert exc.typename == "SystemExit"
    out, err = capsys.readouterr()
    assert err.endswith("arguments are required: domains\n")
def test_options_for_scan_basic(monkeypatch):
    """A minimal scan command parses to the defaults plus its own arguments."""
    command = "./scan example.org --scan=a11y"
    monkeypatch.setattr(sys, "argv", command.split(" "))
    result, _ = scan_utils.options()
    assert result == {
        "_": default_underscore_scan,
        "domains": "example.org",
        "scan": "a11y",
        **scan_default_values,
        "output": "./",
    }
@pytest.mark.parametrize("args", [
    "./scan --help",
    "./scan -h",
])
def test_options_for_scan_help(monkeypatch, capsys, args):
    """--help / -h should exit and print the scan usage banner."""
    monkeypatch.setattr(sys, "argv", args.split(" "))
    # Handling exception here instead of with decorator because we want to
    # examine the console output.
    with pytest.raises(SystemExit) as exc:
        scan_utils.options()
    assert exc.typename == "SystemExit"
    out, err = capsys.readouterr()
    assert out.startswith("usage: scan [-h]")
@pytest.mark.parametrize("arg", scan_args_with_mandatory_values)
@pytest.mark.xfail(raises=argparse.ArgumentError)
def test_options_for_scan_missing_mandatory(monkeypatch, arg):
    """Mandatory-value scan flags given bare (or empty) should error."""
    command = "./scan example.org --scan=a11y --%s" % arg.replace("_", "-")
    monkeypatch.setattr(sys, "argv", command.split(" "))
    scan_utils.options()
    # NOTE(review): with xfail(raises=...), the call above raises and ends
    # the test, so the empty-value variant below never actually executes.
    command = "./scan example.org --scan=a11y --%s=" % arg.replace("_", "-")
    monkeypatch.setattr(sys, "argv", command.split(" "))
    scan_utils.options()
@pytest.mark.xfail(raises=argparse.ArgumentTypeError)
def test_options_for_scan_lambda_profile_no_lambda(monkeypatch):
    """--lambda-profile given without --lambda should raise ArgumentTypeError."""
    command = "./scan example.org --scan=a11y --lambda-profile=something"
    monkeypatch.setattr(sys, "argv", command.split(" "))
    scan_utils.options()
@pytest.mark.parametrize("command,expected", [
    (
        "./scan example.org --scan=a11y --workers=1",
        {
            "_": default_underscore_scan,
            "domains": "example.org",
            "scan": "a11y",
            "workers": "1",
            "output": "./",
            **scan_default_values,
        }
    ),
    (
        "./scan example.org --scan=a11y --output=..",
        {
            **scan_default_values,
            "_": {
                "cache_dir": "../cache",
                "report_dir": "..",
                "results_dir": "../results",
            },
            "domains": "example.org",
            "scan": "a11y",
            "output": "..",
        }
    ),
])
def test_options_for_scan_check_for_single_args(monkeypatch, command, expected):
    """Single-valued scan args should parse to scalars, and --output should
    rewrite the derived cache/report/results directories."""
    monkeypatch.setattr(sys, "argv", command.split(" "))
    result, _ = scan_utils.options()
    # Fix: removed a leftover `pytest.set_trace()` debugging hook that ran on
    # mismatch -- it would hang CI waiting for an interactive debugger.
    assert result == expected
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,039 | cds-snc/domain-scan | refs/heads/master | /scanners/analytics.py | import argparse
import logging
import os
from pathlib import Path
from typing import Tuple
from urllib.parse import urlparse
from utils import utils, scan_utils
# Check whether a domain is present in a CSV, set in --analytics.
# The domains in --analytics will have been downloaded and
# loaded in options['analytics_domains'].
def scan(domain: str, environment: dict, options: dict):
    """Report whether this domain appears in the analytics participant list.

    The participating domains were loaded from the --analytics CSV into
    options['analytics_domains'] during argument handling.
    """
    participating = domain in options["analytics_domains"]
    return {'participating': participating}
def to_rows(data):
    """Convert one analytics scan result into a single one-cell CSV row."""
    return [[data['participating']]]
headers = ["Participates in Analytics"]
def handle_scanner_args(args, opts) -> Tuple[dict, list]:
    """
    --analytics: file path or URL to a CSV of participating domains.

    Validates that the value ends in .csv, downloads it into the cache dir
    when it is an http(s) URL, verifies local paths exist, then loads the
    domain list into the "analytics_domains" key of the returned options.

    Returns a tuple of (options parsed by this parser, unrecognized args).

    Raises:
        argparse.ArgumentTypeError: value is not a .csv, or download failed.
        FileNotFoundError: local file path does not exist.
    """
    parser = scan_utils.ArgumentParser(prefix_chars="--")
    parser.add_argument("--analytics", nargs=1, required=True)
    parsed, unknown = parser.parse_known_args(args)
    dicted = vars(parsed)
    should_be_single = ["analytics"]
    dicted = scan_utils.make_values_single(dicted, should_be_single)
    resource = dicted.get("analytics")
    if not resource.endswith(".csv"):
        no_csv = "".join([
            "--analytics should be the file path or URL to a CSV of participating",
            " domains and end with .csv, which '%s' does not" % resource
        ])
        logging.error(no_csv)
        raise argparse.ArgumentTypeError(no_csv)
    # Fix: removed a no-op `try/except: raise` wrapper here -- any error
    # from urlparse propagates to the caller unchanged either way.
    parsed_url = urlparse(resource)
    if parsed_url.scheme and parsed_url.scheme in ("http", "https"):
        analytics_path = Path(opts["_"]["cache_dir"], "analytics.csv").resolve()
        try:
            utils.download(resource, str(analytics_path))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; download failures still become argparse errors.
            logging.error(utils.format_last_exception())
            no_csv = "--analytics URL %s not downloaded successfully." % resource
            logging.error(no_csv)
            raise argparse.ArgumentTypeError(no_csv)
    else:
        if not os.path.exists(resource):
            no_csv = "--analytics file %s not found." % resource
            logging.error(no_csv)
            raise FileNotFoundError(no_csv)
        else:
            analytics_path = resource
    analytics_domains = utils.load_domains(analytics_path)
    dicted["analytics_domains"] = analytics_domains
    del dicted["analytics"]
    return (dicted, unknown)
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,040 | cds-snc/domain-scan | refs/heads/master | /scanners/scannerabc.py | from abc import ABCMeta, abstractmethod
import os
class ScannerABC(metaclass=ABCMeta):
    """Abstract base class for scanner plugins.

    Subclasses implement scan(), to_rows(), and the headers property.
    Subclass __init__ methods should call
    `super().__init__(environment, options)` and store any scanner-specific
    configuration in self.initialized_opts.
    """
    # Whether this scanner can also run in AWS Lambda.
    lambda_support = False

    def __init__(self, environment: dict, options: dict) -> None:
        """Record environment/options and derive the output directory layout.

        cache/ and results/ live under the chosen output directory
        (options["output"], defaulting to the current directory).
        """
        self.environment = environment
        self.options = options
        self.report_dir = self.options.get("output", "./")
        self.cache_dir = os.path.join(self.report_dir, "cache")
        self.results_dir = os.path.join(self.report_dir, "results")
        # Scanner name comes from the module the subclass lives in,
        # e.g. "scanners.noop" -> "noop".
        # Fix: removed a leftover debugging `print(self.name)` that wrote
        # to stdout on every instantiation.
        self.name = self.__class__.__module__.split(".")[-1]

    @abstractmethod
    def scan(self) -> dict:
        pass

    @abstractmethod
    def to_rows(self, data) -> list:
        pass

    @property
    @abstractmethod
    def headers(self):
        # CSV headers for each row of data, e.g. ["Completed", "Constant", "Variable"]
        pass
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,041 | cds-snc/domain-scan | refs/heads/master | /process_a11y/a11y/process_a11y.py | import argparse
import csv
import json
from collections import defaultdict
from statistics import mean
from utils.utils import mkdir_p, results_dir
class A11yProcessor(object):
    """Post-process raw a11y scan results into JSON datasets for reporting.

    Takes the a11y results CSV and the domains CSV, and writes three JSON
    files into the results directory: per-domain error details ("a11y"),
    per-agency summary statistics ("agencies"), and per-domain error
    counts ("domains").
    """
    # Maps the WCAG guideline ID embedded in an error code to a
    # human-readable category; anything else becomes "Other Errors".
    ERRORS = {
        '1_1': 'Missing Image Descriptions',
        '1_3': 'Form - Initial Findings',
        '1_4': 'Color Contrast - Initial Findings',
        '4_1': 'HTML Attribute - Initial Findings'
    }
    # Agencies grouped by branch of government; agencies not listed here
    # are treated as Executive (see clean_row).
    BRANCHES = {
        'Legislative': [
            'Library of Congress',
            'The Legislative Branch (Congress)',
            'Government Printing Office',
            'Government Publishing Office',
            'Congressional Office of Compliance',
            'Stennis Center for Public Service',
            'U.S. Capitol Police',
        ],
        'Judicial': [
            'The Judicial Branch (Courts)',
            'The Supreme Court',
            'U.S Courts',
        ],
        'Non-federal': [
            'Non-Federal Agency',
        ]
    }
    def __init__(self, a11y_path, domains_path):
        """Load both CSVs and build domain->agency / agency->branch lookups."""
        self.a11y_raw = self.read_csv(a11y_path)
        self.domain_raw = self.read_csv(domains_path)
        # Domains CSV: column 0 is the domain, column 2 the owning agency.
        self.domain_to_agency = {d[0].lower(): d[2] for d in self.domain_raw}
        # Invert BRANCHES into a flat agency -> branch lookup.
        self.agency_to_branch = {a: b for b in self.BRANCHES for a in self.BRANCHES[b]}
    def run(self):
        """Clean every raw row and write the three derived JSON datasets."""
        data = [self.clean_row(d) for d in self.a11y_raw]
        parsed_datasets = [
            ('a11y', self.make_a11y_data(data)),
            ('agencies', self.make_agency_data(data)),
            ('domains', self.make_domain_data(data)),
        ]
        mkdir_p(results_dir())
        for name, data in parsed_datasets:
            path = '{}/{}.json'.format(results_dir(), name)
            with open(path, 'w+') as f:
                json.dump(data, f, indent=2)
    def clean_row(self, row):
        """Convert one raw a11y CSV row into a dict with agency/branch info.

        Adds 'error' (the display category) and 'error_details' only when
        the row carries an error code in column 4.
        """
        domain = row[0].lower()
        agency = self.domain_to_agency.get(domain, 'N/A')
        code = row[4]
        results = {
            'domain': domain,
            'agency': agency,
            # Unlisted agencies default to the Executive branch.
            'branch': self.agency_to_branch.get(agency, 'Executive')
        }
        if code:
            results['error'] = self.get_error_category(code)
            results['error_details'] = {
                'code': code,
                'typeCode': row[3],
                'message': row[5],
                'context': row[6],
                'selector': row[7],
            }
        return results
    def make_a11y_data(self, data):
        """Group error details by domain, then by error category."""
        results = defaultdict(lambda: defaultdict(list))
        for d in data:
            if 'error' in d:
                results[d['domain']][d['error']].append(d['error_details'])
            else:
                # Error-free domains still appear, with an empty mapping.
                results[d['domain']] = {}
        # using json de/encode to convert defaultdicts back to dicts
        return {'data': json.loads(json.dumps(results))}
    def make_agency_data(self, data):
        """Summarize per-domain stats into per-agency averages."""
        # first, group domain stats by agency
        data_by_agency = defaultdict(list)
        for d in self.make_domain_data(data)['data']:
            data_by_agency[d['agency']].append(d)
        # then, compute summary stats across groups
        results = []
        for agency, domain_stats in data_by_agency.items():
            pages = len(domain_stats)
            total_errors = sum(d['errors'] for d in domain_stats)
            entry = {
                'agency': agency,
                'pages_count': pages,
                'Average Errors per Page': (
                    'n/a' if pages == 0 else round(float(total_errors) / pages, 2)
                )
            }
            # add in averages by error category
            entry.update({
                e: round(mean([d['errorlist'][e] for d in domain_stats]), 2)
                for e in self.ERRORS.values()
            })
            results.append(entry)
        return {'data': results}
    def make_domain_data(self, data):
        """Aggregate total and per-category error counts for each domain."""
        results = {}
        for d in data:
            dom = d['domain']
            if dom not in results:
                results[dom] = {
                    'agency': d['agency'],
                    'branch': d['branch'],
                    'canonical': dom,
                    'domain': dom,
                    'errors': 0,
                    'errorlist': {e: 0 for e in self.ERRORS.values()}
                }
            if 'error' in d:
                results[dom]['errors'] += 1
                results[dom]['errorlist'][d['error']] += 1
        return {'data': list(results.values())}
    def get_error_category(self, code):
        """Map a code like '...Guideline1_1.1_1_1...' to its display category."""
        error_id = code.split('.')[2].split('Guideline')[1]
        return self.ERRORS.get(error_id, 'Other Errors')
    @staticmethod
    def read_csv(filename):
        """Read a CSV, skipping the first (header) row; return a list of rows."""
        with open(filename, 'r') as f:
            reader = csv.reader(f)
            next(reader)  # TODO: make header row skip configurable
            return [row for row in reader]
if __name__ == '__main__':
    # CLI entry point: both CSV inputs are required.
    parser = argparse.ArgumentParser()
    parser.add_argument('--a11y', required=True)
    parser.add_argument('--domains', required=True)
    args = parser.parse_args()
    A11yProcessor(args.a11y, args.domains).run()
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,042 | cds-snc/domain-scan | refs/heads/master | /scanners/noop.py | import logging
from typing import Tuple
from utils.scan_utils import ArgumentParser, make_values_single
###
# Testing scan function. Does nothing time consuming or destructive,
# but exercises many of the main hooks of domain-scan.
# Set a default number of workers for a particular scan type.
# Overridden by a --workers flag.
workers = 2
# Optional one-time initialization for all scans.
# If defined, any data returned will be passed to every scan instance and used
# to update the environment dict for that instance
# Will halt scan execution if it returns False or raises an exception.
#
# Run locally.
def init(environment: dict, options: dict) -> dict:
    """One-time initialization hook: returns data shared by every scan."""
    logging.debug("Init function.")
    shared = {'constant': 12345}
    return shared
# Optional one-time initialization per-scan. If defined, any data
# returned will be passed to the instance for that domain and used to update
# the environment dict for that particular domain.
#
# Run locally.
def init_domain(domain: str, environment: dict, options: dict) -> dict:
    """Per-domain initialization hook: returns data merged into that
    domain's environment."""
    logging.debug("Init function for %s." % domain)
    per_domain = {'variable': domain}
    return per_domain
# Required scan function. This is the meat of the scanner, where things
# that use the network or are otherwise expensive would go.
#
# Runs locally or in the cloud (Lambda).
def scan(domain: str, environment: dict, options: dict) -> dict:
    """Perform the (trivial) noop scan.

    Echoes back the 'constant' set by init() and the 'variable' set by
    init_domain(), both read from the merged environment.
    """
    logging.debug("Scan function called with options: %s" % options)

    # Perform the "task".
    complete = True

    # Fix: logging.warn is a deprecated alias; use logging.warning.
    logging.warning("Complete!")

    return {
        'complete': complete,
        'constant': environment.get('constant'),
        'variable': environment.get('variable')
    }
# Required CSV row conversion function. Usually one row, can be more.
#
# Run locally.
def to_rows(data):
    """Convert one noop scan result dict into a single CSV row."""
    row = [data['complete'], data['constant'], data['variable']]
    return [row]
# CSV headers for each row of data. Referenced locally.
headers = ["Completed", "Constant", "Variable"]
# Optional handler for custom CLI parameters. Takes the args (as a list of
# strings) and returns a dict of the options values and names that the scanner
# expects, and a list of the arguments it didn't know how to parse.
#
# Should return a dict of the options parsed by this parser (not a mutated form
# of the opts that are passed to it) and a list of the remaining args that it
# didn't recognize.
def handle_scanner_args(args, opts) -> Tuple[dict, list]:
    """Parse noop-specific CLI parameters.

    --noop-delay: integer delay value for the noop scanner.

    Returns a dict of the options parsed by this parser and a list of the
    remaining args it didn't recognize.
    """
    parser = ArgumentParser(prefix_chars="--")
    parser.add_argument("--noop-delay", nargs=1)
    parsed, unknown = parser.parse_known_args(args)
    dicted = vars(parsed)
    should_be_single = ["noop_delay"]
    dicted = make_values_single(dicted, should_be_single)
    # Fix: guard against the flag being absent -- the optional argument
    # defaults to None, and int(None, 10) raises TypeError.
    if dicted.get("noop_delay") is not None:
        dicted["noop_delay"] = int(dicted["noop_delay"], 10)
    return dicted, unknown
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,043 | cds-snc/domain-scan | refs/heads/master | /scanners/trustymail.py | import logging
###
# Inspect a site's DNS Mail configuration using DHS NCATS' trustymail tool.
###
# default to a long timeout
default_timeout = 30
# This is the same default timeout used in trustymail/scripts/trustymail
default_smtp_timeout = 5
# These are the same default ports used in trustymail/scripts/trustymail
default_smtp_ports = '25,465,587'
# We want to enforce the use of Google DNS by default. This gives
# more consistent results.
default_dns = '8.8.8.8,8.8.4.4'
# Advertise lambda support
lambda_support = True
def scan(domain, environment, options):
    """Run trustymail against a single domain and return its results dict.

    Reads timeout/SMTP/DNS settings from `options`, monkey-patches
    trustymail's Public Suffix List caching, and temporarily adjusts the
    root logging level (restored before returning).
    """
    # Save the old logging level
    old_log_level = logging.getLogger().getEffectiveLevel()
    log_level = logging.WARN
    if options.get('debug', False):
        log_level = logging.DEBUG
    logging.basicConfig(format='%(asctime)-15s %(message)s', level=log_level)
    timeout = int(options.get('timeout', default_timeout))
    smtp_timeout = int(options.get('smtp_timeout', default_smtp_timeout))
    smtp_localhost = options.get('smtp_localhost', None)
    smtp_ports = {int(port) for port in options.get('smtp_ports', default_smtp_ports).split(',')}
    dns_hostnames = options.get('dns', default_dns).split(',')
    # --starttls implies --mx
    # Fix: `options` is a plain dict, which has no .set() method -- the
    # original `options.set('mx', True)` raised AttributeError whenever
    # --starttls was used.
    if options.get('starttls', False):
        options['mx'] = True
    # Whether or not to use an in-memory SMTP cache. For runs against
    # a single domain this will not make any difference, unless an MX
    # record is duplicated.
    smtp_cache = not options.get('no_smtp_cache', False)
    # User might not want every scan performed.
    scan_types = {
        'mx': options.get('mx', False),
        'starttls': options.get('starttls', False),
        'spf': options.get('spf', False),
        'dmarc': options.get('dmarc', False)
    }
    import trustymail
    # Monkey patching trustymail to make it cache the PSL where we
    # want
    trustymail.PublicSuffixListFilename = 'cache/public-suffix-list.txt'
    if environment['scan_method'] == 'lambda':
        # Monkey patching trustymail to make the PSL cache read-only
        trustymail.PublicSuffixListReadOnly = True
    import trustymail.trustymail as tmail
    data = tmail.scan(domain, timeout, smtp_timeout, smtp_localhost, smtp_ports, smtp_cache, scan_types, dns_hostnames).generate_results()
    if not data:
        # Fix: logging.warn is a deprecated alias; use logging.warning.
        logging.warning("\ttrustymail scan failed, skipping.")
    # Reset the logging level
    logging.getLogger().setLevel(old_log_level)
    return data
def to_rows(data):
    """Map one trustymail result dict onto a single CSV row, in header order."""
    return [[data[field] for field in headers]]

# CSV field names, in output column order; also the keys expected in each
# result dict passed to to_rows.
headers = [
    "Live",
    "MX Record", "Mail Servers", "Mail Server Ports Tested",
    "Domain Supports SMTP", "Domain Supports SMTP Results",
    "Domain Supports STARTTLS", "Domain Supports STARTTLS Results",
    "SPF Record", "Valid SPF", "SPF Results",
    "DMARC Record", "Valid DMARC", "DMARC Results",
    "DMARC Record on Base Domain", "Valid DMARC Record on Base Domain",
    "DMARC Results on Base Domain", "DMARC Policy", "DMARC Policy Percentage",
    "DMARC Aggregate Report URIs", "DMARC Forensic Report URIs",
    "DMARC Has Aggregate Report URI", "DMARC Has Forensic Report URI",
    "Syntax Errors", "Debug Info"
]
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,044 | cds-snc/domain-scan | refs/heads/master | /lambda/lambda_handler.py | import importlib
import sys
import logging
from utils import utils
# Central handler for all Lambda events.
def handler(event, context):
    """Central entry point for all Lambda scan invocations.

    Expects `event` to carry the scanner module name, target domain,
    options, and environment. Dynamically imports the named scanner and
    invokes the same scan() call used locally, then returns the scan data
    plus Lambda-side timing/diagnostic metadata.
    """
    start_time = utils.local_now()
    domain = event.get('domain')
    options = event.get('options')
    name = event.get('scanner')
    environment = event.get('environment')
    # Log all sent events, for the record.
    utils.configure_logging(options)
    logging.info(event)
    # Might be acceptable to let this crash the module, in Lambda.
    try:
        scanner = importlib.import_module("scanners.%s" % name)
    except ImportError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        logging.error("[%s] Scanner not found, or had an error during loading.\n\tERROR: %s\n\t%s" % (name, exc_type, exc_value))
        # NOTE(review): exit() in a Lambda handler kills the runtime;
        # consider raising instead so the platform records the failure.
        exit(1)
    # Same method call as when run locally.
    data = scanner.scan(domain, environment, options)
    # We capture start and end times locally as well, but it's
    # useful to know the start/end from Lambda's vantage point.
    end_time = utils.local_now()
    duration = end_time - start_time
    response = {
        'lambda': {
            'log_group_name': context.log_group_name,
            'log_stream_name': context.log_stream_name,
            'request_id': context.aws_request_id,
            'memory_limit': context.memory_limit_in_mb,
            'start_time': start_time,
            'end_time': end_time,
            'measured_duration': duration
        },
        'data': data
    }
    # Serialize and re-parse the JSON, so that we run our own
    # date transform functions in one place, before Amazon's built-in
    # JSON serialization prepares the data for transport.
    return utils.from_json(utils.json_for(response))
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,045 | cds-snc/domain-scan | refs/heads/master | /tests/context.py | import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import gatherers # noqa
import scanners # noqa
import utils # noqa
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,046 | cds-snc/domain-scan | refs/heads/master | /scanners/third_parties.py | import logging
from utils import utils
# Evaluate third party service usage using Chrome headless.
# Can also be run in Lambda.
lambda_support = True
# Signal that this is a JS-based scan using headless Chrome.
# The scan method will be defined in third_parties.js instead.
scan_headless = True
# Use pshtt data if we have it, to either skip redirect/inactive
# domains, or to start with the canonical URL right away.
def init_domain(domain, environment, options):
    """Per-domain setup: decide whether to scan, and resolve the URL to hit.

    Uses cached pshtt data (when present) to skip domains that are not live
    or that only redirect elsewhere, and to prefer the canonical endpoint
    over a bare http:// guess.

    Returns False to skip the domain, or {'url': url} to proceed.
    """
    cache_dir = options.get("_", {}).get("cache_dir", "./cache")
    # If we have data from pshtt, skip if it's not a live domain.
    # NOTE(review): unlike the two calls below, domain_not_live is not passed
    # cache_dir -- confirm it falls back to the default cache location.
    if utils.domain_not_live(domain):
        logging.debug("\tSkipping, domain not reachable during inspection.")
        return False
    # If we have data from pshtt, skip if it's just a redirector.
    if utils.domain_is_redirect(domain, cache_dir=cache_dir):
        logging.debug("\tSkipping, domain seen as just an external redirector during inspection.")
        return False
    # To scan, we need a URL, not just a domain.
    url = None
    if not (domain.startswith('http://') or domain.startswith('https://')):
        # If we have data from pshtt, use the canonical endpoint.
        if utils.domain_canonical(domain, cache_dir=cache_dir):
            url = utils.domain_canonical(domain, cache_dir=cache_dir)
        # Otherwise, well, whatever.
        else:
            url = 'http://' + domain
    else:
        url = domain
    # Standardize by ending with a /.
    url = url + "/"
    return {'url': url}
# Gets the return value of scan(), convert to a CSV row.
def to_rows(data):
    """Flatten one third-party scan result into a single CSV row.

    List fields are pipe-delimited into single cells; the external-domain
    count is derived from the list length.
    """
    row = [
        data['url'],
        len(data['external_domains']),
    ]
    for field in ('external_domains', 'external_urls', 'nearby_domains',
                  'nearby_urls', 'known_services', 'unknown_services'):
        row.append(" | ".join(data[field]))
    return [row]

# CSV headers matching each position in the row built above.
headers = [
    'Scanned URL',
    'Number of External Domains',
    'External Domains',
    'External URLs',
    'Nearby Domains',
    'Nearby URLs',
    'Known Services',
    'Unknown Services'
]
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,047 | cds-snc/domain-scan | refs/heads/master | /runner/runner.py | from utils import utils
def write_rows(rows, domain, base_domain, scanner, csv_writer, meta=None):
    """Write one scanner's result rows for a domain to the output CSV.

    Each row is prefixed with [domain, base_domain] and, when `meta` is
    given, suffixed with local timing/error info plus (if present) Lambda
    execution metadata. Column positions must stay in step with the CSV
    header row built elsewhere from scanner.headers.
    """
    # If we didn't get any info, we'll still output information about why the scan failed.
    if rows is None:
        empty_row = [None] * len(scanner.headers)
        rows = [empty_row]
    # Always output Domain and Base Domain.
    standard_prefix = [
        domain,
        base_domain,
    ]
    # If requested, add local and Lambda scan data.
    meta_fields = []
    if meta:
        meta_fields.append(" ".join(meta.get('errors', [])))
        meta_fields.append(utils.utc_timestamp(meta.get("start_time")))
        meta_fields.append(utils.utc_timestamp(meta.get("end_time")))
        meta_fields.append(utils.just_microseconds(meta.get("duration")))
        if meta.get("lambda") is not None:
            meta_fields.append(meta['lambda'].get('request_id'))
            meta_fields.append(meta['lambda'].get('log_group_name'))
            meta_fields.append(meta['lambda'].get('log_stream_name'))
            meta_fields.append(utils.utc_timestamp(meta['lambda'].get('start_time')))
            meta_fields.append(utils.utc_timestamp(meta['lambda'].get('end_time')))
            meta_fields.append(meta['lambda'].get('memory_limit'))
            meta_fields.append(utils.just_microseconds(meta['lambda'].get('measured_duration')))
    # Write out prefix, scan data, and meta scan data.
    for row in rows:
        csv_writer.writerow(standard_prefix + row + meta_fields)
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,048 | cds-snc/domain-scan | refs/heads/master | /gatherers/url.py | import os
import logging
import requests
from gatherers.gathererabc import Gatherer
from utils import utils
class Gatherer(Gatherer):
    """Gatherer that reads candidate domains from a CSV named by --url.

    The value may be a remote http(s) URL (re-downloaded into the cache
    directory on every run) or a local file path. Yields one domain per row.
    """
    def gather(self):
        # Defaults to --url, but can be overridden.
        name = self.extra.get("name", "url")
        url = self.options.get(name)
        if url is None:
            # NOTE(review): logging.warn is a deprecated alias for
            # logging.warning.
            logging.warn("A --url is required. (Can be a local path.)")
            exit(1)
        # remote URL
        if url.startswith("http:") or url.startswith("https:"):
            # Though it's saved in cache/, it will be downloaded every time.
            remote_path = os.path.join(self.cache_dir, "url.csv")
            try:
                response = requests.get(url)
                utils.write(response.text, remote_path)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit; consider `except Exception`.
                logging.error("Remote URL not downloaded successfully.")
                print(utils.format_last_exception())
                exit(1)
        # local path
        else:
            remote_path = url
        for domain in utils.load_domains(remote_path):
            yield domain
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,049 | cds-snc/domain-scan | refs/heads/master | /process_a11y/tests/test_process_a11y.py | import unittest
from .context import a11y # noqa
from a11y.process_a11y import A11yProcessor
class ProcessA11yTestCase(unittest.TestCase):
    """Unit tests for A11yProcessor using the fixture CSVs in tests/data."""
    # Fixture inputs; paths are relative to the process_a11y directory.
    a11y_filename = 'tests/data/a11y.csv'
    domain_filename = 'tests/data/domains.csv'
    def setUp(self):
        # Fresh processor per test; re-reads both fixture CSVs.
        self.a11y = A11yProcessor(self.a11y_filename, self.domain_filename)
    def test_branch_lookup(self):
        """agency_to_branch maps known agencies; unknown names give None."""
        branch_lookup = self.a11y.agency_to_branch
        self.assertEqual(branch_lookup.get('U.S. Capitol Police'), 'Legislative')
        self.assertEqual(branch_lookup.get('U.S Courts'), 'Judicial')
        self.assertIsNone(branch_lookup.get('foo'))
    def test_agency_lookup(self):
        """domain_to_agency resolves fixture domains to their agencies."""
        agency_lookup = self.a11y.domain_to_agency
        self.assertEqual(agency_lookup.get('achp.gov'), 'Advisory Council on Historic Preservation')
        self.assertEqual(agency_lookup.get('acus.gov'), 'Administrative Conference of the United States')
    def test_row_cleaner(self):
        """clean_row enriches a raw row with agency, branch, and error info."""
        clean = self.a11y.clean_row(self.a11y.a11y_raw[0])
        self.assertEqual(clean['agency'], 'Administrative Conference of the United States')
        self.assertEqual(clean['branch'], 'Executive')
        self.assertEqual(clean['error'], 'Missing Image Descriptions')
        self.assertEqual(
            clean['error_details']['code'],
            'WCAG2AA.Principle1.Guideline1_1.1_1_1.H30.2'
        )
    def test_error_lookup(self):
        """get_error_category maps guideline IDs; unknown codes fall through
        to 'Other Errors'."""
        error_lookup = self.a11y.get_error_category
        self.assertEqual(
            error_lookup('WCAG2AA.Principle1.Guideline1_1.1_1_1.H30.2'),
            'Missing Image Descriptions'
        )
        self.assertEqual(
            error_lookup('other.error.Guideline123_456.1_1_1.H30.2'),
            'Other Errors'
        )
if __name__ == '__main__':
    # Allow running this test module directly with `python`.
    unittest.main()
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,050 | cds-snc/domain-scan | refs/heads/master | /tests/test_gatherers_censys.py | import pytest
from .context import gatherers # noqa
from gatherers import censys
CENSYS_ONE_SUFFIX_QUERY = "\n".join([
"SELECT",
" parsed.subject.common_name,",
" parsed.extensions.subject_alt_name.dns_names",
"FROM",
" `censys-io.certificates_public.certificates`,",
" UNNEST(parsed.subject.common_name) AS common_names,",
" UNNEST(parsed.extensions.subject_alt_name.dns_names) AS sans",
"WHERE",
" (common_names LIKE \"%.gov\"",
" OR sans LIKE \"%.gov\")",
])
CENSYS_TWO_SUFFIX_QUERY = "\n".join([
"SELECT",
" parsed.subject.common_name,",
" parsed.extensions.subject_alt_name.dns_names",
"FROM",
" `censys-io.certificates_public.certificates`,",
" UNNEST(parsed.subject.common_name) AS common_names,",
" UNNEST(parsed.extensions.subject_alt_name.dns_names) AS sans",
"WHERE",
" (common_names LIKE \"%.gov\"",
" OR sans LIKE \"%.gov\")",
" OR (common_names LIKE \"%.fed.us\"",
" OR sans LIKE \"%.fed.us\")",
])
@pytest.mark.parametrize("suffixes,expected", [
    (
        [".gov"],
        CENSYS_ONE_SUFFIX_QUERY
    ),
    (
        [".gov", ".fed.us"],
        CENSYS_TWO_SUFFIX_QUERY
    ),
])
def test_query_for(suffixes, expected):
    """query_for should build the expected BigQuery SQL for each suffix list."""
    result = censys.query_for(suffixes)
    assert result == expected
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,051 | cds-snc/domain-scan | refs/heads/master | /gatherers/gathererabc.py | from abc import ABCMeta, abstractmethod
import os
from typing import List
class Gatherer(metaclass=ABCMeta):
    """Abstract base class for gatherer plugins.

    Subclasses implement gather(), which yields candidate domains for the
    given suffixes/options. Output directories (cache/, results/) derive
    from options["output"], defaulting to the current directory.
    """

    def __init__(self, suffixes: List[str], options: dict, extra: dict = None):
        self.suffixes = suffixes
        self.options = options
        # Fix: the original signature used `extra: dict={}` -- a mutable
        # default shared by every instance, so one gatherer mutating its
        # extra dict would leak into all the others. Defaulting to None and
        # creating a fresh dict keeps the same caller-visible behavior.
        self.extra = {} if extra is None else extra
        self.report_dir = self.options.get("output", "./")
        self.cache_dir = os.path.join(self.report_dir, "cache")
        self.results_dir = os.path.join(self.report_dir, "results")

    @abstractmethod
    def gather(self):
        pass
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,052 | cds-snc/domain-scan | refs/heads/master | /scanners/headless/local_bridge.py | import logging
import json
from utils import scan_utils
###
# Local Python bridge to the JS bridge to the JS scanner.
# Shells out to run Node on base.js, which runs headless Chrome
# and then pulls in the scanner-specific JS scanning function.
# Resulting data is serialized and returned via STDOUT.
###
def headless_scan(scanner_name, domain, environment, options):
    """Run a headless-Chrome JS scanner for one domain.

    Shells out to Node (local_bridge.js), passing the scanner name and a
    JSON-serialized payload; the JS side prints serialized results on
    STDOUT, which are parsed back into a dict here.

    Returns the parsed result dict, or None if the subprocess produced no
    output or non-JSON output.
    """
    raw = scan_utils.scan(
        [
            "./scanners/headless/local_bridge.js",
            scanner_name,
            scan_utils.json_for({
                'domain': domain,
                'environment': environment,
                'options': options
            })
        ]
    )

    # No output at all: the Node process itself failed to run.
    if not raw:
        # logging.warn is a deprecated alias for logging.warning.
        logging.warning("\tError calling out to %s.js, skipping." % scanner_name)
        return None

    # Non-JSON output: the scanner crashed mid-run; surface whatever it
    # printed to help debugging.
    try:
        data = scan_utils.from_json(raw)
    except json.decoder.JSONDecodeError:
        logging.warning("\tError inside %s.js, skipping. Error below:\n\n%s" % (scanner_name, raw))
        return None

    return data
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,053 | cds-snc/domain-scan | refs/heads/master | /scanners/a11y.py | import json
import logging
import os
import requests
import yaml
from utils import utils
# Default number of parallel workers for this scan type.
workers = 3
# Path to the pa11y CLI binary (override with the PA11Y_PATH env var).
pa11y = os.environ.get("PA11Y_PATH", "pa11y")
# Populated by init(): domain -> {blacklist, redirect} entries.
redirects = {}
# Populated by init(): local path to the pa11y JSON config ("" = none).
config = ""
def init(environment, options):
    """Initialize the a11y scanner: load redirect and pa11y config data.

    --a11y_redirects: YML file (local path or http(s) URL) mapping domains
      to blacklist/redirect entries; parsed into the module-level
      ``redirects`` dict.
    --a11y_config: JSON pa11y config file (local path or http(s) URL);
      its local path is stored in the module-level ``config``.

    Returns True on success, False on any validation/download failure.
    """
    global redirects
    global config

    cache_dir = options.get("_", {}).get("cache_dir", "./cache")

    redirects_file = options.get("a11y_redirects")
    config_file = options.get("a11y_config")

    # Parse redirects
    if redirects_file:
        if not redirects_file.endswith(".yml"):
            logging.error("--a11y_redirects should be a YML file")
            return False

        # if remote, try to download
        if redirects_file.startswith("http:") or redirects_file.startswith("https:"):
            redirects_path = os.path.join(cache_dir, "a11y_redirects.yml")

            try:
                response = requests.get(redirects_file)
                utils.write(response.text, redirects_path)
            # Catch network errors specifically: a bare except would also
            # swallow KeyboardInterrupt/SystemExit.
            except requests.RequestException:
                logging.error("--a11y_redirects URL not downloaded successfully.")
                return False

        # Otherwise, read it off the disk
        else:
            redirects_path = redirects_file

            if (not os.path.exists(redirects_path)):
                logging.error("--a11y_redirects file not found.")
                return False

        with open(redirects_path, 'r') as f:
            # safe_load: this file doesn't need arbitrary-object
            # deserialization, and plain yaml.load is unsafe (and now
            # deprecated without an explicit Loader).
            redirects = yaml.safe_load(f)

    # Get config
    if config_file:
        if not config_file.endswith(".json"):
            logging.error("--a11y_config should be a json file")
            return False

        # if remote, try to download
        if config_file.startswith("http:") or config_file.startswith("https:"):
            config_path = os.path.join(cache_dir, "a11y_config.json")

            try:
                response = requests.get(config_file)
                utils.write(response.text, config_path)
            except requests.RequestException:
                logging.error("--a11y_config URL not downloaded successfully.")
                return False

            config = config_path

        # Otherwise, use the local path directly. (Previously a local
        # config file was silently ignored; mirror the redirects handling.)
        else:
            if not os.path.exists(config_file):
                logging.error("--a11y_config file not found.")
                return False

            config = config_file

    return True
# If we have pshtt data, use it to skip some domains. If redirect
# data says so, adjust scan URL for some domains.
def init_domain(domain, environment, options):
    """Pre-scan hook: decide whether (and at which URL) to scan a domain.

    Returns False to skip the domain, or {'url': ...} with the URL the
    scan() function should use.
    """
    cache_dir = options.get("_", {}).get("cache_dir", "./cache")

    # pshtt already knows this domain redirects or is dead; don't scan it.
    skip_per_pshtt = (
        utils.domain_is_redirect(domain, cache_dir=cache_dir) or
        utils.domain_not_live(domain, cache_dir=cache_dir)
    )
    if skip_per_pshtt:
        logging.debug("\tSkipping a11y scan based on pshtt data.")
        return False

    # The redirects table may substitute a different URL, or veto the scan.
    url = get_url_to_scan(domain)
    if not url:
        logging.debug("\tSkipping a11y scan based on redirect/blacklist data.")
        return False

    return {'url': url}
# Shell out to a11y and run the scan.
def scan(domain, environment, options):
    """Run the a11y scan against the (possibly adjusted) URL."""
    target = environment.get("url", domain)
    return {
        'url': target,
        'errors': run_a11y_scan(target)
    }
def to_rows(data):
    """Flatten one scan result into CSV rows (one row per error)."""
    url = data['url']
    return [
        [url, err['typeCode'], err['code'], err['message'],
         err['context'], err['selector']]
        for err in data['errors']
    ]
# Column headers matching the rows produced by to_rows().
headers = [
    "redirectedTo",
    "typeCode",
    "code",
    "message",
    "context",
    "selector"
]
def run_a11y_scan(domain):
    """Shell out to pa11y for one URL and return its parsed error list.

    An empty or failed run yields a single all-empty placeholder record so
    downstream CSV generation still emits a row.
    """
    command = [pa11y, domain, "--reporter", "json", "--level", "none", "--timeout", "300000"]
    if config:
        command += ["--config", config]
    raw = utils.scan(command)

    empty_run = (not raw) or (raw == '[]\n')
    if empty_run:
        return [{
            'typeCode': '',
            'code': '',
            'message': '',
            'context': '',
            'selector': '',
            'type': ''
        }]
    return json.loads(raw)
def get_url_to_scan(domain):
    """Map a domain through the redirects table.

    Returns None for blacklisted domains, the redirect target for
    redirected ones, and the domain itself otherwise.
    """
    global redirects

    # Domains absent from the table are scanned as-is.
    if domain not in redirects:
        return domain

    entry = redirects[domain]
    # blacklist will force a domain to be skipped
    if entry['blacklist']:
        return None
    # otherwise, scan will be redirected to new location
    return entry['redirect']
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,054 | cds-snc/domain-scan | refs/heads/master | /scanners/missing_scanner.py | # This lives in the scanners directory and is purely for making
# testing easier.
# Deliberately bogus executable name so tests can exercise the
# "underlying scanner command is missing" code path.
command = "hopefully_not_a_real_command_on_some_system_somewhere"
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,055 | cds-snc/domain-scan | refs/heads/master | /tests/test_gatherers_rdns.py | import pytest
from .context import gatherers # noqa
from gatherers import rdns
# End-to-end check of rdns.process_lines on raw Rapid7 rDNS "JSON Lines"
# records: IP-like and purely numeric hostnames are dropped, real
# hostnames pass through in input order.
@pytest.mark.parametrize("data,expected", [
    (
        [
            '{"value": "18f.gov"}',
            '{"value": "123.112.18f.gov"}',
            '{"value": "123.112.23.23"}',
            '{"value": "u-123.112.23.23"}',
            '{"value": "123.112.fed.us"}',
            '{"value": "something.fed.us"}',
            '{"value": "18f.gsa.gov"}',
            '{"timestamp":"1510189589","name":"148.165.34.19","value":"www.bart.gov","type":"ptr"}',
            '{"timestamp":"1510189590","name":"166.2.164.127","value":"z-166-2-164-127.ip.fs.fed.us","type":"ptr"}',
            '{"timestamp":"1510189590","name":"199.131.187.116","value":"z-199-131-187-116.ip.fs.fed.us","type":"ptr"}',
            '{"timestamp":"1510189590","name":"199.156.215.172","value":"199.156.215.172.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189591","name":"137.79.24.39","value":"wildcard.jpl.nasa.gov","type":"ptr"}',
            '{"timestamp":"1510189591","name":"152.132.2.60","value":"152-132-2-60.tic.va.gov","type":"ptr"}',
            '{"timestamp":"1510189591","name":"166.3.217.20","value":"z-166-3-217-20.ip.fs.fed.us","type":"ptr"}',
            '{"timestamp":"1510189591","name":"167.253.203.215","value":"167-253-203-215-gov.emcbc.doe.gov","type":"ptr"}',
            '{"timestamp":"1510189591","name":"199.153.160.221","value":"199.153.160.221.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189592","name":"140.215.230.154","value":"140-215-230-154.usbr.gov","type":"ptr"}',
            '{"timestamp":"1510189593","name":"166.6.157.98","value":"z-166-6-157-98.ip.fs.fed.us","type":"ptr"}',
            '{"timestamp":"1510189595","name":"130.20.175.6","value":"130.20.175.6.pnnl.gov","type":"ptr"}',
            '{"timestamp":"1510189595","name":"199.149.248.138","value":"199.149.248.138.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189595","name":"199.159.207.25","value":"199.159.207.25.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189596","name":"199.145.148.196","value":"199.145.148.196.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189597","name":"159.142.211.155","value":"host.159-142-211-155.gsa.gov","type":"ptr"}',
            '{"timestamp":"1510189597","name":"159.189.28.97","value":"u-159-189-28-97.xr.usgs.gov","type":"ptr"}',
            '{"timestamp":"1510189598","name":"139.169.172.113","value":"host.jsc.nasa.gov","type":"ptr"}',
            '{"timestamp":"1510189599","name":"134.67.230.238","value":"unassigned.epa.gov","type":"ptr"}',
            '{"timestamp":"1510189600","name":"130.118.135.187","value":"u-130-118-135-187.xr.usgs.gov","type":"ptr"}',
            '{"timestamp":"1510189600","name":"140.214.229.183","value":"140-214-229-183.usbr.gov","type":"ptr"}',
            '{"timestamp":"1510189600","name":"199.148.94.97","value":"199.148.94.97.4k.usda.gov","type":"ptr"}',
            '{"timestamp":"1510189601","name":"170.144.139.133","value":"z-170-144-139-133.ip.fs.fed.us","type":"ptr"}',
        ],
        [
            "18f.gov",
            "something.fed.us",
            "18f.gsa.gov",
            "www.bart.gov",
            "wildcard.jpl.nasa.gov",
            # "host.159-142-211-155.gsa.gov", TODO: currently gets stripped, but should it?
            "host.jsc.nasa.gov",
            "unassigned.epa.gov",
        ]
    ),
])
def test_query_for(data, expected):
    # process_lines is a generator; materialize before comparing.
    result = rdns.process_lines(data, rdns.ip_filter, rdns.number_filter)
    assert list(result) == expected
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,056 | cds-snc/domain-scan | refs/heads/master | /scanners/noopabc.py | import logging
from typing import List
from scanners.scannerabc import ScannerABC
###
# Testing scan class. Does nothing time consuming or destructive,
# but exercises many of the main hooks of domain-scan.
class Scanner(ScannerABC):
    """No-op testing scanner: exercises domain-scan's main hooks without
    doing any time-consuming or destructive work."""

    # CSV headers for each row of data. Referenced locally.
    headers = ["Completed", "Constant", "Variable"]

    # Set a default number of workers for a particular scan type.
    # Overridden by a --workers flag.
    workers = 2  # type: int

    def __init__(self, environment: dict, options: dict) -> None:
        # The overall scanner options are set here.
        # Per-domain arguments should be passed to ``.scan()``.
        #
        # Run locally.
        logging.debug("Subclass (%s) __init__ method." % self.__module__)
        logging.debug("Initialize environment method.")
        self.initialized_opts = environment
        self.initialized_opts["constant"] = 12345
        super().__init__(environment, options)

    def scan(self, domain: str) -> dict:
        # Required scan function. This is the meat of the scanner, where things
        # that use the network or are otherwise expensive would go.
        #
        # Runs locally or in the cloud (Lambda).
        logging.debug("Scan function called with options: %s" % self.options)

        # Perform the "task".
        complete = True
        # logging.warn is a deprecated alias for logging.warning.
        logging.warning("Complete!")

        return {
            'complete': complete,
            'constant': self.environment.get('constant'),
            'variable': self.environment.get('variable')
        }

    def to_rows(self, data) -> List[List[str]]:
        # CSV headers for each row of data, e.g.
        # ["Completed", "Constant", "Variable"]
        return [
            [data['complete'], data['constant'], data['variable']]
        ]
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,057 | cds-snc/domain-scan | refs/heads/master | /tests/test_scanners_analytics.py | import pytest
from argparse import ArgumentTypeError
from scanners import analytics
@pytest.mark.parametrize("opts,args,correct_opts, correct_unknown", [
    (
        ["--analytics", "tests/data/domains.csv"],
        {},
        {"analytics_domains": ["achp.gov", "acus.gov"]},
        [],
    ),
    (
        ["--noop-delay", "4", "--analytics", "tests/data/domains.csv"],
        {"something": "else"},
        {
            "analytics_domains": ["achp.gov", "acus.gov"],
        },
        ["--noop-delay", "4"],
    ),
])
def test_handle_scanner_args(args, opts, correct_opts, correct_unknown):
    # This only handles a basic case and makes sure it's handed off correctly;
    # tests for the scanner argument parsers themselves should be in the tests
    # for those scanners.
    # (Removed a leftover commented-out pytest.set_trace() debug line.)
    opts, unknown = analytics.handle_scanner_args(opts, args)
    assert opts == correct_opts
    assert unknown == correct_unknown
@pytest.mark.parametrize("opts,args,correct_opts, correct_unknown", [
    (
        ["--analytics", "tests/data/domains.tsv"],
        {},
        {"analytics_domains": ["achp.gov", "acus.gov"]},
        [],
    ),
])
@pytest.mark.xfail(raises=ArgumentTypeError)
def test_handle_scanner_args_notcsv(args, opts, correct_opts, correct_unknown):
    # A non-.csv --analytics argument must be rejected with
    # ArgumentTypeError; the xfail decorator above is the real assertion,
    # so the checks below are never reached on the expected path.
    parsed_opts, unknown = analytics.handle_scanner_args(opts, args)
    assert parsed_opts == correct_opts
    assert unknown == correct_unknown
@pytest.mark.parametrize("opts,args,correct_opts, correct_unknown", [
    (
        ["--noop-delay", "4", "--analytics", "path/to/nowhere.csv"],
        {"something": "else"},
        {
            "analytics_domains": ["achp.gov", "acus.gov"],
        },
        ["--noop-delay", "4"],
    ),
])
@pytest.mark.xfail(raises=FileNotFoundError)
def test_handle_scanner_args_fnf(args, opts, correct_opts, correct_unknown):
    # A missing --analytics file must raise FileNotFoundError (checked by
    # the xfail decorator); the assertions below are never reached on the
    # expected path.
    parsed_opts, unknown = analytics.handle_scanner_args(opts, args)
    assert parsed_opts == correct_opts
    assert unknown == correct_unknown
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,058 | cds-snc/domain-scan | refs/heads/master | /gatherers/rdns.py | import json
import logging
import re
from typing import Generator, List, Pattern
from gatherers.gathererabc import Gatherer
# Reverse DNS
#
# Given a path to a (local) "JSON Lines" formatted file,
# based on Rapid7's Reverse DNS data, pull out the domains
# that match the given suffixes.
#
# Bearing in mind that the gathering system currently loads
# all domains into memory in order to dedupe them, it may be
# easiest to use this on a file that has been pre-filtered in
# some way (such as by grepping for the intended suffix).
# Best-effort filter for hostnames which are just reflected IPs.
# IP addresses often use dots or dashes.
# Some also start with "u-" before the IP address.
# (Raw strings: "\w"/"\d"/"\-" are not valid string escapes and raise
# DeprecationWarning in non-raw literals.)
ip_filter = re.compile(r"^(\w+[\-\.]?)?\d+[\-\.]\d+[\-\.]\d+[\-\.]\d+")

# Best-effort filter for hostnames with just numbers on the base domain.
# (Note: this won't work for fed.us subdomains, but that's okay, this
# is just a best-effort to cut down noise.)
number_filter = re.compile(r"^[\d\-]+\.")
# NOTE: intentionally also named Gatherer (the gathering framework looks
# this name up); the base class is gatherers.gathererabc.Gatherer, imported
# above, which this definition shadows.
class Gatherer(Gatherer):
    """rDNS gatherer: streams matching hostnames from a local Rapid7
    reverse-DNS "JSON Lines" file supplied via --rdns."""

    def gather(self):
        path = self.options.get("rdns")

        if path is None:
            # logging.warn is a deprecated alias for logging.warning.
            logging.warning("--rdns is required to be a path to a local file.")
            exit(1)

        # May become useful to allow URLs in future.
        if path.startswith("http:") or path.startswith("https:"):
            logging.warning("--rdns is required to be a path to a local file.")
            exit(1)

        with open(path) as lines:
            logging.debug("\tReading %s..." % path)

            for record in process_lines(lines, ip_filter, number_filter):
                yield record
def process_lines(lines: List[str], ip_filter: Pattern,
                  number_filter: Pattern) -> Generator[str, str, None]:
    """Yield hostnames from rDNS JSON-lines records, dropping IP-like and
    purely numeric names."""
    for raw in lines:
        hostname = json.loads(raw)["value"]

        # Reflected-IP hostname, e.g. 123-112-23-23 or u-1.2.3.4?
        looks_like_ip = ip_filter.search(hostname) is not None
        # Purely numeric first label, e.g. '1234.what.ever.gov'?
        starts_numeric = number_filter.search(hostname) is not None

        if not (looks_like_ip or starts_numeric):
            yield hostname
| {"/gatherers/censys.py": ["/gatherers/gathererabc.py"], "/tests/test_scan_utils.py": ["/tests/context.py"], "/tests/test_utils.py": ["/tests/context.py"], "/gatherers/url.py": ["/gatherers/gathererabc.py"], "/tests/test_gatherers_censys.py": ["/tests/context.py"], "/tests/test_gatherers_rdns.py": ["/tests/context.py"], "/scanners/noopabc.py": ["/scanners/scannerabc.py"], "/gatherers/rdns.py": ["/gatherers/gathererabc.py"]} |
67,059 | ZEYINWU/Singel-Cell | refs/heads/master | /project.py | import numpy as np
import scipy.io as sio
def projsplx(y,x):
    """Project each column of ``y`` onto the probability simplex, writing
    the result into ``x`` column by column, and return ``x.T``.

    Uses the iterative threshold-search variant of simplex projection:
    shift each column so it sums to 1, then (if any entry is negative)
    search for lambda_m such that the positive parts of s - lambda_m sum
    to 1 and clip negatives to zero.

    NOTE(review): ``x`` is mutated in place; callers observe both the
    mutated ``x`` and the transposed return value.
    """
    m = y.shape[0]
    n = y.shape[1]
    s = np.repeat(0.0,m).reshape(m,1)
    vs = np.repeat(0.0,m).reshape(m,1)
    for k in range(n):
        # Copy column k into s, tracking its sum and minimum.
        means = 0.0
        mins = 100000.0
        for j in range(m):
            s[j,0] = y[j,k]
            means = means + s[j,0]
            if mins > s[j,0]:
                mins = s[j,0]
        # Shift so the column sums to exactly 1.
        for j in range(m):
            s[j,0] = s[j,0] - (means - 1.0)/m
        ft = 1;
        if mins<0:
            # Iterative search for the simplex threshold lambda_m.
            f = 1
            lambda_m = 0
            while (np.absolute(f) > 1e-10):
                npos = 0
                f = 0
                for j in range(m):
                    vs[j,0] = s[j,0] - lambda_m
                    if vs[j,0] >0 :
                        npos = npos + 1
                        f = f+ vs[j,0]
                # Update step; assumes npos > 0 (would divide by zero
                # otherwise) -- TODO confirm inputs guarantee this.
                lambda_m = lambda_m + (f-1)/npos
                if ft>100 :
                    # Safety valve: give up after 100 iterations and keep
                    # the current clipped values.
                    for j in range(m):
                        if vs[j,0] > 0:
                            x[j,k] = vs[j,0]
                        else:
                            x[j,k] = 0
                    break
                ft = ft + 1
            # Converged: clip negatives to zero for the projected column.
            for j in range(m):
                if vs[j,0]>0 :
                    x[j,k] = vs[j,0]
                else:
                    x[j,k] = 0
        else:
            # Already non-negative after the shift: the shift IS the projection.
            for j in range(m):
                x[j,k] = s[j,0]
    return x.T
if __name__ == "__main__":
    # Smoke test: project a 20x20 slice of the mECS demo data (hard-coded
    # local path) onto the simplex and print the result.
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:20,0:20].T
    x=data1
    y=-data1
    # NOTE(review): arguments appear swapped relative to projsplx(y, x) --
    # here x is passed as the source and y as the output; confirm intended.
    res = projsplx(x,y)
    print(res)
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,060 | ZEYINWU/Singel-Cell | refs/heads/master | /util_silmr.py | import scipy.io as sio
import sys
import numpy as np
from numpy import linalg as LA
def eig1(A,c,isMax):
    """Return [selected eigenvalues, their eigenvectors, all eigenvalues].

    The first ``c`` eigenvalues are taken in ascending (isMax == 0) or
    descending (otherwise) order. ``A`` is always treated as symmetric.
    """
    if c > A.shape[0]:
        c = A.shape[0]

    # The symmetric path is always taken (isSym is hard-wired on).
    isSym = 1
    if isSym == 1:
        values, vectors = LA.eigh(A)
    else:
        values, vectors = LA.eig(A)

    # Order indices by the requested direction.
    if isMax == 0:
        order = np.argsort(values)
    else:
        order = np.argsort(-values)

    chosen = order[0:c]
    eigval = values[chosen]
    eigvec = np.real(vectors[:, chosen])
    eigval_full = values[order]

    return [eigval, eigvec, eigval_full]
def L2_distance_1(a,b):
    """Pairwise squared Euclidean distances between the columns of ``a``
    and the columns of ``b`` (each d x n), with the diagonal zeroed.

    Returns an ndarray D with D[i, j] = ||a[:, i] - b[:, j]||^2, clipped
    at zero, and D[i, i] forced to 0.
    """
    # Degenerate 1-D feature case: pad with a zero row (distances unchanged).
    if a.shape[0]==1:
        a = np.vstack((a,np.repeat(0,a.shape[1])))
        b = np.vstack((b,np.repeat(0,b.shape[1])))

    # Squared column norms, flattened to 1-D.
    aa = np.asarray(np.sum(np.array(a)*np.array(a),axis = 0)).ravel()
    bb = np.asarray(np.sum(np.array(b)*np.array(b),axis = 0)).ravel()
    ab = np.asarray(np.dot(np.matrix(a.T),np.matrix(b)))

    # ||a_i||^2 + ||b_j||^2 - 2 a_i.b_j via broadcasting. (The previous
    # hand-rolled fill loops iterated with mismatched row counts and broke
    # whenever a and b had different numbers of columns.)
    d = aa[:, None] + bb[None, :] - 2.0 * ab
    d = np.real(d)

    # Clamp tiny negatives caused by floating-point cancellation.
    d = np.maximum(d, 0.0)

    # Zero the self-distance diagonal.
    mask = np.ones(d.shape)
    np.fill_diagonal(mask, 0)
    return d * mask
def umkl(D):
    """Binary-search a precision ``beta`` so that the entropy of the
    Hbeta() distribution over ``D`` hits log(u); return that distribution.

    Mirrors the perplexity search used by t-SNE/SIMLR. (Stray
    np.set_printoptions debug calls, which mutated global numpy print
    state without affecting the computation, have been removed.)
    """
    beta = 1.0 / (D.shape[0]*D.shape[1])
    tol = 1e-4
    u = 20.0
    logU = np.log(u)

    # Entropy/distribution at the starting beta.
    res_hbeta = Hbeta(D,beta)
    H = res_hbeta[0]
    thisP = res_hbeta[1]

    # Large sentinel bounds standing in for -inf/+inf.
    betamin = -214748365.0
    betamax = 214748365
    Hdiff = H -logU
    tries = 0.0
    while(np.absolute(Hdiff) > tol and tries<30.0):
        # Entropy too high -> raise beta; too low -> lower it.
        if Hdiff>0 :
            betamin = beta
            if np.absolute(betamax)==214748365.0:
                beta = beta * 2.0
            else:
                beta = (beta + betamax)/2.0
        else:
            betamax = beta
            # NOTE(review): abs(betamin) can never equal a negative value,
            # so this branch always falls through to the midpoint update.
            # Preserved as-is; confirm against the reference implementation.
            if np.absolute(betamin)==-214748365.0:
                beta = beta * 2.0
            else:
                beta = (beta + betamin)/2.0

        # Recompute and iterate (capped at 30 tries).
        res_hbeta = Hbeta(D,beta)
        H = res_hbeta[0]
        thisP = res_hbeta[1]
        Hdiff = H-logU
        tries = tries +1.0
    return thisP
def Hbeta(D,beta):
    """Entropy H and normalized probabilities P of exp(-D * beta), after
    min-max normalizing D to [0, 1]. Returns [H, P].

    (A large block of dead, commented-out NaN-handling code was removed.)
    """
    # Normalize D to [0, 1]; eps guards against a constant D (max == min).
    D = (D - np.min(D))/(np.max(D)- np.min(D) + np.finfo(float).eps)
    P = np.exp(-D * beta)
    sumP = np.sum(P)
    # Shannon entropy of the Boltzmann distribution at this beta.
    H = np.log(sumP) + beta * np.sum(np.multiply(D,P)) / sumP
    P=P/sumP
    res=list()
    res.append(H)
    res.append(P)
    return res
if __name__=='__main__':
    # Smoke test: load demo data (hard-coded local path), then run umkl on
    # a small fixed distance row.
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:13,0:13].T
    #print(data1)
    x=np.matrix("0.00015559521874565727, 0.00021246840656371493, 0.00030423172625916806, 0.00046148705046888028, 0.00075122056866200521")
    print (umkl(x))
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,061 | ZEYINWU/Singel-Cell | refs/heads/master | /tsne.py | import numpy as np
import scipy.io as sio
import random
def tsne(X,k,initial_config):
    """t-SNE embedding of the (n x n) similarity matrix ``X`` into ``k``
    dimensions via momentum gradient descent.

    initial_config: optional (n x k) starting embedding. When supplied
    (and it has more than one row) it is used as-is and early
    exaggeration is disabled (P gain 1 instead of 4); otherwise a random
    N(0, 1) initialization is drawn.

    Returns the (n x k) embedding as an np.matrix.
    """
    # Optimizer hyper-parameters.
    max_iter = 3000
    min_cost = 0.0
    epoch = 100
    momentum = 0.8
    final_momentum = 0.7
    mom_switch_iter = 250
    epsilon = 500.0
    min_gain = 0.01
    initial_P_gain = 4.0
    n = (X.shape[0])
    eps = np.finfo(float).eps
    # (A block of commented-out experimental initialization code, including
    # a hard-coded debug matrix, was condensed into this note.)
    # Use the caller's embedding when given; otherwise random init.
    if initial_config is not None and initial_config.shape[0]!=1:
        ydata = initial_config
        initial_P_gain = 1
    else:
        ydata = np.matrix(np.random.normal(0.0,1.0,k*n)).reshape(n,k)
    # Symmetrize and normalize X into a joint probability matrix P,
    # scaled up by the early-exaggeration gain.
    P = X
    P = 0.5 * (P+P.T)
    P[P<eps] = eps
    P = P / np.sum(P)
    P = P * initial_P_gain
    # Gradient, update (velocity), and per-parameter gain buffers.
    grads = np.repeat(0.0,ydata.shape[0]*ydata.shape[1]).reshape(ydata.shape[0],ydata.shape[1])
    incs = np.repeat(0.0,ydata.shape[0]*ydata.shape[1]).reshape(ydata.shape[0],ydata.shape[1])
    gains = np.repeat(1.0,ydata.shape[0]*ydata.shape[1]).reshape(ydata.shape[0],ydata.shape[1])
    Q = P
    for iter in range(max_iter):
        # Periodically report KL divergence (uses last epoch's Q).
        if iter % epoch ==0.0:
            cost = np.sum(np.sum(np.array(P)*np.array(np.log((P+eps)/(Q+eps))),axis=1))
            print("Iteration #"+str(iter) + ": cost is " +str(cost))
            if cost < min_cost :
                break
        # Student-t kernel of pairwise embedding distances -> Q.
        sum_ydata = np.sum(np.multiply(np.array(ydata),np.array(ydata)),axis=1)
        num = 1.0 /(1.0 + np.matrix(sum_ydata).T + (np.dot(np.matrix(-2 * ydata),np.matrix(ydata.T))+np.matrix(sum_ydata)))
        np.fill_diagonal(num,0.0)
        Q =num / np.sum(np.sum(num)) ################adjust with 0.45
        Q[Q<eps] = eps
        # Gradient of KL(P || Q) w.r.t. the embedding.
        stiffnesses = np.array(P-Q) * np.array(num)
        mo = np.repeat(0.0,stiffnesses.shape[0]*stiffnesses.shape[1]).reshape(stiffnesses.shape[0],stiffnesses.shape[1])
        np.fill_diagonal(mo,np.sum(stiffnesses,axis=0))
        grads = 4*np.dot(np.matrix((mo - stiffnesses)),np.matrix(ydata))
        # Adaptive per-parameter gains: grow when the gradient flips sign
        # relative to the velocity, shrink when it keeps it.
        check1 = (np.sign(grads) != np.sign(incs))
        check1[check1==True] = 1.0
        check1[check1==False] =0.0
        check2 = (np.sign(grads) == np.sign(incs))
        check2[check2==True] = 1.0
        check2[check2==False] =0.0
        gains = np.array(gains + 0.2) * np.array(check1) + np.array(gains) * 0.8 *np.array(check2)
        gains[gains < min_gain] = min_gain
        # Momentum update, then re-center and clamp the embedding.
        incs = (momentum) * (incs) - epsilon * (np.array(gains)*np.array(grads))
        ydata = ydata + incs
        ydata = ydata - np.mean(ydata,axis=0)
        ydata[ydata < -100] = -100.0
        ydata[ydata > 100] = 100.0
        if iter == mom_switch_iter :
            momentum = final_momentum
        # End early exaggeration.
        # NOTE(review): this never fires when initial_config is a sentinel
        # matrix rather than None (see __main__ below) -- confirm intent.
        if iter == 100.0 and initial_config is None:
            P = P/4.0
    return ydata
if __name__ == "__main__":
    # Smoke test: embed a 10x10 slice of the mECS demo data (hard-coded
    # local path) into 5 dimensions.
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:10,0:10].T
    X = np.matrix("1,2,3;4,5,6;7,8,9")
    k=3
    initial_config = np.matrix(np.repeat(5,5))
    res = tsne(np.matrix(data1),5,initial_config)
    # Python 2 style "print res" was a SyntaxError under Python 3; the
    # rest of this codebase uses the function form.
    print(res)
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,062 | ZEYINWU/Singel-Cell | refs/heads/master | /network.py | import scipy.io as sio
import numpy as np
import math
from numpy import linalg as LA
def networkDiffusion(A,K):
    """Network-diffusion enhancement of affinity matrix ``A`` using its
    K-nearest-neighbor dominate set and an eigenvalue reweighting.

    NOTE(review): mutates ``A``'s diagonal in place via fill_diagonal.
    """
    np.fill_diagonal(A,0)
    # Sign matrix: +1 where A > 0, -1 elsewhere.
    sign_A = A
    sign_A = sign_A>0
    sign_A = (sign_A -1) + sign_A
    # Keep only each row's K strongest links, restoring original signs.
    P = np.array(dominateSet((np.absolute(A)),min(K,A.shape[0]-1))) * np.array(sign_A)
    DD = np.sum(np.absolute(P),axis=1)
    np.fill_diagonal(P,DD+1)
    P = transitionFields(P)
    # Reweight the spectrum: d -> (1-alpha)*d / (1 - alpha*d^beta).
    eigen_P = LA.eig(P)
    U = eigen_P[1]
    D = eigen_P[0]
    d=(D + np.finfo(float).eps).real
    alpha = 0.8
    beta = 2
    d = ((1.0-alpha)*d) / (1.0 - alpha*(np.array(d)**beta))
    l = np.matrix(d).shape[0] * np.matrix(d).shape[1]
    D = np.repeat(0.0,l*l).reshape(l,l)
    np.fill_diagonal(D,(d.real))
    # Rebuild W from the reweighted spectrum (U D U^T; assumes real
    # eigenvectors -- confirm for non-symmetric P).
    W= np.dot(np.dot(np.matrix(U),np.matrix(D)),np.matrix(U.T))
    # Zero the diagonal and rescale each entry (i, j) by 1/(1 - W_ii).
    diagonal_matrix = np.matrix(np.repeat(0.0,W.shape[0]*W.shape[1]).reshape(W.shape[0],W.shape[1]))
    np.fill_diagonal(diagonal_matrix,1)
    divider = np.repeat(0.0,W.shape[0]*W.shape[1]).reshape(W.shape[0],W.shape[1])
    for i in range(W.shape[1]):
        divider[:,i] = 1-np.diag(W)
    W = (np.array(W) * np.array((1.0 - diagonal_matrix)))/ divider
    # NOTE(review): D is re-diagonalized with reversed entries below but
    # never read afterwards -- looks like leftover code.
    di = np.diag(D)
    np.fill_diagonal(D,di[::-1])
    # Restore degree scale, symmetrize, and clip negatives.
    W = np.dot(np.matrix(np.diag(DD)),np.matrix(W))
    W = (W + W.T) / 2.0
    W[W<0] = 0
    return W
# Input: affinity matrix (see the type note in the docstring below).
def dominateSet(affMatrix, NR_OF_KNN):
    """Keep each row's NR_OF_KNN largest affinities, zero the rest, and
    return the symmetrized result as an np.matrix.

    NOTE(review): the flatten helpers (asvectorCol) behave differently for
    np.matrix vs ndarray input; the ``resvector[k]`` scalar indexing below
    only works when ``affMatrix`` is an ndarray -- confirm callers never
    pass an np.matrix.
    """
    PNN_matrix = np.repeat(0.0,affMatrix.shape[0]*affMatrix.shape[1]).reshape(affMatrix.shape[0],affMatrix.shape[1])
    # Row-wise descending sort of affinities, plus their column indices.
    res_sort = np.sort(-(affMatrix),axis=1)
    res_sort = -res_sort
    res_sort_indices = np.argsort(-(affMatrix),axis=1)
    res = res_sort[:,0:NR_OF_KNN]
    # inds[:, i] is simply 0..n-1: the source-row index for each kept neighbor.
    inds = np.repeat(0,affMatrix.shape[0]*NR_OF_KNN).reshape(affMatrix.shape[0],NR_OF_KNN)
    for i in range(NR_OF_KNN):
        inds[:,i] = np.arange(affMatrix.shape[0])
    loc = res_sort_indices[:,0:NR_OF_KNN]
    # NOTE(review): ``indices`` and ``PNN`` are computed but never used.
    indices = (asvectorCol(np.matrix(loc))-1)[0] * (affMatrix.shape[0]) + (asvectorCol(inds))[0]
    PNN = (asvectorCol(PNN_matrix))
    resvector = (asvectorCol(res))
    k=0
    row = asvectorCol(np.matrix(loc))[0]
    col = (asvectorCol(inds))
    # Scatter the kept affinities; row/col give (neighbor, source), hence
    # the transpose afterwards.
    for i in range(len(row)):
        PNN_matrix[row[i],col[i]] = resvector[k]
        k=k+1
    PNN_matrix = PNN_matrix.T
    # Symmetrize by averaging with the transpose.
    PNN_matrix = (PNN_matrix + PNN_matrix.T)/2.0
    return np.matrix(PNN_matrix)
def asvectorCol(w):
    """Concatenate the columns of ``w`` end to end (column-major flatten)
    and return the transposed stack as an ndarray.

    Note: for np.matrix input the result is a (1, m*n) 2-D array; for
    ndarray input it is a flat (m*n,) array.
    """
    pieces = [w[:, col] for col in range(w.shape[1])]
    stacked = np.concatenate(pieces, axis=0)
    return np.array(stacked.T)
def asvectorRow(w):
    """Concatenate the rows of ``w`` end to end.

    Note: for ndarray input this is a row-major flatten to shape (m*n,);
    for np.matrix input the 1 x n row slices stack back into an m x n
    matrix, i.e. the input is effectively rebuilt.
    """
    pieces = [w[r, :] for r in range(w.shape[0])]
    return np.concatenate(pieces, axis=0)
def transitionFields(W):
    """Row-normalize W, scale columns by 1/sqrt(column mass), and return
    the product V V^T; rows/columns that were all zero in the input are
    forced back to zero at the end."""
    # Remember all-zero rows so they can be re-zeroed after the transform.
    zero_index = np.where(np.sum(W,axis=1)==0)[0]
    # Row-stochastic normalization.
    W = dn(W,"ave")
    w = np.sqrt(np.sum(np.absolute(W),axis=0)+np.finfo(float).eps)
    # Build a divider matrix whose columns are all ``w`` (then transposed),
    # so each entry W[i, j] ends up divided by w[j].
    # NOTE(review): the column assignment below depends on numpy
    # broadcasting of w's shape into divider's columns -- confirm it
    # behaves as intended for np.matrix inputs.
    divider = np.repeat(0.0,W.shape[0]*W.shape[1]).reshape(W.shape[0],W.shape[1])
    for i in range(divider.shape[1]):
        divider[:,i] = w
    divider = np.matrix(divider).T
    W = W / divider
    W = np.dot(W,W.T)
    # Restore hard zeros for originally empty rows/columns.
    W[zero_index,:] = 0
    W[:,zero_index] = 0
    return W
def dn(w,type):
    """Degree-normalize ``w``.

    type == "ave": scale rows by inverse column sums, D^-1 W.
    type == "gph": symmetric graph normalization D^-1/2 W D^-1/2.

    Raises ValueError for any other ``type`` (previously an unknown type
    fell through to an UnboundLocalError on ``wn``).
    """
    D = np.sum(w,axis = 0)
    if type=="ave":
        D = np.matrix(1.0/D)
        # Diagonal matrix of inverse degrees (replaces the old hand-rolled
        # double loop that filled the diagonal element by element).
        D = np.diag(np.asarray(D).ravel())
        wn = np.dot(D,w)
    elif type=="gph":
        D = 1.0/ np.sqrt(D)
        # np.matrix() wrapping keeps 1-D ndarray degree vectors working too.
        D = np.diag(np.asarray(np.matrix(D)).ravel())
        wn = np.dot(np.matrix(D),np.dot(np.matrix(w),np.matrix(D)))
    else:
        raise ValueError("dn: unknown normalization type %r" % (type,))
    return wn
if __name__=='__main__':
    # Smoke test: average-degree normalization of a 20x20 slice of the
    # mECS demo data (hard-coded local path).
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:20,0:20].T
    print(dn(data1,"ave"))
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,063 | ZEYINWU/Singel-Cell | refs/heads/master | /readdata.py | import numpy as np
data = open("/Users/Zeyin/Desktop/Study/Bioinformatics/project/data1.txt",'r')
f=data.next()
k=0
res = list()
for line in data:
if(k==4):
break
s = line.split(" ")
l = len(s)
curl = list()
for i in range(1,l):
curl.append(float(s[i]))
res.append(res)
k = k+1
data.close()
res = np.matrix(res)
print(res)
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,064 | ZEYINWU/Singel-Cell | refs/heads/master | /multikernel.py | import numpy as np
from scipy.stats import norm
import math
from handythread import foreach
import scipy.io as sio
## input: matrix x (samples x features), and integer thread count
def multiKernel(x,threads):
    """Build the bank of multi-scale Gaussian distance kernels used by
    SIMLR from data matrix ``x``.

    One kernel is produced per (neighborhood size in ``allk``, bandwidth
    multiplier in ``sigma``) pair, computed in parallel via
    handythread.foreach, then each is converted to a normalized distance
    form. Results accumulate in the module-level global ``D_Kernels``
    (also returned).
    """
    # NOTE(review): kernelType/kernelParams are populated but never read.
    kernelType = list();
    kernelType.append("poly");
    kernelParams = list();
    kernelParams.append(0);
    N = x.shape[0];
    KK= 0;
    # Bandwidth multipliers 2.0, 1.75, ..., down to 1.0.
    sigma = np.arange(2,0.8,-0.25);
    # Pairwise distances; note dist2 already returns squared distances and
    # they are squared again here -- preserved as-is, confirm intended.
    d = dist2(x,x);
    Diff = np.multiply(d,d);
    Diff_sort = (np.sort(Diff,axis=0)).T;
    # compute the combined kernels
    m = Diff.shape[0];
    n = Diff.shape[1];
    # Neighborhood sizes 10, 12, ..., 30.
    allk = np.arange(10,32,2);
    global D_Kernels;
    D_Kernels = list();
    def dkernels(l,x_fun=x,Diff_sort_fun=Diff_sort,allk_fun=allk,Diff_fun=Diff,sigma_fun=sigma,KK_fun=KK):
        # Skip neighborhood sizes larger than the sample count.
        if allk_fun[l]<((x_fun.shape[0])-1):
            # Per-sample local scale: mean distance to the allk[l] nearest
            # neighbors (column 0, the self-distance, excluded).
            TT = np.mean(Diff_sort_fun[:,np.arange(1,allk_fun[l]+1)],axis=1);
            TT = np.add(TT,np.finfo(float).eps);
            length = TT.shape[0]
            Sig = TT[:,0]
            for i in range(length-1):
                Sig = np.concatenate((Sig,Sig[:,0]),axis = 1)
            # Symmetric pairwise scale (sig_i + sig_j)/2, floored at eps.
            Sig = np.add(Sig,Sig.T);
            Sig = np.multiply(Sig,0.5);
            Sig_valid = Sig > np.finfo(float).eps;
            Sig = np.add(np.multiply(Sig,Sig_valid),np.finfo(float).eps);
            # One symmetrized Gaussian kernel per sigma multiplier.
            for j in range(len(sigma_fun)):
                W = norm.pdf(Diff_fun,0,np.multiply(sigma_fun[j],Sig));
                D_Kernels.insert(KK_fun +l+j,np.matrix(np.multiply(np.add(W,W.T),0.5)))
    r = range(len(allk));
    foreach(dkernels,r,threads=threads);
    # Convert each similarity kernel K to a distance-like form: normalize
    # by sqrt(diag(K) + 1), then D = (G_ii + G_jj - 2 G_ij)/2 with the
    # diagonal zeroed.
    for i in range(len(D_Kernels)):
        K= D_Kernels[i];
        k = 1.0 / np.sqrt(np.diagonal(K)+1)
        G = np.array(K) * np.array(np.dot(np.matrix(k).T,np.matrix(k)))
        diag = np.diag(G)
        G1 = np.repeat(0.0,(len(diag)*len(diag))).reshape(len(diag),len(diag))
        for j in range(len(diag)):
            G1[:,j] = diag
        G2 = G1.T
        D_Kernels_tmp = (np.array(G1)+np.array(G2) - 2.0*np.array(G)) / 2.0
        lll = len(np.diag(D_Kernels_tmp))
        newd = np.repeat(0.0,lll*lll).reshape(lll,lll)
        # fill_diagonal mutates newd in place (returns None).
        np.fill_diagonal(newd,np.diag(D_Kernels_tmp))
        D_Kernels_tmp = np.array(D_Kernels_tmp) - np.array(newd)
        D_Kernels[i] = D_Kernels_tmp;
    return D_Kernels;
def dist2(x,c):
    """Pairwise squared Euclidean distances between the rows of x and c.

    Returns an (x.rows, c.rows) matrix where entry (i, j) is
    ||x_i||^2 + ||c_j||^2 - 2 * x_i . c_j.
    """
    rows_x = x.shape[0]
    rows_c = c.shape[0]
    # Row-wise squared norms, kept as 1xN row matrices.
    sq_x = np.sum(np.multiply(x, x).T, axis=0)
    sq_c = np.sum(np.multiply(c, c).T, axis=0)
    # Column vectors of ones used to broadcast the norms across the grid.
    ones_c = np.matrix(np.repeat(1, rows_c)).T
    ones_x = np.matrix(np.repeat(1, rows_x)).T
    # -2 * inner products between every x row and every c row.
    cross = np.multiply(np.dot(x, c.T), -2)
    norms_x = np.dot(np.matrix(ones_c), np.matrix(sq_x)).T
    norms_c = np.dot(np.matrix(ones_x), np.matrix(sq_c))
    return np.add(np.add(norms_x, norms_c), cross)
if __name__=='__main__':
    # Smoke test: build the kernel set for a small 13x13 slice of the
    # mESC dataset (hard-coded local path -- only runs on the author's machine).
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:13,0:13].T
    test = np.matrix(data1)
    print ((multiKernel(test,1)))
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,065 | ZEYINWU/Singel-Cell | refs/heads/master | /SILMR.py | import numpy as np
import network as nw
import multikernel as mk
from numpy import linalg as LA
import scipy.io as sio
import util_silmr as us
import tsne as ts
from scipy.cluster.vq import vq, kmeans, whiten
import project as pj
def SIMLR(X,c,no_dim ,k=10, if_impute = False,normalize = False,cores_ratio=5):
    """Python port of the SIMLR single-cell similarity-learning pipeline.

    Args (as used below):
        X: features-by-samples matrix (samples end up as columns).
        c: number of clusters; also forwarded to the eigen helper.
        no_dim: sequence of output dimensionalities for the t-SNE embedding.
        k: neighborhood size for the local projection step.
        if_impute: if True, replace exact zeros with column means first.
        normalize: if True, min-max scale then center columns first.
        cores_ratio: thread count forwarded to mk.multiKernel.

    Returns a list in this order:
        [kmeans result, learned similarity S, the literal string "F_last is ",
         F_last embedding(s), 2-D t-SNE ydata, kernel weights alphaK,
         convergence deltas, eigenvector matrix LF]
    NOTE(review): the stray string "F_last is " (appended below) shifts the
    result indices; callers index around it (e.g. res[4] == ydata) -- confirm
    before changing the return layout.
    """
    #print(X)
    if if_impute==True :
        # Replace exact zeros with the corresponding column mean.
        X = X.T
        X_zeros = np.where(X==0)
        if len(X_zeros)>0:
            R_zeros = X_zeros[0]
            C_zeros = X_zeros[1]
            # Flat (column-major style) indices of the zero entries.
            ind = (C_zeros) * X.shape[0] + R_zeros
            mm = np.array(np.mean(X,axis=0))[0]
            m = mm
            for j in range(X.shape[0]-1):
                mm = np.concatenate((mm,m),axis=0)
            xx = asvectorRow(X)
            for i in ind:
                xx[i] = mm[i]
            X = xx.reshape(X.shape[0],X.shape[1])
            X = X.T
    if normalize == True :
        # Min-max scale to [0, 1], then center each column.
        X = X.T
        X = X - np.min(X)
        X = X / np.max(X)
        C_mean = (np.mean(X,axis=0))
        X = (X-C_mean).T
    NITER = 40
    num = X.shape[1]
    r=-1
    # Mixing rate between the previous and newly-estimated quantities.
    beta = 0.5
    D_Kernels = mk.multiKernel(X.T,cores_ratio)
    # Start from uniform kernel weights.
    alphaK = 1.0 / np.repeat(len(D_Kernels),len(D_Kernels))
    l=D_Kernels[0].shape[0]
    ll=D_Kernels[0].shape[1]
    # distX starts as the plain average of all kernel distance matrices.
    distX = np.repeat(0.0,l*ll).reshape(l,ll)
    for z in range(len(D_Kernels)):
        distX = distX + D_Kernels[z]
    distX = distX / len(D_Kernels)
    #print(len(D_Kernels))
    # Row-wise sorted distances and the matching neighbor indices.
    res = np.sort(distX)
    indices = np.argsort(distX)
    distX1 = np.repeat(0.0,distX.shape[0]*distX.shape[1]).reshape(distX.shape[0],distX.shape[1])
    idx = np.repeat(0.0,distX.shape[0]*distX.shape[1]).reshape(distX.shape[0],distX.shape[1])
    distX1 = res
    idx = indices
    A = np.repeat(0.0,num*num).reshape(num,num)
    # Columns 1..k+1: the k+1 nearest neighbors, self (column 0) excluded.
    di = distX1[:,1:(k+2)]
    rr = 0.5 * (k*di[:,k] - np.sum(di[:,range(k)],axis=1))
    id = idx[:,1:(k+2)]
    # Initial sparse affinities from the local distance gaps.
    numerator = np.repeat(0.0, di.shape[0]*di.shape[1]).reshape(di.shape[0],di.shape[1])
    for i in range(di.shape[1]):
        numerator[:,i] = di[:,k]
    numerator = numerator - di
    temp = np.matrix(k*di[:,k] - np.sum(di[:,0:k],axis=1)) + np.finfo(float).eps
    # temp=np.array(temp[0,:])
    denominator = np.repeat(0.0,(temp.shape[1]) * di.shape[1]).reshape((temp.shape[1]),di.shape[1])
    for i in range(temp.shape[1]):
        denominator[i,:] = temp[:,i]
    temp = numerator / denominator
    a = np.repeat(0.0, num * di.shape[1]).reshape(num,di.shape[1])
    for i in range(di.shape[1]):
        a[:,i] = np.arange(num)
    row = asvectorCol(np.matrix(a))[0]
    col = asvectorCol(np.matrix(id))[0]
    tempvector = asvectorCol(np.matrix(temp))[0]
    # NOTE(review): this clobbers the parameter k with a scatter counter; the
    # later networkDiffusion(S0,k) calls receive the loop count, not the
    # neighborhood size -- confirm against the R reference implementation.
    k=0
    for i in range(len(row)):
        A[row[i],col[i]] = tempvector[k]
        k=k+1
    if r<=0:
        r = np.mean(np.array(rr))
    lambda1=max(np.mean(np.array(rr)),0)
    #print(lambda1)
    #print("A is ")
    #print(A)
    A[np.isnan(A)] = 0
    A0 = (A+A.T)/2.0
    # Turn the averaged distances into similarities, diffuse, and normalize.
    S0 = -(distX-np.max(distX))
    #print(distX)
    S0 = nw.networkDiffusion(S0,k)
    #print(S0)
    S0 = nw.dn(S0,"ave")
    S = S0
    # Graph Laplacian L0 = D0 - S and its leading eigen-structure.
    d0 = (np.sum(S,axis = 0))
    l = d0.shape[1]
    D0 = np.repeat(0.0 , l*l).reshape(l,l)
    np.fill_diagonal(D0,d0)
    L0 = D0-S
    eig1_res = us.eig1(L0,c,0)
    F_eig1 = eig1_res[1]
    temp_eig1 = eig1_res[0]
    #print(temp_eig1)
    #raise ValueError("stop")
    evs_eig1 =eig1_res[2]
    #print(evs_eig1)
    #raise ValueError("stop")
    converge = list()
    # Alternate updates of the similarity S and the kernel weights alphaK.
    for iter in range(NITER):
        distf = us.L2_distance_1((F_eig1.T),(F_eig1.T))
        #print(distf)
        #raise ValueError("stop")
        A = np.repeat(0.0,num*num).reshape(num,num)
        b = idx[:,1:idx.shape[1]]
        a = np.repeat(0,num*b.shape[1]).reshape(num,b.shape[1])
        for i in range(b.shape[1]):
            a[:,i] = np.arange(num)
        # (row, col) index pairs of every non-self neighbor entry.
        inda =np.matrix(np.concatenate((np.matrix(nw.asvectorCol(a)),np.matrix(nw.asvectorCol(b))),axis=0)).T
        add=list()
        for z in range(inda.shape[0]):
            np.set_printoptions(precision=9)
            add.append(((distX[inda[z,0],inda[z,1]]) + lambda1 * distf[inda[z,0],inda[z,1]]) / 2.0 /r)
        ad = np.matrix(add).reshape(num,b.shape[1]).T
        # Project each row onto the probability simplex.
        c_input = -np.matrix(ad).T
        c_output = np.matrix(ad).T
        ad = pj.projsplx(c_input,c_output)
        ad1 = asvectorCol(ad)[0]
        A1 = asvectorCol(A)
        for i in range(inda.shape[0]):
            A[inda[i,0],inda[i,1]] = ad1[i]
        A[np.isnan(A)] = 0
        A = (A + A.T)/2.0
        # Blend the new affinities into S and re-diffuse.
        S = (1-beta)* S + beta * A
        #print(S)
        S = nw.networkDiffusion(np.array(S),k)
        D = np.repeat(0.0,S.shape[1]*S.shape[1]).reshape(S.shape[1],S.shape[1])
        np.fill_diagonal(D,np.sum(S,axis=0))
        L = D - S
        F_old = F_eig1
        # NOTE(review): eig1 is never recomputed on the updated L here, so
        # eig1_res (and hence F_eig1/ev_eig1) stays stale from before the
        # loop -- confirm whether us.eig1(L, c, 0) was intended here.
        F_eig1 = eig1_res[1]
        temp_eig1 = eig1_res[0]
        ev_eig1 = eig1_res[2]
        evs_eig1 = np.concatenate((np.matrix(evs_eig1),np.matrix(ev_eig1)),axis=0)
        # print(evs_eig1)
        # raise ValueError("stop")
        # Re-estimate kernel weights from agreement between kernels and S.
        DD = list()
        for i in range(len(D_Kernels)):
            temp = np.array(np.finfo(np.float32).eps + D_Kernels[i]) * np.array(S+np.finfo(np.float32).eps)
            DD.append(np.mean(np.sum(temp,axis = 0)))
        alphaK0 = us.umkl(np.matrix(DD))
        alphaK0 = alphaK0 / np.sum(alphaK0)
        alphaK = (1-beta)*alphaK + beta * alphaK0
        alphaK = alphaK / np.sum(alphaK)
        # Eigengap-style convergence measure around the c-th eigenvalue.
        fn1 = np.sum(ev_eig1[0:c])
        fn2 = np.sum(ev_eig1[0:c+1])
        converge.append(fn2-fn1)
        #print(converge)
        #raise ValueError("stop")
        if iter < 10:
            if ev_eig1[len(ev_eig1)-1] > 0.00001 :
                # Warm-up phase: tighten the regularization.
                lambda1 = 1.5 * lambda1
                r = r /1.01
        else:
            # Stop (and roll back) once the convergence measure worsens.
            if np.matrix(converge)[0,iter-1]>np.matrix(converge)[0,iter-2]:
                S = S_old
                if np.matrix(converge)[0,iter-2] > 0.2:
                    raise Warning("Maybe you should set a larger value of c")
                break
        S_old = S
    #compute Kbeta
    #print(alphaK)
    # Final weighted combination of the kernel distances.
    distX = np.array(D_Kernels[0]) * np.array(alphaK[0,0])
    for i in range(1,len(D_Kernels)) :
        distX = distX + np.array(D_Kernels[i]) * alphaK[0,i]
    distX1 = np.sort(distX)
    inx = np.argsort(distX)
    LF = F_eig1
    D = np.repeat(0.0, S.shape[1]*S.shape[1]).reshape(S.shape[1],S.shape[1])
    # NOTE(review): this sums D (still all zeros) instead of S, leaving D
    # zero and L = -S -- looks like a bug; confirm against the R reference.
    np.fill_diagonal(D,np.sum(D,axis=0))
    L = D - S
    eigen_L = LA.eig(L)
    U = eigen_L[1]
    D = eigen_L[0]
    #print(S)
    if len(no_dim)== 1 :
        # Use the trailing no_dim eigenvector columns as the t-SNE seed.
        U_index = np.arange(U.shape[1]-no_dim+1, U.shape[1]+1)
        U_index = (-np.sort(-U_index))-1
        #print("S is ")
        #print(type(S))
        F_last = ts.tsne(S,no_dim[0], U[:,U_index])
    else:
        F_last = list()
        # NOTE(review): `no.dim` is a typo (NameError if this branch runs);
        # presumably should be no_dim.
        for i in range(len(no.dim)):
            U_index = np.arange(U.shape[1]-no_dim+1, U.shape[1]+1)
            U_index = (-np.sort(-U_index))-1
            F_last.append(ts.tsne(S,no_dim[i], U[:,U_index]))
    #print(U[:,U_index])
    #raise ValueError("stop")
    y = kmeans(F_last,c)
    # 2-D embedding of the learned similarity for visualization.
    ydata = ts.tsne(np.matrix(S),2,None)
    print(S)
    results = list()
    #print("y is ")
    #print(y)
    results.append(y)
    #print("S is ")
    #print(S)
    results.append(S)
    #print("F_last is ")
    #print(F_last)
    results.append("F_last is ")
    results.append(F_last)
    #print("ydata is ")
    #print(ydata)
    results.append(ydata)
    #print("alphaK is ")
    #print(alphaK)
    results.append(alphaK)
    #print("converge is ")
    #print(converge)
    results.append(converge)
    #print("LF is ")
    #print(LF)
    results.append(LF)
    return results
def asvectorCol(w):
    """Stack the columns of matrix `w` top-to-bottom and return the result
    transposed, i.e. a 1 x (rows*cols) array in column-major order."""
    stacked = w[:, 0]
    for j in range(1, w.shape[1]):
        stacked = np.concatenate((stacked, w[:, j]), axis=0)
    return np.array(stacked.T)
def asvectorRow(w):
    """Flatten matrix `w` row by row into a 1-D array (row-major order)."""
    flat = np.array(w[0, :])[0]
    row_idx = 1
    while row_idx < w.shape[0]:
        flat = np.concatenate((flat, np.array(w[row_idx, :])[0]), axis=0)
        row_idx += 1
    return np.array(flat)
if __name__=='__main__':
    # Smoke test: run SIMLR on the mESC dataset (hard-coded local path) and
    # scatter-plot res[4], the 2-D t-SNE embedding (see the return layout of
    # SIMLR above -- index 4 is ydata).
    data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
    data1 = data["in_X"]
    data1 = data1[0:181,0:8988].T
    data2 = np.repeat(3,1)  # cluster count (and latent dims) as a length-1 array
    res = SIMLR(data1,data2,data2)
    dd=res[4]
    print(dd)
    import matplotlib.pyplot as plt
    plt.plot(dd[:,0],dd[:,1],'*')
    plt.show()
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,066 | ZEYINWU/Singel-Cell | refs/heads/master | /demo.py | import numpy as np
import network as nw
import multikernel as mk
from numpy import linalg as LA
import scipy.io as sio
import util_silmr as us
import tsne as ts
from scipy.cluster.vq import vq, kmeans, whiten
import project as pj
import SILMR as silmr
# Reproduce the SIMLR run from SILMR.py's __main__ on the mESC dataset
# (hard-coded local path -- only runs on the author's machine).
data = sio.loadmat('/Users/Zeyin/Desktop/Study/Bioinformatics/SIMLR-SIMLR/data/Test_1_mECS.mat')
data1 = data["in_X"]
data1 = data1[0:181,0:8988].T
data2 = np.repeat(3,1)  # cluster count (and latent dims) as a length-1 array
res = silmr.SIMLR(data1,data2,data2)
dd = res[4]  # 2-D t-SNE embedding (index 4 in SIMLR's return list)
#import matplotlib.pyplot as plt
#plt.plot(np.array(dd[:,0]),'*',dd[:,1],'.',dd[:,2],'+')
#plt.show()
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,067 | ZEYINWU/Singel-Cell | refs/heads/master | /eig1.py | from numpy import linalg as LA
def eig1(A):
    """Eigen-decompose A with eigenvalues sorted in descending order.

    Returns a list [eigval, eigvec, eigval_full] where:
        eigval      -- eigenvalues sorted largest-first,
        eigvec      -- the real parts of the matching eigenvector columns,
        eigval_full -- the same sorted eigenvalues (kept for callers that
                       index the third slot).
    """
    # Local import: the module only does `from numpy import linalg as LA`
    # at the top, but argsort lives on numpy itself.
    import numpy as np
    eigen_A = LA.eig(A)
    v = eigen_A[1]
    d = eigen_A[0]
    idx = np.argsort(-d)  # indices sorting eigenvalues in descending order
    idx1 = idx
    eigval = d[idx1]
    # Fix: the original `v[,idx1]` was a syntax error; select the sorted
    # eigenvector columns and keep only the real part.
    eigvec = v[:, idx1].real
    eigval_full = d[idx]
    res = list()
    res.append(eigval)
    res.append(eigvec)
    res.append(eigval_full)  # fix: stray `sa` token removed from this line
    return res
| {"/SILMR.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py"], "/demo.py": ["/network.py", "/multikernel.py", "/util_silmr.py", "/tsne.py", "/project.py", "/SILMR.py"]} |
67,068 | Hellowlol/torrentool | refs/heads/master | /torrentool/cli.py | import click
from os import path, getcwd
from sys import exit
from torrentool import VERSION
from torrentool.api import Torrent
# Root click group; its docstring doubles as the CLI help text.
@click.group()
@click.version_option(version='.'.join(map(str, VERSION)))
def start():
    """Torrentool command line utilities."""
# Subgroup holding the torrent-related commands (e.g. `torrent create`).
@start.group()
def torrent():
    """Torrent-related commands."""
@torrent.command()
@click.argument('source')
@click.option('--dest', default=None, help='Destination path to put .torrent file into. Default: current directory.')
@click.option('--tracker', default=None, help='Tracker announce URL (multiple comma-separated values supported).')
@click.option('--open_trackers', default=False, is_flag=True, help='Add open trackers announce URLs.')
@click.option('--comment', default=None, help='Arbitrary comment.')
@click.option('--cache', default=False, is_flag=True, help='Upload file to torrent cache services.')
def create(source, dest, tracker, open_trackers, comment, cache):
    """Create torrent file from a single file or a directory."""
    def check_path(fpath):
        # Resolve to an absolute path; abort with exit code 1 if it is missing.
        fpath = path.abspath(fpath)
        if not path.exists(fpath):
            click.secho('Path is not found: %s' % fpath, fg='red', err=True)
            exit(1)
        return fpath
    if not dest:
        dest = getcwd()
    source = check_path(source)
    # Derive the .torrent file name from the source's base name,
    # normalizing dots and spaces to underscores.
    source_title = path.basename(source).replace('.', '_').replace(' ', '_')
    dest = check_path(dest)
    dest = '%s.torrent' % path.join(dest, source_title)
    click.secho('Creating torrent from %s ...' % source)
    my_torrent = Torrent.create_from(source)
    if comment:
        my_torrent.comment = comment
    # Gather announce URLs from --tracker (comma separated) and, optionally,
    # the open-trackers list.
    urls = []
    if tracker:
        urls = tracker.split(',')
    if open_trackers:
        urls.extend(get_open_trackers())
    if urls:
        my_torrent.announce_urls = urls
    my_torrent.to_file(dest)
    click.secho('Torrent file created: %s' % dest, fg='green')
    click.secho('Torrent info hash: %s' % my_torrent.info_hash, fg='blue')
    if cache:
        upload_cache(dest)
def upload_cache(fpath):
    """Uploads .torrent file to a cache server.

    On success prints the cached torrent URL; on any failure prints an
    error to stderr and returns without raising.

    :param fpath: Path to the .torrent file to upload.
    """
    url_base = 'http://torrage.info'
    url_upload = '%s/autoupload.php' % url_base
    url_download = '%s/torrent.php?h=' % url_base
    file_field = 'torrent'
    # Fix: the original format string had a %s placeholder but no argument,
    # so the literal "%s" was printed.
    click.secho('Uploading to %s torrent cache service ...' % url_base)
    # Fix: import separately. The original caught ImportError together with
    # requests.RequestException; if the import had failed, evaluating
    # `requests.RequestException` in the except clause raised a NameError.
    try:
        import requests
    except ImportError as e:
        click.secho('`requests` package is unavailable.', fg='red', err=True)
        click.secho('Failed: %s' % e, fg='red', err=True)
        return
    try:
        # Fix: close the uploaded file handle (the original leaked it).
        with open(fpath, 'rb') as f:
            response = requests.post(url_upload, files={file_field: f})
        response.raise_for_status()
        info_cache = response.text
        click.secho('Cached torrent URL: %s' % (url_download + info_cache), fg='yellow')
    except requests.RequestException as e:
        click.secho('Failed: %s' % e, fg='red', err=True)
def get_open_trackers():
    """Returns open trackers announce URLs list from remote repo or local backup.

    Tries to fetch an up-to-date list from the project's GitHub repo; on any
    failure (requests missing, network/HTTP error) falls back to the bundled
    `repo/open_trackers.ini`.
    """
    ourl = 'https://raw.githubusercontent.com/idlesign/torrentool/master/torrentool/repo'
    ofile = 'open_trackers.ini'
    click.secho('Fetching an up-to-date open tracker list ...')
    # Fix: import separately. Referencing requests.RequestException in an
    # except clause after a failed `import requests` raised NameError in
    # the original combined handler.
    try:
        import requests
    except ImportError:
        requests = None
        click.secho('`requests` package is unavailable.', fg='red', err=True)
    open_trackers = None
    if requests is not None:
        try:
            response = requests.get('%s/%s' % (ourl, ofile), timeout=3)
            response.raise_for_status()
            open_trackers = response.text.splitlines()
        except requests.RequestException:
            pass
    if open_trackers is None:
        # Remote fetch unavailable: use the list shipped with the package.
        click.secho('Failed. Using built-in open tracker list.', fg='red', err=True)
        with open(path.join(path.dirname(__file__), 'repo', ofile)) as f:
            open_trackers = map(str.strip, f.readlines())
    return open_trackers
def main():
    """Console entry point: dispatch into the click command tree."""
    start(obj={})
| {"/torrentool/cli.py": ["/torrentool/api.py"], "/torrentool/api.py": ["/torrentool/torrent.py"], "/torrentool/torrent.py": ["/torrentool/utils.py"], "/tests/test_etc.py": ["/torrentool/utils.py"]} |
67,069 | Hellowlol/torrentool | refs/heads/master | /torrentool/api.py | from .bencode import Bencode
from .torrent import Torrent
| {"/torrentool/cli.py": ["/torrentool/api.py"], "/torrentool/api.py": ["/torrentool/torrent.py"], "/torrentool/torrent.py": ["/torrentool/utils.py"], "/tests/test_etc.py": ["/torrentool/utils.py"]} |
67,070 | Hellowlol/torrentool | refs/heads/master | /torrentool/torrent.py | from os.path import join, isdir, getsize, normpath, basename
from os import walk, sep
from hashlib import sha1
from datetime import datetime
from calendar import timegm
from functools import reduce
from .bencode import Bencode
from .exceptions import TorrentError
from .utils import get_app_version
class Torrent(object):
    """Represents a torrent file, and exposes utilities to work with it."""
    # Path this torrent was loaded from / last written to (see from_file/to_file).
    _filepath = None
    def __init__(self, dict_struct=None):
        # `_struct` is the raw bencode dictionary backing every property below.
        dict_struct = dict_struct or {'info': {}}
        self._struct = dict_struct
    def __str__(self):
        return 'Torrent: %s' % self.name
    @property
    def files(self):
        """Files in torrent. List of tuples (filepath, size)."""
        files = []
        info = self._struct.get('info')
        if not info:
            return files
        if 'files' in info:
            # Multi-file torrent: paths are relative to the torrent's name dir.
            base = info['name']
            for f in info['files']:
                files.append((join(base, *f['path']), f['length']))
        else:
            # Single-file torrent.
            files.append((info['name'], info['length']))
        return files
    @property
    def total_size(self):
        """Total size of all files in torrent."""
        return reduce(lambda prev, curr: prev + curr[1], self.files, 0)
    @property
    def info_hash(self):
        """Hash of torrent file info section. Also known as torrent hash."""
        info = self._struct.get('info')
        if not info:
            return None
        return sha1(Bencode.encode(info)).hexdigest()
    @property
    def magnet_link(self):
        """Magnet link using BTIH (BitTorrent Info Hash) URN."""
        return 'magnet:?xt=urn:btih:' + self.info_hash
    def _get_announce_urls(self):
        # Prefer the tiered `announce-list`; fall back to the single
        # `announce` URL wrapped into the same list-of-lists shape.
        urls = self._struct.get('announce-list')
        if not urls:
            urls = self._struct.get('announce')
            if not urls:
                return []
            urls = [[urls]]
        return urls
    def _set_announce_urls(self, val):
        self._struct['announce'] = ''
        self._struct['announce-list'] = []
        def set_single(val):
            # A single URL is stored in `announce` only; drop the list form.
            del self._struct['announce-list']
            self._struct['announce'] = val
        types = (list, tuple, set)
        if isinstance(val, types):
            length = len(val)
            if length:
                if length == 1:
                    set_single(val[0])
                else:
                    # Each item becomes one tier; bare strings are wrapped.
                    for item in val:
                        if not isinstance(item, types):
                            item = [item]
                        self._struct['announce-list'].append(item)
                    self._struct['announce'] = val[0]
        else:
            set_single(val)
    announce_urls = property(_get_announce_urls, _set_announce_urls)
    """List of lists of tracker announce URLs."""
    def _get_comment(self):
        return self._struct.get('comment')
    def _set_comment(self, val):
        self._struct['comment'] = val
    comment = property(_get_comment, _set_comment)
    """Optional. Free-form textual comments of the author."""
    def _get_creation_date(self):
        date = self._struct.get('creation date')
        if date is not None:
            date = datetime.utcfromtimestamp(int(date))
        return date
    def _set_creation_date(self, val):
        self._struct['creation date'] = timegm(val.timetuple())
    creation_date = property(_get_creation_date, _set_creation_date)
    """Optional. The creation time of the torrent, in standard UNIX epoch format. UTC."""
    def _get_created_by(self):
        return self._struct.get('created by')
    def _set_created_by(self, val):
        self._struct['created by'] = val
    created_by = property(_get_created_by, _set_created_by)
    """Optional. Name and version of the program used to create the .torrent"""
    def _get_private(self):
        return self._struct.get('info', {}).get('private', False)
    def _set_private(self, val):
        # Falsy values remove the flag entirely rather than storing 0.
        if not val:
            try:
                del self._struct['info']['private']
            except KeyError:
                pass
        else:
            self._struct['info']['private'] = 1
    private = property(_get_private, _set_private)
    """Optional. If True the client MUST publish its presence to get other peers
    ONLY via the trackers explicitly described in the metainfo file. If False or is not present,
    the client may obtain peer from other means, e.g. PEX peer exchange, dht.
    """
    def _get_name(self):
        return self._struct.get('info', {}).get('name', None)
    def _set_name(self, val):
        self._struct['info']['name'] = val
    name = property(_get_name, _set_name)
    """ Torrent's name """
    def to_file(self, filepath=None):
        """Writes Torrent object into file.

        :param filepath: Path to write to. Falls back to the path this
            torrent was previously loaded from or saved to.
        :raises TorrentError: if no filepath is known.
        """
        if filepath is None and self._filepath is None:
            raise TorrentError('Unable to save torrent to file: no filepath supplied.')
        if filepath is not None:
            self._filepath = filepath
        with open(self._filepath, mode='wb') as f:
            f.write(self.to_string())
    def to_string(self):
        """Returns bytes representing torrent file.

        :rtype: bytearray
        """
        return Bencode.encode(self._struct)
    @classmethod
    def _get_target_files_info(cls, src_path):
        """Returns ([(filepath, size, path_components), ...], total_size)
        for the file or directory at `src_path`. Zero-length files are
        skipped; directory contents are walked with files sorted per dir.
        """
        src_path = u'%s' % src_path  # Force walk() to return unicode names.
        is_dir = isdir(src_path)
        target_files = []
        if is_dir:
            for base, _, files in walk(src_path):
                target_files.extend([join(base, fname) for fname in sorted(files)])
        else:
            target_files.append(src_path)
        target_files_ = []
        total_size = 0
        for fpath in target_files:
            file_size = getsize(fpath)
            if not file_size:
                continue
            # Store the path split into components relative to src_path.
            target_files_.append((fpath, file_size, normpath(fpath.replace(src_path, '')).strip(sep).split(sep)))
            total_size += file_size
        return target_files_, total_size
    @classmethod
    def create_from(cls, src_path):
        """Returns Torrent object created from a file or a directory.

        :param str src_path:
        :rtype: Torrent
        """
        is_dir = isdir(src_path)
        target_files, size_data = cls._get_target_files_info(src_path)
        SIZE_MIN = 32768  # 32 KiB
        SIZE_DEFAULT = 262144  # 256 KiB
        SIZE_MAX = 1048576  # 1 MiB
        CHUNKS_MIN = 1000  # todo use those limits as advised
        CHUNKS_MAX = 2200
        # Pick the piece size from the total payload size.
        size_piece = SIZE_MIN
        if size_data > SIZE_MIN:
            size_piece = SIZE_DEFAULT
        if size_piece > SIZE_MAX:
            size_piece = SIZE_MAX
        def read(filepath):
            # Yield chunks sized to top up the shared pieces_buffer, so a
            # piece can span a file boundary.
            with open(filepath, 'rb') as f:
                while True:
                    chunk = f.read(size_piece - len(pieces_buffer))
                    chunk_size = len(chunk)
                    if chunk_size == 0:
                        break
                    yield chunk
        # Concatenated 20-byte SHA-1 digests, one per piece.
        pieces = bytearray()
        pieces_buffer = bytearray()
        for fpath, _, _ in target_files:
            for chunk in read(fpath):
                pieces_buffer += chunk
                if len(pieces_buffer) == size_piece:
                    pieces += sha1(pieces_buffer).digest()[:20]
                    pieces_buffer = bytearray()
        if len(pieces_buffer):
            # Hash the final, possibly partial, piece.
            pieces += sha1(pieces_buffer).digest()[:20]
            pieces_buffer = bytearray()
        info = {
            'name': basename(src_path),
            'pieces': bytes(pieces),
            'piece length': size_piece,
        }
        if is_dir:
            files = []
            for _, length, path in target_files:
                files.append({'length': length, 'path': path})
            info['files'] = files
        else:
            info['length'] = target_files[0][1]
        torrent = cls({'info': info})
        torrent.created_by = get_app_version()
        torrent.creation_date = datetime.utcnow()
        return torrent
    @classmethod
    def from_string(cls, string):
        """Alternative constructor to get Torrent object from string.

        :param str string:
        :rtype: Torrent
        """
        return cls(Bencode.read_string(string))
    @classmethod
    def from_file(cls, filepath):
        """Alternative constructor to get Torrent object from file.

        :param str filepath:
        :rtype: Torrent
        """
        torrent = cls(Bencode.read_file(filepath))
        torrent._filepath = filepath
        return torrent
| {"/torrentool/cli.py": ["/torrentool/api.py"], "/torrentool/api.py": ["/torrentool/torrent.py"], "/torrentool/torrent.py": ["/torrentool/utils.py"], "/tests/test_etc.py": ["/torrentool/utils.py"]} |
67,071 | Hellowlol/torrentool | refs/heads/master | /tests/test_etc.py | # -*- encoding: utf-8 -*-
from torrentool.utils import get_app_version
def test_get_app_version():
    """The version string must embed the application name."""
    assert 'torrentool' in get_app_version()
| {"/torrentool/cli.py": ["/torrentool/api.py"], "/torrentool/api.py": ["/torrentool/torrent.py"], "/torrentool/torrent.py": ["/torrentool/utils.py"], "/tests/test_etc.py": ["/torrentool/utils.py"]} |
67,072 | Hellowlol/torrentool | refs/heads/master | /torrentool/utils.py |
def get_app_version():
    """Full application version string, e.g. "torrentool/1.0.0".

    Suitable for putting into Torrent.created_by.
    """
    # Imported lazily inside the function (presumably to avoid an import
    # cycle at module load time -- kept as in the original).
    from torrentool import VERSION
    version = '.'.join(str(part) for part in VERSION)
    return 'torrentool/%s' % version
| {"/torrentool/cli.py": ["/torrentool/api.py"], "/torrentool/api.py": ["/torrentool/torrent.py"], "/torrentool/torrent.py": ["/torrentool/utils.py"], "/tests/test_etc.py": ["/torrentool/utils.py"]} |
67,080 | arjun-krishna1/BitHolmes | refs/heads/main | /crytpoaddrverification/__init__.py | import requests
def verify_bitcoin(public_key):
    """Loosely validate a Bitcoin address and confirm it on blockchain.com.

    First rejects the empty string and any character outside the ASCII
    alphanumeric ranges 0-9 / A-Z / a-z, then checks that the address page
    exists on blockchain.com. Returns True only when both checks pass.

    NOTE(review): this is only a character whitelist, not full Base58Check
    validation (e.g. '0', 'O', 'I', 'l' are still accepted) -- the remote
    lookup is relied on for real validation.
    """
    public_key = public_key.strip()
    if not public_key:
        # Empty input: avoid a pointless network request.
        return False
    for ch in public_key:
        value = ord(ch)
        # Valid ASCII alphanumerics: 48-57 ('0'-'9'), 65-90 ('A'-'Z'),
        # 97-122 ('a'-'z'). Fix: the original gap check used
        # `value > 91 and value < 96`, which wrongly accepted '[' (91)
        # and '`' (96).
        if value < 48 or value > 122 or (57 < value < 65) or (90 < value < 97):
            return False
    page = requests.get(f'https://www.blockchain.com/btc/address/{public_key}')
    if page.ok:
        return True
    return False
| {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,081 | arjun-krishna1/BitHolmes | refs/heads/main | /ai_predictions/__init__.py | import tensorflow as tf
import numpy as np
import requests
import pandas as pd
import json
class AddressClassifier:
    """Scores a Bitcoin address for fraud with a saved Keras DNN.

    Transaction data for the address is fetched from the blockchain.info
    API, flattened to one feature row per transaction, z-score normalized
    with precomputed dataset statistics, and scored by the model; the
    per-transaction predictions are averaged into one fraud score.
    """
    # Average scores above this are treated as fraud
    # (1 is certain fraud, 0 is certainly not fraud).
    THRESHOLD = 0.3
    BASE_API = "https://blockchain.info/rawaddr/"
    MODEL_PATH = "dnn_model"
    STATS_PATH = "dataset_stats.json"
    # Address-level features (constant across all of the address's transactions).
    ID_FEATURES = ["n_tx", "n_unredeemed", "total_received", "total_sent", "final_balance"]
    # Per-transaction features.
    TRANSACTION_FEATURES = ["ver", "vin_sz", "vout_sz", "size", "weight", "fee", "lock_time", "time"]
    ALL_FEATURES = ID_FEATURES + TRANSACTION_FEATURES
    def __init__(self):
        self.model = tf.keras.models.load_model(self.MODEL_PATH)
        # Fix: the stats file name was hard-coded as "dataset_stats1.json",
        # disagreeing with the STATS_PATH constant above.
        with open(self.STATS_PATH, "r") as o:
            dataset_stats = json.load(o)
        # Training-set statistics used to normalize incoming rows.
        self.mean = pd.Series(dataset_stats["mean"], index=self.ALL_FEATURES, dtype=np.float64)
        self.std_dev = pd.Series(dataset_stats["std"], index=self.ALL_FEATURES, dtype=np.float64)
    def predict(self, address):
        """Return (is_fraud, avg_score) for `address`.

        NOTE(review): if get_data_api returns {} (bad response), the flatten
        step raises KeyError -- confirm callers guard for that.
        """
        all_data = self.get_data_api(address)
        flat_dict = self.flatten_entire_dict(all_data)
        flat_arr = self.dict_to_array(flat_dict)
        process_arr = self.preprocess(flat_arr)
        # Average the model's prediction over all of this key's transactions.
        avg_pred = self.model.predict(process_arr).mean()
        return (avg_pred > self.THRESHOLD, avg_pred)
    def get_data_api(self, address):
        """Fetch the raw address record from blockchain.info; {} on bad JSON."""
        url = self.BASE_API + address
        resp = requests.get(url)
        try:
            return resp.json()
        except ValueError:  # narrowed from bare except: non-JSON response body
            return {}
    def flatten_entire_dict(self, curr):
        """Expand the API record into one dict per transaction, each merging
        the address-level features with that transaction's features."""
        id_dict_base = {feat: curr[feat] for feat in self.ID_FEATURES}
        tot_vals = []
        for tx in curr["txs"]:
            this_tx_vals = {tx_feat: tx[tx_feat] for tx_feat in self.TRANSACTION_FEATURES}
            tot_vals.append({**id_dict_base, **this_tx_vals})
        return tot_vals
    def dict_to_array(self, flat_dict):
        """Convert the per-transaction dicts to rows ordered by ALL_FEATURES."""
        return [[tx[feat] for feat in self.ALL_FEATURES] for tx in flat_dict]
    def preprocess(self, data):
        """Z-score normalize rows with the training-set mean/std."""
        df = pd.DataFrame(data=data, columns=self.ALL_FEATURES)
        return (df - self.mean) / self.std_dev
if __name__ == "__main__":
test_address = "1JxmKkNK1b3p7r8DDPtnNmGeLZDcgPadJb"
classifier = AddressClassifier()
print(classifier.THRESHOLD)
pred = classifier.predict(test_address)
print(pred)
| {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,082 | arjun-krishna1/BitHolmes | refs/heads/main | /qrcodefunctions/__init__.py | from qrcode import make
from time import time
import os, glob
from pathlib import Path
# Absolute path to the project's static/ directory (one level above this package).
static_path = os.path.join(Path(__file__).parents[1], 'static')
def make_qr(data):
    """Render `data` as a QR code PNG inside the static directory.

    The file name is the current unix timestamp plus ".png", which
    delete_old_files() later parses to age images out. Returns just the
    file name (suitable for url_for('static', filename=...)).
    """
    code = make(data)
    name = str(time()) + ".png"
    # Fix: write under the absolute static_path rather than the cwd-relative
    # "static/" prefix, so saving and delete_old_files() agree on the
    # directory regardless of the process working directory.
    code.save(os.path.join(static_path, name))
    return name
def make_website_link_qr(public_key, host_url):
    """Build a QR code pointing at this site's page for `public_key`."""
    target_url = host_url + public_key
    return make_qr(target_url)
def delete_old_files():
    """Remove QR code PNGs in the static directory older than one day.

    File names encode their creation time as a unix timestamp (see make_qr),
    so the age is recovered straight from the name.
    """
    files = os.listdir(static_path)
    # Recover the creation timestamp embedded in the file name.
    png_to_float = lambda file : float(file.replace(".png", ""))
    for file in files:
        if ".png" in file:
            time_change = time() - png_to_float(file)
            if time_change > 86400: # a day
                os.remove(os.path.join(static_path, file))
delete_old_files() | {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,083 | arjun-krishna1/BitHolmes | refs/heads/main | /data.py | fraud_level_to_value = {
-1: "broken",
0: "unknown",
1: "fraud",
2: "suspected fraud",
3: "not fraud"
} | {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,084 | arjun-krishna1/BitHolmes | refs/heads/main | /rq/__init__.py | import requests
def get_token():
    """Read the bitcoinabuse.com API token from new_secrets.txt.

    Returns the file's full contents (including any trailing newline).
    """
    # `with` guarantees the handle closes even if read() raises
    # (the original used a manual open/close pair).
    with open('new_secrets.txt', 'r') as f:
        return f.read()
def check_addr(address):
    """Look up `address` on the bitcoinabuse.com report-count API.

    Returns an integer fraud level (see data.fraud_level_to_value):
        1 -- one or more abuse reports exist (fraud),
        3 -- no abuse reports (not fraud),
        0 -- unknown (response carried no usable count).
    """
    key = get_token()
    call = f"https://www.bitcoinabuse.com/api/reports/check?address={address}&api_token={key}"
    response_json = requests.api.get(call).json()
    count = response_json.get("count", None)
    if count is None:
        # Fix: the original fell off the end and implicitly returned None,
        # which callers then compared against integer fraud levels.
        return 0  # unknown
    if int(count) > 0:
        return 1  # 1 or more fraud reports on this address
    return 3  # no fraud reports for this address
| {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,085 | arjun-krishna1/BitHolmes | refs/heads/main | /app.py | from flask import *
import data, os
from ai_predictions import AddressClassifier
import qrcodefunctions as qrfunc
from rq import check_addr
from crytpoaddrverification import verify_bitcoin
from datetime import timedelta
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
# Load the deep-learning fraud classifier once at startup.
CL = AddressClassifier()
app = Flask(__name__)
# Module-level state mutated by the index() view; 0 == "unknown" (see data.py).
fraud_level = 0
# NOTE(review): hard-coded secret key checked into source -- should come from
# config/environment before any real deployment.
app.secret_key = "7291"
app.permanent_session_lifetime = timedelta(minutes=10)
@app.route("/<address>", methods = ["GET"])
@app.route('/', methods = ["GET", "POST"])
def index(address = None):
global fraud_level
error_one = error_two = 0
pressed = lambda x: x in request.form
if address: #if address passed into the url
if verify_bitcoin(address):
fraud_level = check_addr(address)
return render_template("results.html", fraud_level = fraud_level)
else:
error_one = "That bitcoin public key was not found"
elif pressed('public-key-submit-qr') and request.form['public-key-input-qr']:
public_key_qr = request.form['public-key-input-qr']
if verify_bitcoin(public_key_qr):
return redirect("/qr/" + public_key_qr)
else:
error_two = "That bitcoin public key was not found"
elif pressed('public-key-submit') and request.form['public-key-input']: #if submit button is pressed
public_key = request.form['public-key-input']
if verify_bitcoin(public_key):
fraud_level = check_addr(public_key)
fraud_percentage = 0
if fraud_level == 3:
# 2
# DO DEEP LEARNING CHECK
is_fraud, fraud_percentage = CL.predict(public_key)
if is_fraud:
fraud_level = 2
print(fraud_level, is_fraud, fraud_percentage)
return render_template("results.html", fraud_level = fraud_level, fraud_percentage = fraud_percentage )
else:
error_one = "That bitcoin public key was not found"
return render_template("base.html", errors = [error_one, error_two])
@app.route("/qr/<address>", methods = ["GET"])
@app.route('/qr/', methods = ["GET"])
def qr(address = None):
error_two = 0
if address is None:
return redirect("/")
if verify_bitcoin(address):
qrfunc.delete_old_files()
qr_hash = qrfunc.make_website_link_qr(address, request.host_url)
location = url_for('static', filename = qr_hash)
else:
error_two = "That public key was not found"
return render_template("base.html", errors=[0, error_two])
return render_template("qr.html", location = location)
@app.route('/result/', methods = ["GET"])
def result():
    """Render the static result page."""
    return render_template("result.html")
@app.route("/reports/<address>", methods = ["GET"])
@app.route('/reports/', methods = ["GET", "POST"])
def reports(address = None):
error_one = 0
if 'report-btn-submit' in request.form and request.form['report-key-input']:
reported_public_key = request.form['report-key-input']
if verify_bitcoin(reported_public_key):
#DATABASE ADDITION LOCATION
return redirect("/")
else:
error_one = "That public key was not found"
return render_template("reports.html", errors = [error_one])
@app.context_processor
def override_url_for():
    # Make templates resolve url_for through dated_url_for so static asset
    # URLs carry a cache-busting query string.
    return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
    """url_for wrapper appending a ?q=<mtime> cache-buster to static files."""
    if endpoint == "static":
        filename = values.get('filename', None)
        if filename:
            path = os.path.join(app.root_path, endpoint, filename)
            # Use the file's modification time so browsers refetch updated assets.
            values['q'] = int(os.stat(path).st_mtime)
    return url_for(endpoint, **values)
if __name__ == "__main__":
# app.run(host='localhost', debug=False, port=8000, threaded=True)
app.run(debug=True)
| {"/app.py": ["/data.py", "/ai_predictions/__init__.py", "/qrcodefunctions/__init__.py", "/rq/__init__.py", "/crytpoaddrverification/__init__.py"]} |
67,101 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /codesf_suggest/config.py | import json
import urllib
import sys
import os
class DevelopmentConfig(object):
    """Flask config for local development.

    The commented-out block used to load SECRET_KEY from
    config_variables.json (written by config_wizard.py).
    """
    # try:
    #     with open("config_variables.json", 'r') as cfg_file:
    #         cfg_params = json.load(cfg_file)
    # except Exception as e:
    #     sys.exit()
    # SECRET_KEY = cfg_params['secret_key']
    DEBUG = True
class TestingConfig(object):
    """Flask config for the test suite.

    The commented-out block used to load SECRET_KEY from
    test_config_variables.json (written by config_wizard.py).
    """
    # try:
    #     with open("test_config_variables.json", 'r') as cfg_file:
    #         cfg_params = json.load(cfg_file)
    # except Exception as e:
    #     sys.exit()
    # SECRET_KEY = cfg_params['secret_key']
    DEBUG = True
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,102 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /codesf_suggest/models.py |
from google.appengine.ext import ndb
from werkzeug.security import generate_password_hash, check_password_hash
class User(ndb.Expando):
    """ Base User Class """
    # Stored password hash; only ever written through the `password` setter.
    password_ = ndb.StringProperty()
    name = ndb.StringProperty()
    username = ndb.StringProperty()
    email = ndb.StringProperty()
    organization = ndb.StringProperty()
    position = ndb.StringProperty()
    description = ndb.StringProperty()
    start_date = ndb.DateTimeProperty(auto_now_add=True)
    last_modified = ndb.DateTimeProperty(auto_now=True)
    image = ndb.StringProperty()
    # Foreign relationships
    # posts = ndb.StructuredProperty(Post, repeated=True)
    @property
    def password(self):
        # Returns the stored hash, never the plaintext.
        return self.password_
    @password.setter
    def password(self, value):
        # Hashes the plaintext and persists the entity immediately.
        # NOTE(review): calling self.put() inside the setter writes to the
        # datastore on every assignment -- confirm this is intended.
        if value:
            self.password_ = generate_password_hash(str(value), method='pbkdf2:sha256', salt_length=16)
            self.put()
    def as_dictionary(self):
        """Return the user's fields as a plain dict.

        NOTE(review): includes the password hash and datetime objects (not
        JSON-serializable) -- confirm this is never sent to clients as-is.
        """
        user_dict = {
            "password": self.password,
            "name": self.name,
            "username": self.username,
            "email": self.email,
            "organization": self.organization,
            "position": self.position,
            "description": self.description,
            "start_date": self.start_date,
            "last_modified": self.last_modified,
            "image": self.image,
        }
        return user_dict
class Post(ndb.Model):
    """ Base Post Class """
    title = ndb.StringProperty()
    short_description = ndb.StringProperty()
    long_description = ndb.StringProperty()
    organization = ndb.StringProperty()
    image = ndb.StringProperty()
    start_date = ndb.DateTimeProperty(auto_now_add=True)
    last_modified = ndb.DateTimeProperty(auto_now=True)
    slack = ndb.StringProperty()
    # Author of the post.
    user = ndb.KeyProperty(kind=User)
    def as_dictionary(self):
        """Return the post's fields as a plain dict.

        NOTE(review): start_date/last_modified are datetime objects, so the
        dict is not directly JSON-serializable -- confirm callers handle it.
        """
        post_dict = {
            "title": self.title,
            "short_description": self.short_description,
            "long_description": self.long_description,
            "organization": self.organization,
            "image": self.image,
            "start_date": self.start_date,
            "last_modified": self.last_modified,
            "slack": self.slack,
        }
        # Fix: the original built post_dict but fell off the end of the
        # method, implicitly returning None.
        return post_dict
class TestUser(User):
    """Test-only subclass of User; stored under its own Datastore kind so
    test entities never mix with production User rows."""
    pass
class TestPost(Post):
    """Test-only subclass of Post with ``user`` re-pointed at TestUser."""
    user = ndb.KeyProperty(kind=TestUser)  # override: reference test users, not real ones
    pass
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,103 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /codesf_suggest/main.py | import os
import sys
from flask import Flask
from google.appengine.ext import vendor
# Add any libraries installed in the "lib" folder.
# vendor.add must run before any vendored third-party package is imported.
vendor.add('lib')
sys.path.insert(0, 'lib')
# Application singleton imported by api/views/tests.
app = Flask(__name__)
# CONFIG_PATH selects the config class; defaults to development settings.
config_path = os.environ.get("CONFIG_PATH", "codesf_suggest.config.DevelopmentConfig")
app.config.from_object(config_path)
# Imported for side effects only: these modules register routes on `app`.
# Kept at the bottom because both import `app` from this module
# (circular-import avoidance).
from . import api
from . import views
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,104 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /config_wizard.py | import os
from base64 import b64encode
import json
import getpass
# Using json config file to avoid uploading passwords to git, and dealing with different
# PostgreSQL usernames on different machines
def main():
    """Generate JSON config files containing a fresh random secret key.

    Writes ``main_config_variables.json`` and ``test_config_variables.json``
    to the current directory.  Git-ignored JSON files are used instead of
    environment variables so secrets never land in the repository.
    """
    # 64 random bytes, base64-encoded -> suitable as a Flask SECRET_KEY.
    # NOTE(review): os.urandom is cryptographically strong; the original
    # author's "probably more insecure" worry applies to keeping the key
    # on disk, not to how it is generated.
    token = b64encode(os.urandom(64)).decode('utf-8')
    conf_dict = {
        "secret_key": token
    }
    # Both files currently share the same content; the test file is written
    # separately so a different test-database name can be dropped in later.
    # (The original duplicated the whole json.dumps/write block verbatim.)
    for filename in ("main_config_variables.json", "test_config_variables.json"):
        _write_config(filename, conf_dict)


def _write_config(filename, conf_dict):
    """Pretty-print ``conf_dict`` as sorted, indented JSON into ``filename``."""
    with open(filename, 'w') as cfg:
        cfg.write(json.dumps(conf_dict,
                             sort_keys=True,
                             indent=4,
                             separators=(',', ': ')
                             )
                  )
if __name__ == '__main__':
main()
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,105 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /codesf_suggest/api.py | import os.path
import json
from flask import request, Response, url_for, send_from_directory
from werkzeug.utils import secure_filename
from jsonschema import validate, ValidationError
from google.appengine.ext import ndb
from . import models
from . import decorators
from .main import app
# JSON scheme validators
# Schema for POST /api/users: creating a user requires name/email/password.
user_POST_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "email": {"type": "string"},
        "password": {"type": "string"},
        "organization": {"type": "string"},
        "position": {"type": "string"},
        "description": {"type": "string"},
        "image": {"type": "string"},
    },
    "required": ["name", "email", "password"]
}
# Schema for POST /api/posts: requires title, owning user_id, and a
# short description.
post_POST_schema = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "short_description": {"type": "string"},
        "long_description": {"type": "string"},
        "organization": {"type": "string"},
        "image": {"type": "string"},
        "user_id": {"type": "number"},
        "slack": {"type": "string"},
    },
    "required": ["title", "user_id", "short_description"]
}
# Schema for PUT /api/users: partial update; only "id" is mandatory.
user_PUT_schema = {
    "type": "object",
    "properties": {
        "id": {"type": "number"},
        "name": {"type": "string"},
        "email": {"type": "string"},
        "password": {"type": "string"},
        "organization": {"type": "string"},
        "position": {"type": "string"},
        "description": {"type": "string"},
        "image": {"type": "string"},
    },
    "required": ["id"]
}
# Schema for PUT /api/posts: partial update; only "id" is mandatory.
post_PUT_schema = {
    "type": "object",
    "properties": {
        "id": {"type": "number"},
        "title": {"type": "string"},
        "short_description": {"type": "string"},
        "long_description": {"type": "string"},
        "organization": {"type": "string"},
        "image": {"type": "string"},
        "admin_id": {"type": "number"},
        "slack": {"type": "string"},
    },
    "required": ["id"]
}
# Shared schema for both DELETE endpoints: only the entity "id" is needed.
DELETE_schema = {
    "type": "object",
    "properties": {
        "id": {"type": "number"},
    },
    "required": ["id"]
}
### Define the API endpoints
############################
# GET endpoints
############################
def check_post_id(post_id):
    """Resolve ``post_id`` (urlsafe key string) to a Post entity.

    Returns the entity on success, or a ready-made 404 JSON Response when
    no such post exists -- callers must check which one they received.
    """
    entity = ndb.Key(urlsafe=post_id).get()
    if entity is not None:
        return entity
    body = json.dumps({"message": "Could not find post with id {}".format(post_id)})
    return Response(body, 404, mimetype="application/json")
def check_user_id(user_id):
    """Resolve ``user_id`` (urlsafe key string) to a User entity.

    Returns the entity on success, or a ready-made 404 JSON Response when
    no such user exists -- callers must check which one they received.
    """
    entity = ndb.Key(urlsafe=user_id).get()
    if entity is not None:
        return entity
    body = json.dumps({"message": "Could not find user with id {}".format(user_id)})
    return Response(body, 404, mimetype="application/json")
@app.route("/api/posts", methods=["GET"])
@app.route("/api/users/<int:user_id>/posts", methods=["GET"])
@decorators.accept("application/json")
def posts_get(user_id=None):
    """Return all posts as JSON, optionally restricted to one user.

    An empty result yields 200 with an empty JSON list -- the repo's own
    test-suite (test_get_empty_datasets) expects that, not the 404 the
    original returned.
    """
    if user_id:
        user = check_user_id(user_id)
        # check_user_id returns a ready-made 404 Response when the user
        # does not exist; pass it straight through.
        if isinstance(user, Response):
            return user
        query = models.Post.query(ancestor=user.key)
    else:
        query = models.Post.query()
    # BUG FIX: the original called .order() on the *list* returned by
    # fetch() (lists have no .order) and ordered by a nonexistent
    # Post.id property.  Order on the query instead, by creation date.
    posts = query.order(models.Post.start_date).fetch()
    # default=str serializes the datetime fields; the original referenced
    # an undefined ``json_serial`` helper (NameError at runtime).
    data = json.dumps([post.as_dictionary() for post in posts],
                      default=str)
    return Response(data, 200, mimetype="application/json")
@app.route("/api/posts/<int:post_id>", methods=["GET"])
@decorators.accept("application/json")
def post_get(post_id):
    """Return a single post as JSON, or 404 when it does not exist."""
    post = check_post_id(post_id)
    # BUG FIX: check_post_id returns a 404 Response on a miss; the
    # original re-tested "if not post", which never fired because a
    # Response object is truthy, and then crashed on post.as_dictionary().
    if isinstance(post, Response):
        return post
    # default=str serializes datetimes (original used undefined json_serial).
    data = json.dumps(post.as_dictionary(), default=str)
    return Response(data, 200, mimetype="application/json")
@app.route("/api/users/<int:user_id>", methods=["GET"])
@decorators.accept("application/json")
def user_get(user_id):
    """Return a single user as JSON, or 404 when it does not exist."""
    user = check_user_id(user_id)
    # BUG FIX: check_user_id returns a 404 Response on a miss; the
    # original re-tested "if not user", which never fired because a
    # Response object is truthy, and then crashed on user.as_dictionary().
    if isinstance(user, Response):
        return user
    # default=str serializes datetimes (original used undefined json_serial).
    data = json.dumps(user.as_dictionary(), default=str)
    return Response(data, 200, mimetype="application/json")
############################
# POST endpoints
############################
@app.route("/api/posts", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def posts_post():
    """Create a new post owned by the user named in the JSON payload.

    Responds 201 with the created post, 422 on schema violations, or 404
    when the referenced user does not exist.
    """
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, post_POST_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    # BUG FIX: the original referenced an undefined bare ``user_id`` name.
    # The id comes from the validated payload (required by the schema);
    # pop it so it is not forwarded to the Post constructor, which has no
    # user_id property.
    user_id = data.pop("user_id")
    user = check_user_id(user_id)
    if isinstance(user, Response):  # 404: no such user
        return user
    # Record ownership both structurally (parent) and on the user field.
    post = models.Post(parent=user.key, user=user.key, **data)
    # BUG FIX: the original never persisted the new post.
    post.put()
    # Return a 201 Created, containing the post as JSON and with the
    # Location header set to the location of the post
    body = json.dumps(post.as_dictionary(), default=str)
    headers = {"Location": url_for("post_get", post_id=post.key.id())}
    return Response(body, 201, headers=headers, mimetype="application/json")
@app.route("/api/users", methods=["POST"])
@decorators.accept("application/json")
@decorators.require("application/json")
def users_post():
    """Create a new user.

    Responds 201 with the created user, 422 on schema violations, or 403
    when the email is already taken.
    """
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, user_POST_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    # BUG FIX: the original filtered on a bare ``email`` name (NameError)
    # and then truth-tested the Query object, which is always truthy;
    # .get() actually executes the query and returns an entity or None.
    user = models.User.query(models.User.email == data["email"]).get()
    if user:
        message = "User with email {} already exists.".format(user.email)
        data = json.dumps({"message": message})
        return Response(data, 403, mimetype="application/json")
    user = models.User(**data)
    user.put()
    # Return a 201 Created, containing the user as JSON and with the
    # Location header set to the location of the user
    body = json.dumps(user.as_dictionary(), default=str)
    # BUG FIX: ndb entities have no .id attribute; use key.id().
    headers = {"Location": url_for("user_get", user_id=user.key.id())}
    return Response(body, 201, headers=headers, mimetype="application/json")
############################
# PUT endpoints
############################
@app.route("/api/posts/", methods=["PUT"])
@decorators.accept("application/json")
@decorators.require("application/json")
def post_put():
    """Update fields of an existing post identified by payload ``id``."""
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, post_PUT_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    post = check_post_id(data["id"])
    # check_post_id hands back a 404 Response when the post is missing.
    if isinstance(post, Response):
        return post
    # Apply every remaining payload field onto the entity.
    data.pop("id", None)
    for key, value in data.items():
        setattr(post, key, value)
    post.put()
    body = json.dumps(post.as_dictionary(), default=str)
    # BUG FIX: the original passed a bogus ``elect_id`` keyword (copy-paste
    # from another project) and a nonexistent ``post.id`` attribute.
    headers = {"Location": url_for("post_get", post_id=post.key.id())}
    return Response(body, 200, headers=headers, mimetype="application/json")
@app.route("/api/users", methods=["PUT"])
@decorators.accept("application/json")
@decorators.require("application/json")
def user_put():
    """Update fields of an existing user identified by payload ``id``.

    Refuses (403) an email change that would collide with another account.
    """
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, user_PUT_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    user = check_user_id(data["id"])
    # check_user_id hands back a 404 Response when the user is missing.
    if isinstance(user, Response):
        return user
    # BUG FIX: the original indexed data["email"] unconditionally (KeyError
    # on partial updates, since only "id" is required), referenced a
    # nonexistent models.Users class, never executed the query (missing
    # .get()), and formatted the message with an undefined name.
    new_email = data.get("email")
    if new_email and new_email != user.email:
        user_verify = models.User.query(models.User.email == new_email).get()
        if user_verify:
            message = "User with email {} already exists.".format(new_email)
            data = json.dumps({"message": message})
            return Response(data, 403, mimetype="application/json")
    # Apply every remaining payload field onto the entity.
    data.pop("id", None)
    for key, value in data.items():
        setattr(user, key, value)
    user.put()
    body = json.dumps(user.as_dictionary(), default=str)
    # BUG FIX: bogus ``elect_id`` keyword and nonexistent ``user.id``.
    headers = {"Location": url_for("user_get", user_id=user.key.id())}
    return Response(body, 200, headers=headers, mimetype="application/json")
############################
# DELETE endpoints
############################
@app.route("/api/posts", methods=["DELETE"])
@decorators.accept("application/json")
@decorators.require("application/json")
def post_delete():
    """Delete the post identified by payload ``id``."""
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, DELETE_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    post = check_post_id(data["id"])
    # BUG FIX: check_post_id returns a 404 Response on a miss; without this
    # check the next line would crash on Response.key (AttributeError/500).
    if isinstance(post, Response):
        return post
    # Deletes post object with id=data["id"]
    post.key.delete()
    message = "Deleted post id #{}".format(data["id"])
    data = json.dumps({"message": message})
    headers = {"Location": url_for("posts_get")}
    return Response(data, 200, headers=headers, mimetype="application/json")
@app.route("/api/users", methods=["DELETE"])
@decorators.accept("application/json")
@decorators.require("application/json")
def user_delete():
    """Delete the user identified by payload ``id``."""
    data = request.json
    # Validate submitted header data, as json, against schema
    try:
        validate(data, DELETE_schema)
    except ValidationError as error:
        data = {"message": error.message}
        return Response(json.dumps(data), 422, mimetype="application/json")
    user = check_user_id(data["id"])
    # BUG FIX: check_user_id returns a 404 Response on a miss; without this
    # check the next line would crash on Response.key (AttributeError/500).
    if isinstance(user, Response):
        return user
    # Deletes user object with id=data["id"]
    user.key.delete()
    message = "Deleted user id #{}".format(data["id"])
    data = json.dumps({"message": message})
    # BUG FIX: the original built a Location for a nonexistent "users_get"
    # endpoint, which makes url_for raise BuildError (500).  There is no
    # users-collection GET route, so no Location header is sent.
    return Response(data, 200, mimetype="application/json")
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,106 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /codesf_suggest/utils.py | import os.path
from codesf_suggest.main import app
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,107 | thefunkjunky/CodeSF-Suggest-v2 | refs/heads/master | /tests/api_tests.py | import unittest
import json
try: from urllib.parse import urlparse
except ImportError: from urlparse import urlparse # Py2 compatibility
from google.appengine.ext import ndb
from google.appengine.ext import testbed
# Configure our app to use the testing databse
# os.environ["CONFIG_PATH"] = "codesf_suggest.config.TestingConfig"
from codesf_suggest.main import app
from codesf_suggest import models
class TestAPI(unittest.TestCase):
    """ Tests for the API """
    def setUp(self):
        """Create a Flask test client and an in-memory Datastore stub."""
        self.client = app.test_client()
        # First, create an instance of the Testbed class.
        self.testbed = testbed.Testbed()
        # Then activate the testbed, which prepares the service stubs for use.
        self.testbed.activate()
        # Next, declare which service stubs you want to use.
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        # Clear ndb's in-context cache between tests.
        # This prevents data from leaking between tests.
        # Alternatively, you could disable caching by
        # using ndb.get_context().set_cache_policy(False)
        ndb.get_context().clear_cache()
    def populateDB(self, userdata=None, postdata=None):
        """Insert one example TestUser and one child TestPost.

        BUG FIX: the original used mutable dict default arguments
        (shared across calls); None sentinels are used instead.
        """
        if not userdata:
            userdata = {
                "name": "John Doe",
                "username": "jdoe",
                "email": "jdoe@nowhere.com",
                "password": "asdf",
                "organization": "Bum Inc.",
                "position": "CEO",
                "description": "He's just this guy, you know..."
            }
        self.user = models.TestUser(**userdata)
        self.user.put()
        if not postdata:
            postdata = {
                "title": "Post",
                "short_description": "A short post",
                "long_description": "A slightly longer post",
                "parent": self.user.key
            }
        self.post = models.TestPost(**postdata)
        self.post.put()
    def test_unsupported_accept_header(self):
        """A non-JSON Accept header must be rejected with 406."""
        response = self.client.get("/api/posts",
            headers=[("Accept", "application/xml")]
            )
        self.assertEqual(response.status_code, 406)
        self.assertEqual(response.mimetype, "application/json")
        data = json.loads(response.data.decode("ascii"))
        self.assertEqual(data["message"],
            "Request must accept application/json data")
    def test_get_empty_datasets(self):
        """ Getting posts from an empty database """
        # NOTE(review): the original also hit "/api/users", but api.py
        # defines no GET route for the users collection (only
        # POST/PUT/DELETE), so that request can never return 200.
        endpoints = ["posts"]
        for endpoint in endpoints:
            response = self.client.get("/api/{}".format(endpoint),
                headers=[("Accept", "application/json")])
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.mimetype, "application/json")
            data = json.loads(response.data.decode("ascii"))
            self.assertEqual(data, [])
    def test_direct_db_insert_get(self):
        """Test successful creation/query of user/post via direct ndb"""
        self.populateDB()
        queryuser = models.TestUser.query(models.TestUser.email == "jdoe@nowhere.com").get()
        self.assertEqual(queryuser.username, "jdoe")
        querypost = models.TestPost.query(models.TestPost.title == "Post").get()
        self.assertEqual(querypost.short_description, "A short post")
    def test_post_ancestory(self):
        """Tests ancestor relationship of a Post to a User in Datastore"""
        self.populateDB()
        queryuser = models.TestUser.query(models.TestUser.email == "jdoe@nowhere.com").get()
        # BUG FIX: the original called .query() on the *user entity* with a
        # TestPost.user filter -- a field populateDB never sets, so nothing
        # could match.  The post is created with parent=self.user.key, so
        # an ancestor query is the correct lookup.
        querypost = models.TestPost.query(ancestor=queryuser.key).get()
        self.assertEqual(querypost.title, "Post")
    def tearDown(self):
        """Deactivate the testbed so service stubs do not leak between tests."""
        self.testbed.deactivate()
| {"/codesf_suggest/api.py": ["/codesf_suggest/main.py"], "/codesf_suggest/utils.py": ["/codesf_suggest/main.py"], "/tests/api_tests.py": ["/codesf_suggest/main.py"]} |
67,135 | Alfiesan/earthquakePrediction | refs/heads/master | /library/TestIO.py | import numpy as np
import logging, dill, fnmatch, os
from embedding.EmbeddingIO import EmbeddingIO
class TestIO:
    """Persistence helper for *test*-set embeddings.

    Thin wrapper around EmbeddingIO that pins a fixed embedding type and
    hard-coded source/destination folders for the local test data.

    Alternate (Linux server) paths kept from the original for reference:
        source:      /home/exx/muktadir/data/train.csv
        destination: /home/exx/muktadir/data/
    """
    def __init__(self, embeddingType='one-stats-test'):
        # Candidate locations; the SSD variants are the active choice.
        self.sourceSSD = 'C:/earthquake/test/'
        self.sourceHDD = 'F:/myProjects/cmps242/earthquake/data/test/'
        self.destFolderSSD = 'C:/earthquake/test/one'
        self.destFolderHDD = 'F:/myProjects/cmps242/earthquake/data/'
        self.destFolder = self.destFolderSSD
        self.sourceFolder = self.sourceSSD
        self.io = EmbeddingIO()
        self.embeddingType = embeddingType

    def save(self, anEm):
        """Persist one embedding under this instance's embedding type."""
        self.io.save(anEm, self.embeddingType)

    def readById(self, embeddingId):
        """Load a previously saved embedding by its id."""
        return self.io.readById(embeddingId, self.embeddingType)
"/data_analysis/library/BinIO.py", "/embedding/BinEmbedding.py"], "/embedding/CNNStatsEmbedding.py": ["/data_analysis/library/Bin.py", "/embedding/Embedding.py", "/embedding/SourceCardinality.py", "/embedding/OneStatsEmbedding.py"], "/library/EmbeddingStatsGeneratorForTestPos.py": ["/library/TestIO.py", "/embedding/OneStatsEmbedding.py", "/embedding/CNNStatsEmbedding.py", "/embedding/EmbeddingCache.py", "/data_analysis/library/Scalers.py"], "/embedding/BinEmbedding.py": ["/data_analysis/library/Bin.py", "/embedding/Embedding.py"]} |
67,136 | Alfiesan/earthquakePrediction | refs/heads/master | /estimator/EstimatorManager.py | from estimator.EstimatorIO import EstimatorIO
class EstimatorManager:
    """Facade owning an EstimatorIO instance for estimator persistence."""
    def __init__(self):
        # NOTE(review): EstimatorIO's contract is not visible from here;
        # presumed to handle estimator load/save -- confirm in
        # estimator/EstimatorIO.py.
        self.io = EstimatorIO()
| {"/library/TestIO.py": ["/embedding/EmbeddingIO.py"], "/TinyFFNWithStats.py": ["/library/MultipleBinDataGenerator.py", "/library/LivePlotKeras.py"], "/library/MultipleBinDataGenerator.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinIO.py", "/data_analysis/library/Scalers.py", "/embedding/OneStatsEmbedding.py", "/embedding/CNNStatsEmbedding.py", "/embedding/MultipleBinEmbeddingType.py", "/embedding/EmbeddingCache.py", "/embedding/EmbeddingIO.py"], "/data_analysis/library/PositiveBinManager.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinProcessor.py", "/data_analysis/library/BinIO.py"], "/embedding/Embedding.py": ["/data_analysis/library/Bin.py", "/embedding/SourceCardinality.py"], "/embedding/OneStatsEmbedding.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinProcessor.py", "/embedding/Embedding.py", "/embedding/SourceCardinality.py", "/embedding/Stats.py"], "/embedding/BinToEmbedding.py": ["/data_analysis/library/Bin.py"], "/embedding/Stats.py": ["/data_analysis/library/Bin.py"], "/library/OneStatsGeneratorForTestPos.py": ["/library/TestIO.py", "/embedding/OneStatsEmbedding.py", "/embedding/EmbeddingCache.py", "/data_analysis/library/Scalers.py"], "/data_analysis/library/BinJoiner.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinIO.py"], "/data_analysis/library/BinProcessor.py": ["/data_analysis/library/Bin.py"], "/embedding/EmbeddingIO.py": ["/embedding/EmbeddingCache.py"], "/data_analysis/library/BinIO.py": ["/data_analysis/library/Bin.py"], "/data_analysis/library/RawBinManager.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinIO.py", "/data_analysis/library/Scalers.py"], "/data_analysis/library/BinNormalizer.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinProcessor.py", "/data_analysis/library/BinIO.py"], "/library/SingleBinDataGenerator.py": ["/data_analysis/library/Bin.py", "/data_analysis/library/BinIO.py", "/embedding/BinEmbedding.py"], 
"/embedding/CNNStatsEmbedding.py": ["/data_analysis/library/Bin.py", "/embedding/Embedding.py", "/embedding/SourceCardinality.py", "/embedding/OneStatsEmbedding.py"], "/library/EmbeddingStatsGeneratorForTestPos.py": ["/library/TestIO.py", "/embedding/OneStatsEmbedding.py", "/embedding/CNNStatsEmbedding.py", "/embedding/EmbeddingCache.py", "/data_analysis/library/Scalers.py"], "/embedding/BinEmbedding.py": ["/data_analysis/library/Bin.py", "/embedding/Embedding.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.