index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
52,003 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/message/migrations/0001_initial.py | # Generated by Django 2.2.7 on 2019-11-28 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the message app: creates the Message table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                # msgId is a string primary key (views derive it from a
                # perf-counter timestamp at submit time).
                ('msgId', models.CharField(max_length=30, primary_key=True, serialize=False)),
                ('linkman', models.CharField(max_length=20)),
                ('contactWay', models.CharField(max_length=30)),
                ('price', models.FloatField()),
                ('detail', models.CharField(max_length=240)),
                ('img', models.CharField(max_length=200)),
                # post_date set once at creation; last_date refreshed on save.
                ('post_date', models.DateTimeField(auto_now_add=True)),
                ('last_date', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,004 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/authorize/views.py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound,JsonResponse
from django.views.decorators.http import require_http_methods
from .models import User,OnlineUser
import json
from .utils import jwcAuth
from .utils import Token
# Create your views here.
@require_http_methods(["GET", "POST"])
def loginIn(request):
    """Log a user in.

    POST params: ``muser`` (account) and ``passwd``.  Authenticates
    against the upstream system via ``jwcAuth.request``, creates the
    local User row on first login, and creates or refreshes the token
    in OnlineUser.  Always replies HTTP 200; the JSON ``status`` field
    carries the real result (200 on success, 500 on failure).
    """
    result = {
        "status": 500,
        "message": None,
        "data": None
    }
    if request.method == 'GET':
        msg = "请使用post请求"
        result["message"] = msg
        return JsonResponse(result, safe=False, status=200)
    elif request.method == 'POST':
        try:
            userAccount = request.POST["muser"]
            passWord = request.POST["passwd"]
            dic = {}
            dic["muser"] = userAccount
            dic["passwd"] = passWord
            dic = jwcAuth.request(dic)
            if dic == -1:
                # Upstream authentication rejected the credentials.
                raise Exception
            # Check login: create a local account on first successful login.
            res = User.objects.filter(account=userAccount)
            if len(res) == 0:
                print("为用户{0}创建账户".format(userAccount))
                User.objects.create(account=userAccount, passwd=passWord)
            else:
                print("用户{0}登录成功".format(userAccount))
            # Create or refresh the session token.
            res = OnlineUser.objects.filter(account=userAccount)
            if len(res) == 0:
                print("为用户{0}创建token".format(userAccount))
                OnlineUser.objects.create(account=userAccount,
                                          token=Token.get_token(str(userAccount)))
            else:
                print("为用户{0}修改token".format(userAccount))
                # BUG FIX: the refreshed token was assigned to res[0] but
                # never saved (and each res[0] indexing re-queries the db,
                # discarding the assignment).  Bind the row once and save.
                online = res[0]
                online.token = Token.get_token(str(userAccount))
                online.save()
            result["status"] = 200
            result["message"] = "Success"
            result["data"] = dic
            return JsonResponse(result, safe=False, status=200)
        except Exception as e:
            result["message"] = str(e)
            return JsonResponse(result, safe=False, status=200)
@require_http_methods(["GET", "POST"])
def loginOut(request):
    """Log a user out by deleting their OnlineUser token row.

    POST param: ``muser`` (account).  Always replies HTTP 200; the JSON
    ``message`` field reports 'Success' or 'Failed to delete token'.
    A GET request returns the envelope unchanged (status 500 inside).
    """
    result = {
        "status": 500,
        "message": None,
        "data": []
    }
    if request.method == "POST":
        userAccount = request.POST["muser"]
        # NOTE: the password used to be read here but was never used or
        # verified; logout only needs the account, so the read was dropped.
        res = OnlineUser.objects.filter(account=userAccount)
        if len(res) == 0:
            print("用户{0}没有在线".format(userAccount))
            result["message"] = 'Failed to delete token'
        else:
            print("为用户{0}删除token".format(userAccount))
            result["message"] = 'Success'
            res[0].delete()
    return JsonResponse(result, safe=False, status=200)
# def check(request):
def search(request, sort="default"):
    """Search messages by contact person (``linkman``).

    POST param: ``linkman``.  Returns at most 15 matches in a JSON
    envelope.  Non-POST requests get an empty list.  ``sort`` is
    accepted but currently unused.
    """
    # BUG FIX: Message was referenced here without being imported in this
    # module, so every POST raised NameError.  Import locally to avoid
    # touching the module's import block.
    from message.models import Message

    result = {
        "status": 500,
        "message": None,
        "data": None
    }
    list_meg = []
    if request.method == "POST":
        linkman = request.POST["linkman"]
        count = 0
        for i in Message.objects.filter(linkman=linkman):
            item = {'msgId': i.msgId, 'linkman': i.linkman,
                    'contactWay': i.contactWay, 'price': i.price,
                    'detail': i.detail, 'img': i.img,
                    'post_date': str(i.post_date),
                    'last_date': str(i.last_date)}
            list_meg.append(item)
            count += 1
            if count >= 15:  # cap the result size
                break
    # Both branches produced an identical response; deduplicated tail.
    result['data'] = list_meg
    print(json.dumps(result, ensure_ascii=False))
    return HttpResponse(json.dumps(result, ensure_ascii=False))
52,005 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/message/views.py | from django.shortcuts import render
import json,time
import os
from PIL import Image
from utils.common import hashFile
from .models import Message
from django.http import HttpResponse, HttpResponseNotFound,JsonResponse
# Create your views here.
def returnDetail(request, id, category):
    """Return the detail of one message ``id`` in ``category``.

    Routed from /message/<category>/<id>/detail -- not implemented yet.
    """
    pass
def submmit(request, category):  # Submit a second-hand / task message
    """Handle POST /message/<category>/submmit.

    Expects form fields price/phone/user/userID/detail plus an uploaded
    file ``img``.  The image is written under ./media/ with a random
    numeric prefix and a Message row is created.  Always replies with a
    JSON envelope; ``message`` is 'Success' or the exception text.
    """
    result = {
        "status": 500,
        "message": None,
        "data": None
    }
    if request.method == "POST":
        try:
            # msgId derived from a perf-counter reading scaled to an int.
            msgId = str((int(time.perf_counter() * 1000000000000)))
            # msgId=str(int(round(time.time())*100000))
            price = request.POST["price"]
            phone = request.POST["phone"]
            linkman = request.POST["user"]
            userID = request.POST["userID"]
            detail = request.POST["detail"]
            imgSrc = request.FILES.get('img')
            try:
                # NOTE(review): duplicate read of "price" (already read
                # above) -- looks like leftover debugging.
                price = request.POST["price"]
                print(price)
            except Exception as e:
                print(e)
            res = Message.objects.filter(msgId=msgId)
            import random
            ranstr = str(random.randint(65536, 214748364))
            # NOTE(review): imgSrc may be None when no file was uploaded
            # (FILES.get), and the client-supplied name is embedded in the
            # path unsanitized -- possible path traversal; confirm and
            # sanitize (e.g. os.path.basename) before deployment.
            msgpathname = ranstr + imgSrc.name
            if len(res) == 0:
                print("插入图片")
                print(imgSrc.name)
                filename = "./media/" + msgpathname
                # Stream the uploaded chunks to disk.
                with open(filename, 'wb') as f:
                    for c in imgSrc.chunks():
                        f.write(c)
                # Message.objects.create(msgId=msgId, img=imgList[1:])
                # img=Image.open(filename)
                # w,h=img.size
                # img.resize((w/2,h/2),Image.ANTIALIAS)
                # img.save(filename,'jpeg',quality=95)
            else:
                pass
            Message.objects.create(msgId=msgId, linkman=linkman, userID=userID, contactWay=phone, price=float(price), detail=detail, img=msgpathname)
            result["message"] = 'Success'
            result["data"] = "hello"
        except Exception as e:
            print(e)
            result["message"] = str(e)
    return JsonResponse(result, safe=False)
def returnList(request, category, sort="default"):
    """Return up to 20 not-deleted messages in a JSON envelope.

    ``category`` and ``sort`` are accepted from the route but currently
    unused by the query.
    """
    result = {
        "status": 500,
        "message": None,
        "data": None,
    }
    rows = []
    # TODO: 返回二手信息列表
    # list_meg=Message.objects.filter(price = 1000000)
    for msg in Message.objects.filter(isDel=False):
        rows.append({
            'msgId': msg.msgId,
            'linkman': msg.linkman,
            'contactWay': msg.contactWay,
            'price': msg.price,
            'detail': msg.detail,
            'img': msg.img,
            'post_date': str(msg.post_date),
            'last_date': str(msg.last_date),
        })
        if len(rows) >= 20:
            break
    result['data'] = rows
    payload = json.dumps(result, ensure_ascii=False)
    print(payload)
    return HttpResponse(payload)
def returnimg(request, category):
    """Serve a raw image file from ./media/.

    GET param ``img`` names the file.  Returns the file bytes on
    success; on any error the exception is printed and an implicit
    None is returned (kept for compatibility with the original).
    """
    if request.method == "GET":
        try:
            filename = request.GET.get('img')
            # SECURITY NOTE: ``filename`` comes straight from the client
            # and is joined into a filesystem path -- a '../' value could
            # escape ./media/.  Flagged for review; behavior unchanged.
            #
            # FIX: use a context manager so the handle is closed even if
            # read() raises (the original leaked it on error), and open
            # read-only instead of 'rb+' since nothing is written.
            with open(os.getcwd() + '/media/' + filename, 'rb') as f:
                imgSrc = f.read()
            return HttpResponse(imgSrc)
        except Exception as e:
            print(e)
def search(request, category, sort="default"):
    """Search not-deleted messages by contact person.

    POST form field ``Query`` holds the linkman to match; at most 20
    rows are returned.  Non-POST requests get an empty list.
    """
    result = {
        "status": 500,
        "message": None,
        "data": None,
    }
    matches = []
    if request.method == "POST":
        who = request.POST["Query"]
        for msg in Message.objects.filter(linkman=who, isDel=False):
            matches.append({
                'msgId': msg.msgId,
                'linkman': msg.linkman,
                'contactWay': msg.contactWay,
                'price': msg.price,
                'detail': msg.detail,
                'img': msg.img,
                'post_date': str(msg.post_date),
                'last_date': str(msg.last_date),
            })
            if len(matches) >= 20:
                break
    result['data'] = matches
    payload = json.dumps(result, ensure_ascii=False)
    print(payload)
    return HttpResponse(payload)
def returnMyList(request, category, sort="default"):
    """Return up to 20 of the posting user's own not-deleted messages.

    POST form field ``userID`` selects the owner.  Non-POST requests
    get an empty list.
    """
    result = {
        "status": 500,
        "message": None,
        "data": None,
    }
    mine = []
    if request.method == "POST":
        owner = request.POST["userID"]
        for msg in Message.objects.filter(userID=owner, isDel=False):
            mine.append({
                'msgId': msg.msgId,
                'linkman': msg.linkman,
                'contactWay': msg.contactWay,
                'price': msg.price,
                'detail': msg.detail,
                'img': msg.img,
                'post_date': str(msg.post_date),
                'last_date': str(msg.last_date),
            })
            if len(mine) >= 20:
                break
    result['data'] = mine
    payload = json.dumps(result, ensure_ascii=False)
    print(payload)
    return HttpResponse(payload)
def dele(request, category, sort="default"):
    """Soft-delete a message: set isDel=True on the row named by msgId.

    POST param: ``msgId``.  Replies with a JSON envelope whose
    ``message`` is 'success' or 'fail'.
    """
    result = {
        "status": 500,
        "message": None,
        "data": None
    }
    if request.method == "POST":
        try:
            # FIX: Message.objects.get() on an unknown id (or a missing
            # msgId field) used to raise uncaught and produce an HTML 500
            # page; report a JSON 'fail' instead.
            msg = Message.objects.get(msgId=request.POST["msgId"])
        except Exception as e:
            print(e)
            result['message'] = 'fail'
        else:
            msg.isDel = True
            msg.save()
            result['message'] = 'success'
    else:
        result['message'] = 'fail'
    payload = json.dumps(result, ensure_ascii=False)
    print(payload)
    return HttpResponse(payload)
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,006 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/message/urls.py | from django.contrib import admin
from django.urls import path
from . import views
"""
Routes under /message/:
    /message/<category>/<id>/detail   GET  -- one specific message
    /message/<category>/submmit       POST -- create a message
    /message/<category>/list/<sort>        -- top entries of a category
    /message/<category>/returnList    POST -- list messages
"""
app_name = 'message'
urlpatterns = [
    path('<str:category>/<int:id>/detail', views.returnDetail, name='message'),
    path('<str:category>/submmit', views.submmit, name='message'),
    # NOTE(review): views.returnList defaults sort to the *string*
    # 'default', but this route converts <int:sort> -- confirm the
    # intended type (sort is currently unused by the view).
    path('<str:category>/list/<int:sort>', views.returnList, name='message'),
    path('<str:category>/returnList', views.returnList, name='message'),
    path('<str:category>/returnimg', views.returnimg, name='message'),
    path('<str:category>/search', views.search),
    path('<str:category>/returnMyList', views.returnMyList),
    path('<str:category>/dele', views.dele),
]
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,007 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/testApi.py | import requests
import json
import time
# Shared login payload used by every request below.
# NOTE(review): real-looking credentials are hard-coded in the repo --
# they should be moved to environment variables / a gitignored config.
form = {
    "muser": "04170324",
    "passwd": "10086KKDDaaggccd"
}
def Login():
    """POST the shared credentials to the production loginIn endpoint."""
    x = requests.post("http://122.112.159.211/authorize/loginIn", data=form)
    print(x.text)
def LoginOut():
    """POST the shared credentials to the local loginOut endpoint."""
    x = requests.post("http://127.0.0.1:8000/authorize/loginOut", data=form)
    print(json.loads(x.text))
def upload():
    """POST a test message with two image attachments to the local server.

    Requires ./1.jpg and ./2.jpg fixtures next to this script.
    """
    try:
        data = {
            "price": "11.2",
            "phone": "1232",
            "user": "041702324",
            "detail": "1112"
        }
        # FIX: open the fixtures in a context manager so the handles are
        # always closed (the original leaked both file objects).
        with open('./2.jpg', 'rb') as img2, open('./1.jpg', 'rb') as img1:
            files = [("img", img2), ("img", img1)]
            x = requests.post("http://127.0.0.1:8000/message/1/submmit",
                              data=data, files=files)
        # FIX: json.loads() no longer accepts an ``encoding`` argument on
        # Python 3.9+; requests has already decoded .text to str anyway.
        print(json.loads(x.text))
    except Exception as e:
        print(e)
if __name__ == "__main__":
    # Manual smoke tests -- uncomment the call you want to exercise.
    # upload()
    Login()
    # LoginOu
    # t()
    # print(int(round(time.time() * 1000)))
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,008 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/message/models.py | from django.db import models
# Create your models here.
class Message(models.Model):
    """A second-hand / task posting."""

    msgId = models.CharField(max_length=30, primary_key=True)  # message id
    linkman = models.CharField(max_length=20)                  # contact person
    userID = models.CharField(max_length=20, default='123456789')
    contactWay = models.CharField(max_length=30)
    price = models.FloatField()
    # title=models.CharField(max_length=40)  # title, at most 20 chars
    detail = models.CharField(max_length=240)  # description, at most 120 chars
    img = models.CharField(max_length=200)
    post_date = models.DateTimeField(auto_now_add=True)  # submission time
    last_date = models.DateTimeField(auto_now=True)      # last modified time
    isDel = models.BooleanField(default=False)

    def __str__(self):
        # Render the public fields as a dict literal, same key order as
        # before (dict insertion order is preserved).
        return str({
            'msgId': self.msgId,
            'linkman': self.linkman,
            'contactWay': self.contactWay,
            'price': self.price,
            'detail': self.detail,
            'img': self.img,
            'post_date': self.post_date,
            'last_date': self.last_date,
        })
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,009 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/authorize/urls.py | from django.contrib import admin
from django.urls import path
from . import views
"""
Routes under /authorize/:
    /authorize/loginIn   -- log in
    /authorize/loginOut  -- log out
"""
app_name = 'authorize'
urlpatterns = [
    path('loginIn', views.loginIn),
    path('loginOut', views.loginOut)
]
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,010 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/authorize/models.py | from django.db import models
# Create your models here.
class OnlineUser(models.Model):
    """Token table: one row per currently logged-in account."""
    account = models.CharField(max_length=9, primary_key=True)
    # Token format is microsecond-timestamp + account (see utils/Token.py).
    token = models.CharField(max_length=27)
class User(models.Model):
    """A registered user.

    NOTE(review): the password is stored in plain text -- it should be
    hashed (e.g. django.contrib.auth.hashers.make_password) before
    persisting.
    """
    account = models.CharField(max_length=9, primary_key=True)
    passwd = models.CharField(max_length=20)

    def __str__(self):
        return self.account
class PersonalInformatin(models.Model):
    """Per-user profile data.

    NOTE(review): the class name misspells 'Information'; renaming would
    require a migration, so it is only flagged here.
    """
    name = models.CharField(max_length=20, default="赵四")
    # NOTE(review): default is the int 123456789 for a CharField -- it
    # should be the string '123456789'.
    number = models.CharField(max_length=9, primary_key=True, default=123456789)
    sex = models.CharField(max_length=4, default="男")
    birth = models.CharField(max_length=10, default='string')
    college = models.CharField(max_length=30, default='SOME STRING')
    major = models.CharField(max_length=30, default='SOME STRING')
    grade = models.IntegerField(default=0)
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,011 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/authorize/utils/Token.py | import time
def get_token(userNumber):
    """Build a login token: current time in microseconds + the account id.

    For 9-digit accounts the result fits OnlineUser.token's 27 chars.
    """
    micros = int(round(time.time() * 1000000))
    return "{0}{1}".format(micros, userNumber)
# Sample outputs:
# 1574842175560572041702324
# 1574842197056093041702324
if __name__ == "__main__":
    # Manual check: print one freshly generated token.
    print(get_token('041702324'))
52,012 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/utils/common.py | import hashlib
def hashFile(file):
    """Return the SHA-256 hex digest of a binary file-like object.

    Reads in 64 KiB chunks so large uploads are not held in memory.
    Previously an unimplemented stub that returned None.
    """
    digest = hashlib.sha256()
    for chunk in iter(lambda: file.read(65536), b''):
        digest.update(chunk)
    return digest.hexdigest()
52,013 | opsiff/djangowebsite | refs/heads/master | /XianYuBack/authorize/migrations/0001_initial.py | # Generated by Django 2.2.7 on 2019-11-28 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the authorize app: OnlineUser,
    PersonalInformatin and User tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='OnlineUser',
            fields=[
                ('account', models.CharField(max_length=9, primary_key=True, serialize=False)),
                ('token', models.CharField(max_length=27)),
            ],
        ),
        migrations.CreateModel(
            name='PersonalInformatin',
            fields=[
                ('name', models.CharField(default='赵四', max_length=20)),
                # NOTE(review): int default for a CharField (mirrors the
                # model definition).
                ('number', models.CharField(default=123456789, max_length=9, primary_key=True, serialize=False)),
                ('sex', models.CharField(default='男', max_length=4)),
                ('birth', models.CharField(default='string', max_length=10)),
                ('college', models.CharField(default='SOME STRING', max_length=30)),
                ('major', models.CharField(default='SOME STRING', max_length=30)),
                ('grade', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('account', models.CharField(max_length=9, primary_key=True, serialize=False)),
                ('passwd', models.CharField(max_length=20)),
            ],
        ),
    ]
| {"/XianYuBack/authorize/views.py": ["/XianYuBack/authorize/models.py"]} |
52,018 | rlr/fjord | refs/heads/master | /fjord/base/tests/test_forms.py | from django.core.exceptions import ValidationError
from django.test import TestCase
from fjord.base.forms import EnhancedURLField
from fjord.base.tests import eq_
class EnhancedURLFieldTests(TestCase):
    """Tests for the form-level EnhancedURLField (about:/chrome:// aware)."""

    def test_valid(self):
        """Valid urls clean() to their normalized form untouched or fixed up."""
        test_data = [
            # expected, url
            ('about:mozilla', 'about:mozilla'),
            ('chrome://foo', 'chrome://foo'),
            ('ftp://example.com/', 'ftp://example.com'),

            # From Django's URLField test data
            ('http://localhost/', 'http://localhost'),
            ('http://example.com/', 'http://example.com'),
            ('http://example.com./', 'http://example.com.'),
            ('http://www.example.com/', 'http://www.example.com'),
            ('http://www.example.com:8000/test',
             'http://www.example.com:8000/test'),
            ('http://valid-with-hyphens.com/', 'valid-with-hyphens.com'),
            ('http://subdomain.domain.com/', 'subdomain.domain.com'),
            ('http://200.8.9.10/', 'http://200.8.9.10'),
            ('http://200.8.9.10:8000/test', 'http://200.8.9.10:8000/test'),
            ('http://valid-----hyphens.com/', 'http://valid-----hyphens.com'),
            ('http://www.example.com/s/http://code.djangoproject.com/tkt/13',
             'www.example.com/s/http://code.djangoproject.com/tkt/13'),
        ]

        f = EnhancedURLField()
        for expected, url in test_data:
            try:
                eq_(f.clean(url), expected)
            except ValidationError:
                # NOTE: Python 2 print statement -- this module is py2-only.
                print url
                raise

    def test_invalid(self):
        """Invalid urls raise ValidationError with these exact messages."""
        test_data = [
            # From Django's URLField test data
            ('This field is required.', ''),
            ('This field is required.', None),
            ('Enter a valid URL.', 'foo'),
            ('Enter a valid URL.', 'http://'),
            ('Enter a valid URL.', 'http://example'),
            ('Enter a valid URL.', 'http://example.'),
            ('Enter a valid URL.', 'com.'),
            ('Enter a valid URL.', '.'),
            ('Enter a valid URL.', 'http://.com'),
            ('Enter a valid URL.', 'http://invalid-.com'),
            ('Enter a valid URL.', 'http://-invalid.com'),
            ('Enter a valid URL.', 'http://inv-.alid-.com'),
            ('Enter a valid URL.', 'http://inv-.-alid.com'),
            ('Enter a valid URL.', '[a'),
            ('Enter a valid URL.', 'http://[a'),
        ]

        f = EnhancedURLField()
        for msg, url in test_data:
            try:
                f.clean(url)
            except ValidationError as exc:
                eq_(exc.messages, [msg])
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,019 | rlr/fjord | refs/heads/master | /fjord/base/models.py | import json
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _lazy
from fjord.base import forms
from fjord.base.validators import EnhancedURLValidator
# Common base model for all fjord models.  Currently just an alias for
# django's Model; kept as an indirection point so a shared base can be
# introduced later without touching every model.
ModelBase = models.Model
class Profile(models.Model):
    """Extra per-user data, linked 1:1 to Django's auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
class EnhancedURLField(models.CharField):
    """URLField that also supports about: and chrome:// urls"""

    description = 'Enhanced URL'

    def __init__(self, verbose_name=None, name=None, **kwargs):
        # Default to the same 200-char limit Django's URLField uses.
        kwargs.setdefault('max_length', 200)
        super(EnhancedURLField, self).__init__(verbose_name, name, **kwargs)
        self.validators.append(EnhancedURLValidator())

    def formfield(self, **kwargs):
        # Default to our enhanced form field, but let callers override.
        options = {'form_class': forms.EnhancedURLField}
        options.update(kwargs)
        return super(EnhancedURLField, self).formfield(**options)

    def deconstruct(self):
        name, path, args, kwargs = super(EnhancedURLField, self).deconstruct()
        # Don't serialize the default value which allows us to change
        # default values later without the serialized form changing.
        if kwargs.get('max_length', None) == 200:
            del kwargs['max_length']
        return name, path, args, kwargs
class JSONObjectField(models.Field):
    """Represents a JSON object.

    Note: This might be missing a lot of Django infrastructure to
    work correctly across edge cases. Also it was tested with MySQL
    and no other db backends.
    """
    empty_strings_allowed = False
    description = _lazy(u'JSON Object')

    # NOTE(review): __metaclass__/SubfieldBase and basestring below are
    # Python 2 / pre-1.10 Django constructs; this class will need porting
    # before a py3 upgrade.
    __metaclass__ = models.SubfieldBase

    def __init__(self, *args, **kwargs):
        # "default" should default to an empty JSON dict. We implement
        # that this way rather than getting involved in the
        # get_default/has_default Field machinery since this makes it
        # easier to subclass.
        kwargs['default'] = kwargs.get('default', {})
        super(JSONObjectField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored in the db as TEXT.
        return 'TextField'

    def pre_init(self, value, obj):
        # Decode JSON strings while the instance is first being created.
        if obj._state.adding:
            if isinstance(value, basestring):
                return json.loads(value)
        return value

    def to_python(self, value):
        # db/serialized values arrive as strings; decode them.
        if isinstance(value, basestring):
            return json.loads(value)
        return value

    def get_db_prep_value(self, value, connection, prepared=False):
        if self.null and value is None:
            return None
        # sort_keys gives a stable serialization for equality comparisons.
        return json.dumps(value, sort_keys=True)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        return self.get_db_prep_value(val, None)

    def value_from_object(self, obj):
        value = super(JSONObjectField, self).value_from_object(obj)
        if self.null and value is None:
            return None
        return json.dumps(value)

    def get_default(self):
        if self.has_default():
            if callable(self.default):
                return self.default()
            return self.default
        if self.null:
            return None
        return {}

    def deconstruct(self):
        name, path, args, kwargs = super(JSONObjectField, self).deconstruct()
        # Don't serialize the default value which allows us to change
        # default values later without the serialized form changing.
        if kwargs.get('default', None) == {}:
            del kwargs['default']
        return name, path, args, kwargs
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,020 | rlr/fjord | refs/heads/master | /fjord/suggest/tests/test_dummy.py | import datetime
from fjord.base.tests import eq_, TestCase
from fjord.feedback.tests import ResponseFactory
from fjord.suggest import get_suggesters
from fjord.suggest.utils import get_suggestions
from fjord.suggest.providers.dummy import DummySuggester
from fjord.suggest.tests import SuggesterTestMixin
class DummySuggesterLoadingTestCase(SuggesterTestMixin, TestCase):
    """With no suggesters configured, DummySuggester must not load."""
    suggesters = []

    def test_didnt_load(self):
        loaded = [
            provider for provider in get_suggesters()
            if isinstance(provider, DummySuggester)
        ]
        eq_(len(loaded), 0)
class DummySuggesterTestCase(SuggesterTestMixin, TestCase):
    """Exercises the dummy suggest provider when it is configured."""

    suggesters = [
        'fjord.suggest.providers.dummy.DummySuggester'
    ]

    def test_load(self):
        """Exactly one DummySuggester instance should be registered."""
        dummy_providers = [
            prov for prov in get_suggesters()
            if isinstance(prov, DummySuggester)
        ]
        eq_(len(dummy_providers), 1)

    def test_get_suggestions(self):
        """The dummy provider echoes the response data into one link."""
        now = u'ts_{0}'.format(datetime.datetime.now())
        req = self.get_feedback_post_request({
            'happy': 1,
            'description': now,
            'url': u'http://example.com/{0}'.format(now)
        })
        feedback = ResponseFactory(
            happy=True,
            description=now,
            url=u'http://example.com/{0}'.format(now)
        )

        # Try with just the feedback
        links = get_suggestions(feedback)
        eq_(len(links), 1)
        eq_(links[0].provider, 'dummy')
        eq_(links[0].provider_version, 1)
        eq_(links[0].cssclass, u'document')
        # FIX: these two assertions had stray trailing commas that
        # wrapped the eq_() result in a discarded tuple.
        eq_(links[0].summary, u'summary {0}'.format(now))
        eq_(links[0].description, u'description {0}'.format(now))
        eq_(links[0].url, feedback.url)

        # Now with the feedback and request
        links = get_suggestions(feedback, req)
        eq_(len(links), 1)
        eq_(links[0].provider, 'dummy')
        eq_(links[0].provider_version, 1)
        eq_(links[0].cssclass, u'document')
        eq_(links[0].summary, u'summary {0}'.format(now))
        eq_(links[0].description, u'description {0}'.format(now))
        eq_(links[0].url, feedback.url)
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,021 | rlr/fjord | refs/heads/master | /fjord/search/tests/__init__.py | import time
from django.conf import settings
import factory
from elasticsearch.exceptions import NotFoundError
from fjord.base.tests import BaseTestCase
from fjord.search.index import get_index, get_es
from fjord.search.models import Record
class ElasticTestCase(BaseTestCase):
    """Base class for Elastic Search tests, providing some conveniences"""

    @classmethod
    def setUpClass(cls):
        super(ElasticTestCase, cls).setUpClass()
        # Point all indexing at a separate "<prefix>test" index so test
        # runs can't clobber the real index.
        cls._old_es_index_prefix = settings.ES_INDEX_PREFIX
        settings.ES_INDEX_PREFIX = settings.ES_INDEX_PREFIX + 'test'

    @classmethod
    def tearDownClass(cls):
        super(ElasticTestCase, cls).tearDownClass()
        # Restore old setting.
        settings.ES_INDEX_PREFIX = cls._old_es_index_prefix

    def setUp(self):
        super(ElasticTestCase, self).setUp()
        self.setup_indexes()

    def tearDown(self):
        super(ElasticTestCase, self).tearDown()
        self.teardown_indexes()

    def refresh(self, timesleep=0):
        """Refresh the test index so indexed documents become searchable.

        :arg timesleep: optional seconds to sleep after the refresh.
        """
        index = get_index()

        # Any time we're doing a refresh, we're making sure that the
        # index is ready to be queried. Given that, it's almost
        # always the case that we want to run all the generated tasks,
        # then refresh.

        # TODO: uncomment this when we have live indexing.
        # generate_tasks()

        get_es().indices.refresh(index)
        if timesleep > 0:
            time.sleep(timesleep)

    def setup_indexes(self, empty=False, wait=True):
        """(Re-)create ES indexes."""
        from fjord.search.index import es_reindex_cmd

        if empty:
            # Removes the index and creates a new one with nothing in
            # it (by abusing the percent argument).
            es_reindex_cmd(percent=0)
        else:
            # Removes the index, creates a new one, and indexes
            # existing data into it.
            es_reindex_cmd()

        self.refresh()
        if wait:
            get_es().cluster.health(wait_for_status='yellow')

    def teardown_indexes(self):
        """Delete the test index; a missing index is not an error."""
        es = get_es()
        try:
            es.indices.delete(get_index())
        except NotFoundError:
            # If we get this error, it means the index didn't exist
            # so there's nothing to delete.
            pass
class RecordFactory(factory.DjangoModelFactory):
    """Factory for search Record rows with fixed default values."""
    class Meta:
        model = Record

    batch_id = 'ou812'
    name = 'Frank'
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,022 | rlr/fjord | refs/heads/master | /fjord/base/forms.py | from django.forms import fields
from fjord.base.validators import EnhancedURLValidator
class EnhancedURLField(fields.URLField):
    """URLField that also supports about: and chrome:// urls"""

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        super(EnhancedURLField, self).__init__(
            max_length, min_length, *args, **kwargs)
        # URLField appends its stock URLValidator last; swap it for the
        # enhanced one that also accepts about:/chrome:// urls.
        self.validators[-1] = EnhancedURLValidator()

    def to_python(self, value):
        # about:/chrome:// urls pass through untouched; everything else
        # gets URLField's usual normalization.
        if value and value.startswith(('about:', 'chrome://')):
            return value
        return super(EnhancedURLField, self).to_python(value)
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,023 | rlr/fjord | refs/heads/master | /fjord/feedback/tests/test_models.py | import datetime
from fjord.base.tests import eq_, TestCase
from fjord.feedback.config import TRUNCATE_LENGTH
from fjord.feedback.models import (
Product,
Response,
ResponseEmail,
ResponseContext,
ResponsePI,
ResponseMappingType,
purge_data
)
from fjord.feedback.tests import (
ResponseFactory,
ResponseEmailFactory,
ResponseContextFactory,
ResponsePIFactory
)
from fjord.feedback.utils import compute_grams
from fjord.journal.models import Record
from fjord.search.tests import ElasticTestCase
class TestResponseModel(TestCase):
    """Unit tests for Response save-time normalization and derived fields."""

    def test_description_truncate_on_save(self):
        # Extra 10 characters get lopped off on save.
        resp = ResponseFactory(description=('a' * (TRUNCATE_LENGTH + 10)))
        eq_(resp.description, 'a' * TRUNCATE_LENGTH)

    def test_description_strip_on_save(self):
        # Nix leading and trailing whitespace.
        resp = ResponseFactory(description=u' \n\tou812\t\n ')
        eq_(resp.description, u'ou812')

    def test_url_domain(self):
        # Test a "normal domain"
        resp = ResponseFactory(url=u'http://foo.example.com.br/blah')
        eq_(resp.url_domain, u'example.com.br')
        assert isinstance(resp.url_domain, unicode)

        # Test a unicode domain
        resp = ResponseFactory(
            url=u'http://\u30c9\u30e9\u30af\u30a810.jp/dq10_skillpoint.html')
        eq_(resp.url_domain, u'\u30c9\u30e9\u30af\u30a810.jp')
        assert isinstance(resp.url_domain, unicode)

    def test_rating_to_happy(self):
        """Test that we do populate happy from rating"""
        # Ratings 1-3 map to unhappy, 4-5 to happy.
        data = {
            1: False,
            2: False,
            3: False,
            4: True,
            5: True
        }
        for rat, expected in data.items():
            # Create the response, but DON'T save it to the db.
            resp = ResponseFactory.build(happy=None, rating=rat)
            resp.save()
            eq_(resp.happy, expected)

    def test_happy_to_rating(self):
        """Test we don't populate rating from happy"""
        resp = ResponseFactory.build(happy=True, rating=None)
        resp.save()
        eq_(resp.rating, None)

        resp = ResponseFactory.build(happy=False, rating=None)
        resp.save()
        eq_(resp.rating, None)
class TestAutoTranslation(TestCase):
    """Responses should be auto-translated when their product opts in."""

    def setUp(self):
        # Wipe out translation system for all products.
        # FIXME - might be better to save the state and restore it in tearDown
        # rather than stomp in both cases. But stomping works for now.
        Product.objects.update(translation_system=u'')
        super(TestAutoTranslation, self).setUp()

    def tearDown(self):
        # Wipe out translation system for all products.
        Product.objects.update(translation_system=u'')
        super(TestAutoTranslation, self).tearDown()

    def test_auto_translation(self):
        # Opt firefox into the dennis (fake/test) translation system.
        prod = Product.objects.get(db_name='firefox')
        prod.translation_system = u'dennis'
        prod.save()

        resp = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola'
        )

        # Fetch it from the db again
        resp = Response.objects.get(id=resp.id)
        eq_(resp.translated_description, u'\xabHOLA\xbb')
class TestGenerateTranslationJobs(TestCase):
    """Tests for Response.generate_translation_jobs across locales/systems."""

    def setUp(self):
        # Wipe out translation system for all products.
        # FIXME - might be better to save the state and restore it in tearDown
        # rather than stomp in both cases. But stomping works for now.
        Product.objects.update(translation_system=u'')
        super(TestGenerateTranslationJobs, self).setUp()

    def tearDown(self):
        # Wipe out translation system for all products.
        Product.objects.update(translation_system=u'')
        super(TestGenerateTranslationJobs, self).tearDown()

    def test_english_no_translation(self):
        """English descriptions should get copied over"""
        resp = ResponseFactory(
            locale=u'en-US',
            description=u'hello',
            translated_description=u''
        )

        # No new jobs should be generated
        eq_(len(resp.generate_translation_jobs()), 0)

        # Re-fetch from the db and make sure the description was copied over
        resp = Response.objects.get(id=resp.id)
        eq_(resp.description, resp.translated_description)

    def test_english_gb_no_translation(self):
        """en-GB descriptions should get copied over"""
        resp = ResponseFactory(
            locale=u'en-GB',
            description=u'hello',
            translated_description=u''
        )

        # No new jobs should be generated
        eq_(len(resp.generate_translation_jobs()), 0)

        # Re-fetch from the db and make sure the description was copied over
        resp = Response.objects.get(id=resp.id)
        eq_(resp.description, resp.translated_description)

    def test_english_with_dennis(self):
        """English descriptions should get copied over"""
        resp = ResponseFactory(
            locale=u'en-US',
            product=u'firefox',
            description=u'hello',
            translated_description=u''
        )

        # Set the product up for translation *after* creating the response
        # so that it doesn't get auto-translated because Response is set up
        # for auto-translation.
        prod = Product.objects.get(db_name='firefox')
        prod.translation_system = u'dennis'
        prod.save()

        # No new jobs should be generated
        eq_(len(resp.generate_translation_jobs()), 0)

        # Re-fetch from the db and make sure the description was copied over
        resp = Response.objects.get(id=resp.id)
        eq_(resp.description, resp.translated_description)

    def test_spanish_no_translation(self):
        """Spanish should not get translated"""
        resp = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola',
            translated_description=u''
        )

        # No jobs should be translated
        eq_(len(resp.generate_translation_jobs()), 0)

        # Nothing should be translated
        eq_(resp.translated_description, u'')

    def test_spanish_with_dennis(self):
        """Spanish should get translated"""
        resp = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola',
            translated_description=u''
        )

        # Set the product up for translation *after* creating the response
        # so that it doesn't get auto-translated because Response is set up
        # for auto-translation.
        prod = Product.objects.get(db_name='firefox')
        prod.translation_system = u'dennis'
        prod.save()

        # One job should be generated
        jobs = resp.generate_translation_jobs()
        eq_(len(jobs), 1)
        job = jobs[0]
        # job[0] is skipped here -- presumably an instance key; the rest is
        # (system, src locale, src field, dst locale, dst field).
        eq_(job[1:], (u'dennis', u'es', u'description',
                      u'en', 'translated_description'))

        eq_(resp.translated_description, u'')

    def test_spanish_with_dennis_and_existing_translations(self):
        """Response should pick up existing translation"""
        existing_resp = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola',
            translated_description=u'DUDE!'
        )

        resp = ResponseFactory(
            locale=u'es',
            product=u'firefox',
            description=u'hola',
            translated_description=u''
        )

        # Set the product up for translation *after* creating the response
        # so that it doesn't get auto-translated because Response is set up
        # for auto-translation.
        prod = Product.objects.get(db_name='firefox')
        prod.translation_system = u'dennis'
        prod.save()

        # No jobs should be translated
        eq_(len(resp.generate_translation_jobs()), 0)
        eq_(resp.translated_description, existing_resp.translated_description)
class TestComputeGrams(TestCase):
    """Tests for fjord.feedback.utils.compute_grams (bigram extraction)."""

    def test_empty(self):
        eq_(compute_grams(u''), [])

    def test_parsing(self):
        # stop words are removed
        eq_(compute_grams(u'i me him her'), [])

        # capital letters don't matter
        eq_(compute_grams(u'I ME HIM HER'), [])

        # punctuation nixed
        eq_(compute_grams(u'i, me, him, her'), [])

    def test_bigrams(self):
        # Note: Tokens look weird after being analyzed probably due to
        # the stemmer. We could write a bunch of code to "undo" some
        # of the excessive stemming, but it's probably an exercise in
        # futility. Ergo the tests look a little odd. e.g. "youtub"

        # One word a bigram does not make
        eq_(compute_grams(u'youtube'), [])

        # Two words is the minimum number to create a bigram
        eq_(sorted(compute_grams(u'youtube crash')),
            ['crash youtube'])

        # Three words creates two bigrams
        eq_(sorted(compute_grams(u'youtube crash flash')),
            ['crash flash', 'crash youtube'])

        # Four words creates three bigrams
        eq_(sorted(compute_grams(u'youtube crash flash bridge')),
            ['bridge flash', 'crash flash', 'crash youtube'])

        # Nix duplicate bigrams
        eq_(sorted(compute_grams(u'youtube crash youtube flash')),
            ['crash youtube', 'flash youtube'])
class TestParseData(ElasticTestCase):
    """Tests for purge_data: detail rows older than the cutoff are deleted,
    while the Response rows and the search index entries remain."""

    def test_purge(self):
        now = datetime.datetime.now()
        cutoff = now - datetime.timedelta(days=5)

        # Create 10 objs of each type--one for each day for the last
        # 10 days.
        for i in range(10):
            ResponseEmailFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )
            ResponseContextFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )
            ResponsePIFactory(
                opinion__created=(now - datetime.timedelta(days=i))
            )

        # Note that this creates 30 Response objects.

        # Since creating the objects and indexing them happens very
        # quickly in tests, we hit a race condition and the has_email
        # column ends up being false. So instead we just drop the
        # index and rebuild it.
        self.setup_indexes()

        # Make sure everything is in the db
        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 10)
        eq_(ResponseContext.objects.count(), 10)
        eq_(ResponsePI.objects.count(), 10)

        # Make sure everything is in the index
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 10)

        # Now purge everything older than 5 days and make sure things
        # got removed that should have gotten removed. Also check if
        # there is a journal entry for the purge operation.
        cutoff = now - datetime.timedelta(days=5)
        purge_data(cutoff=cutoff)

        self.refresh()

        # Responses survive the purge; only the detail rows are deleted.
        eq_(Response.objects.count(), 30)
        eq_(ResponseEmail.objects.count(), 5)
        eq_(ResponseEmail.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponseContext.objects.count(), 5)
        eq_(ResponseContext.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        eq_(ResponsePI.objects.count(), 5)
        eq_(ResponsePI.objects.filter(
            opinion__created__gte=cutoff).count(),
            5)
        # Exactly one journal entry is recorded, with per-table counts.
        eq_(1,
            Record.objects.filter(action='purge_data').count())
        expected_msg = ('feedback_responseemail: 5, '
                        'feedback_responsecontext: 5, '
                        'feedback_responsepi: 5')
        eq_(expected_msg,
            Record.objects.get(action='purge_data').msg)

        # Everything should still be in the index, but the number of
        # things with has_email=True should go down
        resp_s = ResponseMappingType.search()
        eq_(resp_s.count(), 30)
        eq_(resp_s.filter(has_email=True).count(), 5)
| {"/fjord/feedback/tests/test_models.py": ["/fjord/search/tests/__init__.py"]} |
52,033 | pedantix/corona | refs/heads/master | /corona/plotting.py | from bokeh.plotting import figure, show
from bokeh.embed import file_html
from bokeh.resources import CDN
from collections import Counter
from corona.selector import Selector
def get_counts_by_country(jh_data, field, selector=None):
    """Sum `field` per report date across all records accepted by `selector`.

    Args:
        jh_data: iterable of record dicts; each has a 'report_date' key and a
            numeric value under `field`.
        field: name of the numeric field to aggregate (e.g. 'confirmed').
        selector: optional callable(record) -> bool; defaults to a Selector()
            that accepts every record.

    Returns:
        (dates, counts): parallel tuples sorted by date. Both are empty when
        no record matches (previously ``zip(*[])`` raised ValueError here).
    """
    if selector is None:
        selector = Selector()
    count = Counter()
    for record in jh_data:
        if not selector(record):
            continue
        count[record['report_date']] += record[field]
    if not count:
        # zip(*sorted({}.items())) would raise ValueError on unpacking.
        return (), ()
    dates, counts = zip(*sorted(count.items()))
    return dates, counts
def get_diff(counts):
    """Turn a cumulative series into per-step deltas.

    The first delta is measured from an implicit leading zero, so the
    result has the same length as `counts`.
    """
    previous = 0
    deltas = []
    for value in counts:
        deltas.append(value - previous)
        previous = value
    return deltas
def plot(jh_data, selector=None, delta=False, title=None, y_log=False, raw_html=False):
    """Plot confirmed/recovered/death series over time with bokeh.

    Args:
        jh_data: list of Johns Hopkins record dicts.
        selector: optional record filter; defaults to Selector().
        delta: plot daily deltas (via get_diff) instead of cumulative totals.
        title: figure title; defaults to the selector's title.
        y_log: use a logarithmic y axis instead of a linear one.
        raw_html: when truthy, return a standalone HTML document instead of
            calling bokeh's show().

    Returns:
        HTML string when `raw_html` is truthy, otherwise None (figure shown).
    """
    if selector is None:
        selector = Selector()
    if title is None:
        title = selector.get_title()
    y_axis_type = "log" if y_log else "linear"
    fig = figure(x_axis_type="datetime", title=title, width=800, height=600, y_axis_type=y_axis_type)
    # The first '# ' assignment in the original was dead code -- the label is
    # always overwritten by one of the two branches below.
    if delta:
        fig.yaxis.axis_label = '# / day'
    else:
        fig.yaxis.axis_label = '# / cumulative'
    fields = [('confirmed', 'blue'),
              ('recovered', 'green'),
              ('deaths', 'red')]
    for field, color in fields:
        dates, counts = get_counts_by_country(jh_data, field, selector=selector)
        if delta:
            counts = get_diff(counts)
        fig.line(dates, counts, legend=field, color=color, line_width=3)
        fig.circle(dates, counts, alpha=0.2, color=color)
    fig.legend.location = "top_left"
    # bokeh makes html, see if we can optionally return html
    if not raw_html:  # was `raw_html == False` -- test truthiness, not equality
        show(fig)
    else:
        return file_html(fig, CDN)
| {"/corona/read_data.py": ["/corona/file_names.py"]} |
52,034 | pedantix/corona | refs/heads/master | /corona/read_data.py | from csv import DictReader
from glob import glob
from datetime import datetime
from corona.file_names import file_names
def read_data_raw():
    """Read the open line-list CSV and return its rows as a list of dicts."""
    filename = file_names['ts_data']
    # utf-8-sig transparently strips the BOM that these CSVs carry.
    return list(DictReader(open(filename, 'r', encoding='utf-8-sig')))
def get_jh_files():
    """Return the sorted paths of the Johns Hopkins daily-report CSVs for 2020."""
    return sorted(glob(file_names['jh_dir_update']+'/*2020.csv'))
def str2float(string):
    """Parse a CSV count cell, treating the empty string as zero.

    Despite the name, the result is an int -- the daily reports only
    contain whole counts.
    """
    return 0 if string == '' else int(string)
def process_jh_record(record):
    """Normalise one raw Johns Hopkins CSV row in place.

    Converts the count columns to ints under lower-case keys, renames the
    metadata columns to snake_case, and parses 'report_date_string'
    (MM-DD-YYYY) into a datetime stored under 'report_date'.
    """
    for column in ('Confirmed', 'Recovered', 'Deaths'):
        record[column.lower()] = str2float(record.pop(column))
    record['last_update'] = record.pop('Last Update')
    record['report_date'] = datetime.strptime(record['report_date_string'], '%m-%d-%Y')
    record['country'] = record.pop('Country/Region')
    record['province'] = record.pop('Province/State')
def read_jh_data():
    """Load every JH daily-report CSV into one normalised list of record dicts."""
    files = get_jh_files()
    data = []
    for file in files:
        report_data = list(DictReader(open(file, 'r', encoding='utf-8-sig')))
        for line in report_data:
            # The report date is encoded in the file name (MM-DD-YYYY.csv).
            report_date = file.split('/')[-1].split('.csv')[0]
            line['report_date_string'] = report_date
            process_jh_record(line)
        # Convert OrderedDict rows to plain dicts before accumulating.
        report_data = [dict(line) for line in report_data]
        data.extend(report_data)
    return data
def get_countries(jh_data=None, province=False):
    """Return sorted, de-duplicated country labels from the JH records.

    When `province` is true, labels are 'country // province' pairs.
    Loads the full dataset when `jh_data` is not supplied.
    """
    if jh_data is None:
        jh_data = read_jh_data()
    if province:
        def label(record):
            return record['country'] + ' // ' + record['province']
    else:
        def label(record):
            return record['country']
    return sorted({label(record) for record in jh_data})
def is_europe(country):
    """Return True when `country` is in the hand-maintained European set."""
    # 'Andora' is a misspelling kept for backward compatibility; the correct
    # 'Andorra' (the spelling used in the JH data) is added so it matches.
    countries = {'France', 'Spain', 'UK', 'Sweden', 'Switzerland', 'Italy',
                 'Portugal', 'Germany', 'Romania', 'North Ireland',
                 'Netherlands', 'Norway', 'Luxembourg', 'Ireland',
                 'Iceland', 'Greece', 'Finland', 'Denmark', 'Croatia',
                 'Belgium', 'Austria', 'Andora', 'Andorra'}
    return country in countries
| {"/corona/read_data.py": ["/corona/file_names.py"]} |
52,035 | pedantix/corona | refs/heads/master | /corona/file_names.py | """
Keep all filename/path logic here rather than polluting code with hardcoded paths
"""
import os
# Default data directory; overridable via the CORONA_DATA_DIR env var below.
data_dir_default = 'data'
tmp_dir = 'tmp'
data_dir = os.getenv('CORONA_DATA_DIR', data_dir_default)
# Directory holding the "novel" open line-list dataset.
novel_dir = "%s/novel" % data_dir
# Snapshot of the Johns Hopkins CSSE daily-report CSVs.
jh_dir = "%s/jh/COVID-19-master_2020_03_03/csse_covid_19_data/csse_covid_19_daily_reports" % data_dir
# Central registry of all paths used by the rest of the package.
file_names = {'ts_data': "%s/COVID19_open_line_list.csv" % novel_dir,
              'jh_dir': jh_dir,
              'tmp_dir': tmp_dir,
              'jh_tmp_dir': "%s/jh_data_dump" % jh_dir,
              'jh_sub_dir': "COVID-19-master/csse_covid_19_data/csse_covid_19_daily_reports",
              'jh_dir_update': "%s/updates" % data_dir}
| {"/corona/read_data.py": ["/corona/file_names.py"]} |
52,052 | Brian-Doucet/zillow | refs/heads/master | /zillow.py | #! python
#!/usr/bin/python3
"""Program to fetch data on sold homes from Zillow"""
import csv
import json
from typing import List
from bs4 import BeautifulSoup
import requests
from models.zillow import ZillowAddress, ZillowData, ZillowRequest
from utils import get_home_details, get_facts_and_features
class ZillowScraper():
    """Scrapes sold-home listings from Zillow and writes them to a CSV file."""

    def __init__(self):
        # One ZillowData record per scraped property, in scrape order.
        self.results: List[ZillowData] = []
        self.output_file = 'zillow_listings.csv'

    def fetch(self, zillow_request: ZillowRequest):
        """Issue the HTTP GET described by `zillow_request` and return the response."""
        response = requests.get(url=zillow_request.url,
                                headers=zillow_request.headers,
                                params=zillow_request.params)
        return response

    def get_zillow_urls_per_property(self, response) -> List[str]:
        """Extract per-property detail-page URLs from a search-results page.

        Args:
            response: HTML text of a results page. Each photo card embeds a
                JSON-LD <script> whose 'url' field points at the listing.

        Returns:
            List of listing URLs, in page order.
        """
        list_of_urls = []
        content = BeautifulSoup(response, features='lxml')
        property_cards = content.find(
            'ul', {'class': 'photo-cards photo-cards_wow photo-cards_short'}
        )
        for child_property in property_cards:
            property_data = child_property.find(
                'script', {'type': 'application/ld+json'}
            )
            if property_data:
                property_data_json = json.loads(property_data.contents[0])
                # Debug output of the raw JSON-LD payload.
                print(property_data_json)
                list_of_urls.append(
                    property_data_json['url']
                )
        return list_of_urls

    def get_property_details(self, url: str) -> ZillowData:
        """Fetch one listing page and map its details into a ZillowData record."""
        zillow_request = ZillowRequest(url=url)
        response = self.fetch(zillow_request)
        content = BeautifulSoup(response.text, features='lxml')
        home_details = get_home_details(content)
        facts_features = get_facts_and_features(content)
        # Map the two parsed sections onto the flat CSV schema; .get() keeps
        # missing features as None rather than raising.
        zillow_data_object = ZillowData(
            zpid=home_details.get("zpid"),
            property_name=home_details.get("name"),
            street_address=home_details.get("address").get("streetAddress"),
            city=home_details.get("address").get("addressLocality"),
            state=home_details.get("address").get("addressRegion"),
            zip_code=home_details.get("address").get("postalCode"),
            latitude=home_details.get("geo").get("latitude"),
            longitude=home_details.get("geo").get("longitude"),
            property_type=facts_features.get("home_type"),
            lot_size=facts_features.get("lot_size"),
            year_built=facts_features.get("year_built"),
            square_footage=home_details.get("floorSize").get("value"),
            total_interior_livable_area=facts_features.get("total_interior_livable_area"),
            price_per_sqft=facts_features.get("price/sqft"),
            stories=facts_features.get("stories"),
            foundation=facts_features.get("foundation"),
            roof=facts_features.get("roof"),
            new_construction=facts_features.get("new_construction"),
            bedrooms=facts_features.get("bedrooms"),
            bathrooms=facts_features.get("bathrooms"),
            full_bathrooms=facts_features.get("full_bathrooms"),
            flooring=facts_features.get("flooring"),
            basement=facts_features.get("basement"),
            fireplace=facts_features.get("fireplace"),
            parking=facts_features.get("parking"),
            garage=facts_features.get("has_garage"),
            garage_spaces=facts_features.get("garage_spaces"),
            heating=facts_features.get("heating"),
            cooling=facts_features.get("cooling"),
            hoa_dues=facts_features.get("hoa"),
            tax_assessed_value=facts_features.get("tax_assessed_value"),
            annual_tax_amount=facts_features.get("annual_tax_amount"),
            property_url=home_details.get("url")
        )
        return zillow_data_object

    def parse(self, response):
        # NOTE(review): appears superseded by get_zillow_urls_per_property /
        # get_property_details -- it fetches only the first card (`break`)
        # and discards the result. Candidate for removal; confirm callers.
        content = BeautifulSoup(response, features='lxml')
        property_cards = content.find(
            'ul', {'class': 'photo-cards photo-cards_wow photo-cards_short'})
        for child_property in property_cards:
            property_data = child_property.find(
                'script', {'type': 'application/ld+json'})
            if property_data:
                property_data_json = json.loads(property_data.contents[0])
                url_for_property = property_data_json['url']
                follow_up_request = ZillowRequest(url=url_for_property)
                response = self.fetch(follow_up_request)
                content = BeautifulSoup(response.text, features='lxml')
                break

    def write_to_csv(self):
        """Write all accumulated results to self.output_file with a header row."""
        with open(self.output_file, 'w') as csv_file:
            # Header fields come from the pydantic model's declared fields.
            writer = csv.DictWriter(
                csv_file, fieldnames=self.results[0].header_fields())
            writer.writeheader()
            for row in self.results:
                writer.writerow(row.dict())

    def run(self):
        """End-to-end scrape: results page -> per-listing details -> CSV."""
        request = ZillowRequest()
        response = self.fetch(request)
        urls = self.get_zillow_urls_per_property(response.text)
        for property_url in urls:
            self.results.append(self.get_property_details(property_url))
        self.write_to_csv()
if __name__ == '__main__':
    # Script entry point: scrape everything and write zillow_listings.csv.
    scraper = ZillowScraper()
    scraper.run()
| {"/zillow.py": ["/models/zillow.py", "/utils.py"]} |
52,053 | Brian-Doucet/zillow | refs/heads/master | /models/zillow.py | from typing import Optional, Dict, Any
from pydantic import BaseModel
from constants import ZILLOW_URL, ZILLOW_QUERY_PARAMS, ZILLOW_QUERY_HEADERS
class ZillowAddress(BaseModel):
    """Placeholder address model; declares no fields of its own (yet)."""

    def header_fields(self):
        # Pydantic's declared-field mapping doubles as a CSV header list.
        return self.__fields__
class ZillowData(BaseModel):
    """One scraped Zillow listing; every field is optional free-form text."""

    zpid: Optional[str]
    property_url: Optional[str]
    property_name: Optional[str]
    street_address: Optional[str]
    city: Optional[str]
    state: Optional[str]
    zip_code: Optional[str]
    latitude: Optional[str]
    longitude: Optional[str]
    #address:Optional[ZillowAddress]
    property_type: Optional[str]
    lot_size: Optional[str]
    year_built: Optional[str]
    square_footage: Optional[str]
    total_interior_livable_area: Optional[str]
    price_per_sqft: Optional[str]
    stories: Optional[str]
    foundation: Optional[str]
    roof: Optional[str]
    new_construction: Optional[str]
    bedrooms: Optional[str]
    bathrooms: Optional[str]
    full_bathrooms: Optional[str]
    flooring: Optional[str]
    basement: Optional[str]
    fireplace: Optional[str]
    parking: Optional[str]
    garage: Optional[str]
    garage_spaces: Optional[str]
    heating: Optional[str]
    cooling: Optional[str]
    hoa_dues: Optional[str]
    tax_assessed_value: Optional[str]
    annual_tax_amount: Optional[str]
    sale_price: Optional[str]

    def header_fields(self):
        # Declared-field mapping; used as the CSV header row by the scraper.
        return self.__fields__
class ZillowRequest(BaseModel):
    """HTTP request description with Zillow defaults for URL, params and headers."""

    url: Optional[str] = ZILLOW_URL
    params: Optional[Dict] = ZILLOW_QUERY_PARAMS
    headers: Optional[Dict] = ZILLOW_QUERY_HEADERS

    def add_params(self, params: dict):
        # Merge extra query parameters into the defaults (mutates in place).
        self.params.update(params)
| {"/zillow.py": ["/models/zillow.py", "/utils.py"]} |
52,054 | Brian-Doucet/zillow | refs/heads/master | /utils.py | #!/usr/bin/python3
"""Set of functions for parsing HTML from Zillow"""
import json
import re
from bs4 import BeautifulSoup
# HTML output from an example property listing. Saves sending repeated requests
# when testing these functions.
sample_listing_html = open("zillow_sample.txt", "r", encoding='utf-8').read()
content = BeautifulSoup(sample_listing_html, "lxml")
def parse_text(text, delimiter, position):
    """Split `text` on `delimiter` and return the piece at `position`.

    Args:
        text: the string being parsed.
        delimiter: separator to split on.
        position: index into the resulting pieces (may be negative).

    Returns:
        The selected substring.
    """
    return text.split(delimiter)[position]
def get_zillow_property_id(url):
    """Return the numeric Zillow property id (zpid) embedded in a listing URL.

    Args:
        url: Zillow URL for a specific listing (contains '<digits>_zpid').

    Returns:
        The digits preceding '_zpid', as a string.
    """
    match = re.search(r"(\d+)_zpid", url)
    return match.group(1)
def get_home_details(content):
    """Return a listing's JSON-LD home details, augmented with its zpid.

    Args:
        content: BeautifulSoup tree of the home-details page.

    Returns:
        dict parsed from the page's application/ld+json script -- including
        the property type/name, floor size, geo coordinates, URL and the
        address parts as published by Zillow -- with a 'zpid' key added
        (extracted from the listing URL).
    """
    raw_json = content.find('script', {'type': 'application/ld+json'}).string
    details = json.loads(raw_json)
    details["zpid"] = get_zillow_property_id(details.get("url"))
    return details
def get_facts_and_features(content):
    """Get data from the facts and features section of the property listing.

    Not all property listings will include each feature.

    Args:
        content (BeautifulSoup): parsed home-details page of a property listing

    Returns:
        Dict: feature name -> value, both as strings. Keys are snake_cased
        (e.g. 'Year built' -> 'year_built'); values are whatever text follows
        the last ':' in the entry. Typical entries include the property type,
        year built, price/sqft, lot size, heating, cooling, parking and HOA.
    """
    facts_features = content.find("div", {"class": "ds-home-facts-and-features reso-facts-features sheety-facts-features"})
    # Flatten every <li> under every <ul> into "Key: Value" strings.
    items_list = [li.get_text(strip=True) for uls in facts_features.find_all("ul") for li in uls]
    # Keys: text before the first ':', whitespace collapsed to '_' and lowered.
    item_keys = ['_'.join(parse_text(item, ':', 0).split()).lower() for item in items_list]
    # Values: text after the *last* ':' (handles values containing ':').
    item_values = [parse_text(item, ':', -1) for item in items_list]
    return dict(zip(item_keys, item_values))
| {"/zillow.py": ["/models/zillow.py", "/utils.py"]} |
52,055 | vtt-info/eiscp-micropython | refs/heads/main | /src/eiscp/eiscp.py | import select
import socket
import time
import uasyncio
from .core import (
ISCPMessage,
command_to_iscp,
command_to_packet,
eISCPPacket,
filter_for_message,
iscp_to_command,
parse_info,
)
class eISCP:
    """Implements the eISCP interface to Onkyo receivers.

    This uses a blocking interface. The remote end will regularily
    send unsolicited status updates. You need to manually call
    ``get_message`` to query those.

    You may want to look at the :meth:`Receiver` class instead, which
    uses a background thread.
    """

    # Default TCP/UDP port used by Onkyo receivers for eISCP.
    ONKYO_PORT = 60128
    # Seconds to wait when establishing the TCP command connection.
    CONNECT_TIMEOUT = 5

    def __init__(self, host: str, port: int = 60128, debug: bool = False):
        self.host = host
        self.port = port
        self._info = None
        self.debug = debug
        # Lazily created in _ensure_socket_connected().
        self.async_command_socket = None
        self.command_socket = None

    @property
    def model_name(self) -> str:
        # Model name from the (lazily queried) discovery info, if available.
        if self.info and self.info.get("model_name"):
            return self.info["model_name"]
        else:
            return "unknown-model"

    @property
    def identifier(self) -> str:
        # Device identifier from the discovery info, if available.
        if self.info and self.info.get("identifier"):
            return self.info["identifier"]
        else:
            return "no-id"

    def __repr__(self) -> str:
        if self.info and self.info.get("model_name"):
            model = self.info["model_name"]
        else:
            model = "unknown"
        string = "<{}({}) {}:{}>".format(self.__class__.__name__, model, self.host, self.port)
        return string

    @property
    def info(self) -> "Dict[str, str]":
        """Lazily query the receiver's ECN info over UDP and cache it."""
        if not self._info:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            sock.setblocking(0)
            sock.bind(("0.0.0.0", self.ONKYO_PORT))
            sock.sendto(eISCPPacket("!xECNQSTN").get_raw(), (self.host, self.port))
            # Wait up to 100ms for the unicast reply.
            ready = select.select([sock], [], [], 0.1)
            if ready[0]:
                data = sock.recv(1024)
                self._info = parse_info(data)
            sock.close()
        return self._info

    @info.setter
    def info(self, value: "Dict[str, str]") -> None:
        self._info = value

    def _ensure_socket_connected(self) -> None:
        # Open the non-blocking TCP command socket on first use; reused after.
        if self.command_socket is None:
            command_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            command_socket.settimeout(self.CONNECT_TIMEOUT)
            command_socket.connect((self.host, self.port))
            command_socket.setblocking(0)
            self.command_socket = command_socket
            self.async_command_socket = uasyncio.StreamWriter(command_socket)

    def disconnect(self) -> None:
        """Close the command socket, ignoring errors; safe to call repeatedly."""
        try:
            self.command_socket.close()
        except Exception:
            pass
        self.command_socket = None

    def __enter__(self):
        self._ensure_socket_connected()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self.disconnect()

    async def send(self, iscp_message: str) -> None:
        """Send a low-level ISCP message, like ``MVL50``.

        This does not return anything, nor does it wait for a response
        from the receiver. You can query responses via :meth:`get`,
        or use :meth:`raw` to send a message and waiting for one.
        """
        self._ensure_socket_connected()
        self.async_command_socket.write(command_to_packet(iscp_message))
        await self.async_command_socket.drain()

    async def get(self, timeout: float = 0.2) -> bytes:
        """Return the next message sent by the receiver, or, after
        ``timeout`` has passed, return ``None``.
        """
        self._ensure_socket_connected()
        start = time.ticks_ms()
        header_bytes = b""
        # NOTE(review): direct arithmetic on ticks_ms() values is not
        # wraparound-safe on MicroPython; time.ticks_diff/ticks_add would be
        # more robust for long uptimes -- confirm before relying on it.
        while start + int(timeout * 1000) > time.ticks_ms() and len(header_bytes) < 16:
            self.dprint("Reading from socket for header")
            result_payload = self.command_socket.read(16 - len(header_bytes))
            if result_payload:
                header_bytes += result_payload
                self.dprint("Got {} header bytes".format(len(header_bytes)))
            if len(header_bytes) < 16:
                self.dprint("Complete header not retrieved, waiting for buffer to keep up")
                await uasyncio.sleep_ms(1)
        if len(header_bytes) < 16:
            return None
        header = eISCPPacket.parse_header(header_bytes)
        self.dprint("Found ISCP header {}".format(header))
        body = b""
        # Restart the clock for the body read, sized by the header.
        start = time.ticks_ms()
        while len(body) < header.data_size:
            result = self.command_socket.read(header.data_size - len(body))
            if result:
                body += result
            if start + timeout * 1000 < time.ticks_ms():
                return None
            elif len(body) < header.data_size:
                await uasyncio.sleep_ms(1)
        message = ISCPMessage.parse(body.decode())
        self.dprint("Identified ISCP response: {}".format(message))
        return message

    async def raw(self, iscp_message, expect_response: bool = True):
        """Send a low-level ISCP message, like ``MVL50``, and wait
        for a response.

        While the protocol is designed to acknowledge each message with
        a response, there is no fool-proof way to differentiate those
        from unsolicited status updates, though we'll do our best to
        try. Generally, this won't be an issue, though in theory the
        response this function returns to you sending ``SLI05`` may be
        an ``SLI06`` update from another controller.

        It'd be preferable to design your app in a way where you are
        processing all incoming messages the same way, regardless of
        their origin.
        """
        # NOTE(review): `False` is passed as the timeout here, which get()
        # treats as 0 -- i.e. a single non-blocking poll per iteration.
        while await self.get(False):
            # Clear all incoming messages. If not yet queried,
            # they are lost. This is so that we can find the real
            # response to our sent command later.
            pass
        await self.send(iscp_message)
        if expect_response:
            return await filter_for_message(self.get, iscp_message)
        else:
            while await self.get(False):
                # Drain anything in the buffer
                pass
            return None

    async def command(self, command: str, argument: str, expect_response: bool = True) -> "Optional[Tuple[str, str]]":
        """Send a high-level command to the receiver, return the
        receiver's response formatted as a command.

        This is basically a helper that combines :meth:`raw`,
        :func:`command_to_iscp` and :func:`iscp_to_command`.
        """
        iscp_message = command_to_iscp(command, argument)
        response = await self.raw(iscp_message, expect_response)
        if response:
            return iscp_to_command(response)
        return None

    async def power_on(self) -> "Optional[Tuple[str, str]]":
        """Turn the receiver power on."""
        return await self.command("PWR", "01")

    async def power_off(self) -> "Optional[Tuple[str, str]]":
        """Turn the receiver power off."""
        return await self.command("PWR", "00")

    def dprint(self, item: str) -> None:
        # Debug logging gated on the constructor's `debug` flag.
        if self.debug:
            print(item)
| {"/src/eiscp/eiscp.py": ["/src/eiscp/core.py"], "/src/eiscp/discovery.py": ["/src/eiscp/core.py", "/src/eiscp/eiscp.py"], "/src/eiscp/__init__.py": ["/src/eiscp/discovery.py", "/src/eiscp/eiscp.py"]} |
52,056 | vtt-info/eiscp-micropython | refs/heads/main | /src/eiscp/discovery.py | import socket
import select
import time

import network
import uasyncio

from .core import eISCPPacket, parse_info
from .eiscp import eISCP
def get_current_ip_address() -> str:
    """Return this device's IPv4 address on the active WLAN station interface.

    Raises:
        OSError: if the WLAN interface is not connected.
    """
    wlan_network = network.WLAN(network.STA_IF)
    if not wlan_network.isconnected():
        raise OSError("Must connect to network first")
    connection_details = wlan_network.ifconfig()
    # ifconfig() returns (ip, netmask, gateway, dns); we want the ip.
    return connection_details[0]
def get_current_broadcast_address() -> str:
    """Compute the IPv4 broadcast address of the active WLAN network.

    Per octet: keep the host octet where the mask is 255, otherwise use
    (ip & mask) | ~mask, i.e. set all host bits.

    Raises:
        OSError: if the WLAN interface is not connected.
    """
    wlan_network = network.WLAN(network.STA_IF)
    if not wlan_network.isconnected():
        raise OSError("Must connect to network first")
    connection_details = wlan_network.ifconfig()
    ip_address = connection_details[0]
    netmask = connection_details[1]
    segments = []
    for ip_text, mask_text in zip(ip_address.split("."), netmask.split(".")):
        ip_octet = int(ip_text)
        mask_octet = int(mask_text)
        if mask_octet == 255:
            segments.append(ip_octet)
        else:
            segments.append((ip_octet & mask_octet) + (255 - mask_octet))
    return ".".join(str(segment) for segment in segments)
discovery_lock = uasyncio.Lock()
async def discover(timeout: int = 5, clazz=None):
    """Broadcast an eISCP discovery query and collect responding receivers.

    Sends both the Onkyo ('!x') and Pioneer ('!p') ECNQSTN magic packets to
    the local broadcast address, then polls for replies until `timeout`
    seconds have elapsed.

    Args:
        timeout: how long (in seconds) to keep listening for replies.
        clazz: receiver class to instantiate per response; defaults to eISCP.

    Returns:
        List of receiver instances, de-duplicated by device identifier.
    """
    onkyo_magic = eISCPPacket("!xECNQSTN").get_raw()
    pioneer_magic = eISCPPacket("!pECNQSTN").get_raw()
    found_receivers = {}
    broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    broadcast_socket.setblocking(0)
    broadcast_address = get_current_broadcast_address()
    own_address = get_current_ip_address()
    async with discovery_lock:
        try:
            broadcast_socket.bind(
                (own_address, eISCP.ONKYO_PORT)
            )  # The port doesn't matter. It is "0" in the original implementation. MicroPython doesn't support this.
            broadcast_socket.sendto(onkyo_magic, (broadcast_address, eISCP.ONKYO_PORT))
            broadcast_socket.sendto(pioneer_magic, (broadcast_address, eISCP.ONKYO_PORT))
            start = time.ticks_ms()
            while True:
                # BUG FIX: `uasyncio` has no `select` attribute; the readiness
                # poll must use the `select` module instead.
                ready = select.select([broadcast_socket], [], [], 0.01)
                if not ready[0]:
                    await uasyncio.sleep_ms(100)
                else:
                    data, addr = broadcast_socket.recvfrom(1024)
                    # NOTE(review): parse_info can return None for malformed
                    # replies, which would raise below -- confirm desired.
                    info = parse_info(data)
                    receiver = (clazz or eISCP)(addr[0], int(info["iscp_port"]))
                    receiver.info = info
                    found_receivers[info["identifier"]] = receiver
                if start + timeout * 1000 < time.ticks_ms():
                    break
        finally:
            broadcast_socket.close()
    return list(found_receivers.values())
| {"/src/eiscp/eiscp.py": ["/src/eiscp/core.py"], "/src/eiscp/discovery.py": ["/src/eiscp/core.py", "/src/eiscp/eiscp.py"], "/src/eiscp/__init__.py": ["/src/eiscp/discovery.py", "/src/eiscp/eiscp.py"]} |
52,057 | vtt-info/eiscp-micropython | refs/heads/main | /src/eiscp/__init__.py | # flake8: noqa: F401
from .discovery import discover
from .eiscp import eISCP
| {"/src/eiscp/eiscp.py": ["/src/eiscp/core.py"], "/src/eiscp/discovery.py": ["/src/eiscp/core.py", "/src/eiscp/eiscp.py"], "/src/eiscp/__init__.py": ["/src/eiscp/discovery.py", "/src/eiscp/eiscp.py"]} |
52,058 | vtt-info/eiscp-micropython | refs/heads/main | /src/eiscp/core.py | import re
import struct
import time
from collections import namedtuple
class ISCPMessage(object):
    """Wraps a payload in the ISCP (Integra Serial Control Protocol) framing.

    ISCP consists of three command characters plus a variable-length
    parameter; this framing was originally used over a serial cable.
    """

    def __init__(self, data: str):
        self.data = data

    def __str__(self) -> str:
        # '!' start character, '1' = destination unit type (receiver),
        # CR terminator (CR, LF or CR+LF are all accepted per the docs).
        return "!1" + self.data + "\r"

    @classmethod
    def parse(cls, data):
        """Strip the '!1' prefix, trailing CR/LF and EOF byte from `data`."""
        EOF = "\x1a"
        TERMINATORS = ["\n", "\r"]
        assert data[:2] == "!1"
        end = -1
        # EOF can be followed by CR/LF/CR+LF -- skip at most two terminators.
        for _ in range(2):
            if data[end] in TERMINATORS:
                end -= 1
        assert data[end] == EOF
        return data[2:end]
class eISCPPacket(object):
    """Ethernet framing for ISCP: a fixed 16-byte header plus the message.

    Header layout ('!4sIIb3s'): magic b'ISCP', header size (always 16),
    payload size, protocol version, three reserved bytes.
    """

    header = namedtuple("header", ("magic", "header_size", "data_size", "version", "reserved"))

    def __init__(self, iscp_message: "Union[bytes, str]"):
        text = str(iscp_message)
        # Payload length goes into the header; the payload itself is
        # appended separately since struct has no variable-length strings.
        packed = struct.pack(
            "!4sIIb3s",
            b"ISCP",          # magic
            16,               # header size (16 bytes)
            len(text),        # data size
            0x01,             # version
            b"\x00\x00\x00",  # reserved
        )
        self._bytes = packed + text.encode("utf-8")

    def __str__(self):
        return self._bytes.decode("utf-8")

    def get_raw(self):
        """Return the full packet (header + payload) as bytes."""
        return self._bytes

    @classmethod
    def parse(cls, bytes):
        """Parse the eISCP package given by ``bytes`` and return its payload."""
        h = cls.parse_header(bytes[:16])
        payload = bytes[h.header_size:h.header_size + h.data_size].decode()
        assert len(payload) == h.data_size
        return payload

    @classmethod
    def parse_header(cls, bytes):
        """Parse a 16-byte eISCP header into a named tuple.

        Useful when streaming: the header tells you how many payload
        bytes to expect next.
        """
        assert len(bytes) == 16
        magic, header_size, data_size, version, reserved = struct.unpack("!4sIIb3s", bytes)
        magic = magic.decode()
        reserved = reserved.decode()
        # Strangely, the header itself carries a header_size field.
        assert magic == "ISCP"
        assert header_size == 16
        return eISCPPacket.header(magic, header_size, data_size, version, reserved)
def parse_info(data: bytes) -> "Dict[str, str]":
    """Parse an ECN discovery reply into a dict, or None if it doesn't match.

    The payload looks like ``!1ECNTX-NR609/60128/DX/<identifier>``; the
    identifier is truncated to 12 characters.
    """
    response = eISCPPacket.parse(data)
    matched = re.match(r"""!(\d)ECN([^/]*)/(\d\d\d\d\d)/(\w\w)/(.*)""", response.strip())
    if matched is None:
        return None
    # Identifiers longer than 12 characters are truncated.
    identifier = matched.group(5)[:12]
    return {
        "device_category": matched.group(1),
        "model_name": matched.group(2),
        "iscp_port": matched.group(3),
        "area_code": matched.group(4),
        "identifier": identifier,
    }
def command_to_packet(command: str) -> bytes:
    """
    Convert an ascii command like (PVR00) to the binary data we
    need to send to the receiver.
    """
    # Wrap in ISCP framing ('!1...\r'), then in the eISCP ethernet header.
    return eISCPPacket(ISCPMessage(command)).get_raw()
async def filter_for_message(getter_func: "Callable[[], Optional[str]]", msg: str) -> str:
    """Poll ``getter_func`` until a message matching ``msg``'s command group
    arrives, or raise ValueError after ~1.5 seconds.

    Matching means the same three-character command group: for a sent
    MVLUP we would accept MVL13 in response. CTV (HDMI-CEC) commands get
    no acknowledgement, so the sent message itself is returned.
    """
    deadline = time.time() + 1.5
    prefix = msg[:3]
    while True:
        candidate = await getter_func(0.05)
        # ISCP commands are always three characters; compare the group.
        if candidate and candidate[:3] == prefix:
            return candidate
        # exception for HDMI-CEC commands (CTV) since they don't provide
        # any response/confirmation
        if "CTV" in prefix:
            return msg
        # The protocol docs claim a response within 50ms; in practice
        # 200-300ms was needed, so a generous 1.5s budget is used.
        if time.time() > deadline:
            raise ValueError("Timeout waiting for response.")
def command_to_iscp(command: str, argument: str) -> str:
    """Concatenate a three-letter command and its argument, e.g. 'MVL' + '50'."""
    return command + argument
def iscp_to_command(iscp_message: str) -> "Tuple[str, str]":
    """Split a raw ISCP string into its (command, argument) pair.

    The command group is always the first three characters.
    """
    return iscp_message[:3], iscp_message[3:]
| {"/src/eiscp/eiscp.py": ["/src/eiscp/core.py"], "/src/eiscp/discovery.py": ["/src/eiscp/core.py", "/src/eiscp/eiscp.py"], "/src/eiscp/__init__.py": ["/src/eiscp/discovery.py", "/src/eiscp/eiscp.py"]} |
52,065 | NervenCid/omnibnk | refs/heads/master | /users/views.py | from django.shortcuts import render, redirect
#Importamos el modulo de mensajes
from django.contrib import messages
#Importamos
from .forms import UserRegisterForm
#Importamos los decoradores
from django.contrib.auth.decorators import login_required
# User views.
# Registration view
def register(request):
    """Handle user sign-up: show the form on GET, create the user on valid POST."""
    # On POST, validate the submitted registration form.
    if request.method == 'POST':
        # Bind the registration form to the submitted data.
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            # Persist the new user account.
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Bienvenido usuario {username}, por favor proceda a hacer login')
            return redirect('login')
    else:
        form = UserRegisterForm()
    # Render the registration template (fresh form on GET, bound form on errors).
    return render(request, 'users/register.html', {'form': form})
# User profile view (requires an authenticated session).
@login_required
def profile(request):
    """Render the logged-in user's profile page."""
    return render(request, 'users/profile.html')
| {"/movies/admin.py": ["/movies/models.py"], "/movies/views.py": ["/movies/models.py"]} |
52,066 | NervenCid/omnibnk | refs/heads/master | /movies/admin.py | from django.contrib import admin
#Importamos los modelos
from .models import Post
#Registramos los modelos dentro del 'admin'
admin.site.register(Post) | {"/movies/admin.py": ["/movies/models.py"], "/movies/views.py": ["/movies/models.py"]} |
52,067 | NervenCid/omnibnk | refs/heads/master | /movies/models.py | #Importamos el modulo para modelos
from django.db import models
#Importamos el modulos para autenticacion de usuarios
from django.contrib.auth.models import User
#Importamos el modulo 'reverse'
from django.urls import reverse
#Creamos el modelo de Post de las peliculas
class Post(models.Model):
movie_name = models.CharField(max_length=100)
image_url = models.CharField(max_length=1000)
director = models.CharField(max_length=100)
language = models.CharField(max_length=50)
date = models.CharField(max_length=10)
#Definimos al autor como llave foranea
author = models.ForeignKey(User, on_delete=models.CASCADE)
#
def __str__(self):
return self.movie_name
#Usamos la absolute_url para redireccionar apropiadamente despues de crear el post
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
| {"/movies/admin.py": ["/movies/models.py"], "/movies/views.py": ["/movies/models.py"]} |
52,068 | NervenCid/omnibnk | refs/heads/master | /movies/views.py | #Importamos el modulo render
from django.shortcuts import render
#Importamos los 'mixin'
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
#Importamos el modulo 'HttpResponse'
from django.http import HttpResponse
#Importamos el modulo de lista de vistas
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
#Importamos los modelos
from .models import Post
#Creamos una lista dummy con los posts 'solo pruebas'
'''
posts = [
{
'movie_name': 'Alien',
'image_url': 'https://m.media-amazon.com/images/M/MV5BMmQ2MmU3NzktZjAxOC00ZDZhLTk4YzEtMDMyMzcxY2IwMDAyXkEyXkFqcGdeQXVyNzkwMjQ5NzM@._V1_.jpg',
'director': 'Ridley Scott',
'language': 'English',
'date':'1979',
},
{
'movie_name': 'The Terminator',
'image_url': 'https://m.media-amazon.com/images/M/MV5BYTViNzMxZjEtZGEwNy00MDNiLWIzNGQtZDY2MjQ1OWViZjFmXkEyXkFqcGdeQXVyNzkwMjQ5NzM@._V1_.jpg',
'director': 'James Cameron',
'language': 'English',
'date': '1984',
},
{
'movie_name': 'Amelie',
'image_url': 'http://www.montmartre-guide.com/wp-content/uploads/2014/07/amelie_affiche.jpg',
'director': 'Jean-Pierre Jeunet',
'language': 'French',
'date': '2001',
},
]
'''
#Creamos las vistas
#Vista 'home'
def home(request):
#Asignamos el contexto
context = {
#Pasamos los datos al contexto
'posts': Post.objects.all()
}
#Renderizamos el archivo 'home.html'
return render(request, 'movies/home.html', context)
#Vista 'about'
def about(request):
#Renderizamos el archivo 'about.html'
return render(request, 'movies/about.html')
#Lista de vistas
class PostListView(ListView):
model = Post
#<app>/<model>_<viewtype>.html
template_name = 'movies/home.html'
context_object_name = 'posts'
#Ordenamos los posts
ordering = ['-date']
#Vista en detalle
class PostDetailView(DetailView):
model = Post
#Vista para crear un post
class PostCreateView(LoginRequiredMixin, CreateView):
model = Post
fields = ['movie_name', 'image_url', 'director', 'language', 'date']
#Validamos
def form_valid(self, form):
form.instance.author = self.request.user
#Ignorar el error de super
return super().form_valid(form)
#Vista para editar un post
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Post
fields = ['movie_name', 'image_url', 'director', 'language', 'date']
#Validamos
def form_valid(self, form):
form.instance.author = self.request.user
#Ignorar el error de super
return super().form_valid(form)
#Hacemos un testeo para prevenir que otros usuarios editen post que no fueron creados por ellos
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
#Vista para eliminar un post
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Post
#Redirigimos al 'home'
success_url = '/'
def test_func(self):
post = self.get_object()
if self.request.user == post.author:
return True
return False
| {"/movies/admin.py": ["/movies/models.py"], "/movies/views.py": ["/movies/models.py"]} |
52,071 | samueldaviddelacruz/restaurant_graphql_CRUD | refs/heads/master | /restaurant_graphql_api/schema.py | import graphene
import restaurantapi.schema
class Query(restaurantapi.schema.Query,graphene.ObjectType):
# This class will inherit from multiple Queries
# as we begin to add more apps to our project
pass
class Mutations(restaurantapi.schema.Mutation,graphene.ObjectType):
# This class will inherit from multiple Queries
# as we begin to add more apps to our project
pass
schema = graphene.Schema(query=Query,mutation=Mutations) | {"/restaurant_graphql_api/schema.py": ["/restaurantapi/schema.py"], "/restaurantapi/schema.py": ["/restaurantapi/models.py"]} |
52,072 | samueldaviddelacruz/restaurant_graphql_CRUD | refs/heads/master | /restaurantapi/models.py | from django.db import models
# Create your models here.
class Category(models.Model):
name = models.TextField(null=False, blank=False)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.name
class Dish(models.Model):
category = models.ForeignKey(Category,on_delete=models.CASCADE,related_name="category")
name = models.TextField()
description = models.TextField(null=True, blank=True)
price = models.DecimalField(decimal_places=2,max_digits=6,default=0.0)
def __str__(self):
return self.name
| {"/restaurant_graphql_api/schema.py": ["/restaurantapi/schema.py"], "/restaurantapi/schema.py": ["/restaurantapi/models.py"]} |
52,073 | samueldaviddelacruz/restaurant_graphql_CRUD | refs/heads/master | /restaurantapi/schema.py | import graphene
from graphene_django.types import DjangoObjectType
from restaurantapi.models import Category,Dish
class CategoryType(DjangoObjectType):
class Meta:
model = Category
class DishType(DjangoObjectType):
class Meta:
model = Dish
class Query(object):
category = graphene.Field(CategoryType,
id=graphene.Int(),
name=graphene.String())
all_categories = graphene.List(CategoryType)
dish = graphene.Field(DishType,
id=graphene.Int(),
name=graphene.String())
all_dishes = graphene.List(DishType)
all_dishes_by_category = graphene.List(DishType,category_id=graphene.Int())
def resolve_all_categories(self, info, **kwargs):
return Category.objects.all()
def resolve_all_dishes(self, info, **kwargs):
# We can easily optimize query count in the resolve method
return Dish.objects.select_related('category').all()
def resolve_all_dishes_by_category(self, info, **kwargs):
id = kwargs.get('category_id')
if id is not None:
return Dish.objects.select_related('category').filter(category=id)
return None
def resolve_category(self, info, **kwargs):
id = kwargs.get('id')
name = kwargs.get('name')
if id is not None:
return Category.objects.get(pk=id)
if name is not None:
return Category.objects.get(name=name)
return None
def resolve_dish(self, info, **kwargs):
id = kwargs.get('id')
name = kwargs.get('name')
if id is not None:
return Dish.objects.get(pk=id)
if name is not None:
return Dish.objects.get(name=name)
return None
class CreateCategoryInput(graphene.InputObjectType):
name = graphene.String(required=True)
description = graphene.String(required=False)
class CreateCategory(graphene.Mutation):
class Arguments:
category_data = CreateCategoryInput(required=True)
ok = graphene.Boolean()
category = graphene.Field(CategoryType)
def mutate(self, info, category_data=None):
#category = CategoryType(name=name,description="aa")
ok=True
obj = Category.objects.create(name=category_data.name,description=category_data.description)
return CreateCategory(category=obj,ok=ok)
class UpdateCategoryInput(graphene.InputObjectType):
name = graphene.String(required=False)
description = graphene.String(required=False)
class UpdateCategory(graphene.Mutation):
class Arguments:
category_id=graphene.Int(required=True)
category_data = UpdateCategoryInput(required=False)
ok = graphene.Boolean()
category = graphene.Field(CategoryType)
def mutate(self, info, category_id,category_data=None):
old_object = Category.objects.get(id=category_id)
updated_values = {'name': old_object.name, 'description': old_object.description}
if category_data is not None:
if category_data.name is not None:
updated_values['name'] = category_data.name
if category_data.description is not None:
updated_values['description'] = category_data.description
obj,created = Category.objects.update_or_create(id=category_id,defaults=updated_values)
ok = True
return UpdateCategory(category=obj,ok=ok)
class DeleteCategory(graphene.Mutation):
class Arguments:
category_id=graphene.Int(required=True)
deleted = graphene.Boolean()
category_deleted_id = graphene.Int()
def mutate(self, info, category_id):
Category.objects.filter(id=category_id).delete()
deleted = True
return DeleteCategory(deleted=deleted,category_deleted_id=category_id)
class CreateDishInput(graphene.InputObjectType):
name = graphene.String(required=True)
category_id = graphene.Int(required=True)
description = graphene.String(required=False)
price = graphene.Int(required=False)
class CreateDish(graphene.Mutation):
class Arguments:
create_dish_input= CreateDishInput(required=False)
ok = graphene.Boolean()
dish = graphene.Field(DishType)
def mutate(self,info,create_dish_input):
obj = Dish.objects.create(name=create_dish_input.name,
category_id=create_dish_input.category_id,
description=create_dish_input.description,
price=create_dish_input.price)
ok = True
return CreateDish(dish = obj,ok = ok)
class UpdateDishInput(graphene.InputObjectType):
name = graphene.String(required=False)
description = graphene.String(required=False)
price = graphene.Int(required=False)
class UpdateDish(graphene.Mutation):
class Arguments:
dish_id = graphene.Int(required=True)
dish_data = UpdateDishInput(required=False)
ok = graphene.Boolean()
dish = graphene.Field(DishType)
def mutate(self,info,dish_id,dish_data=None):
old_object = Dish.objects.get(id=dish_id)
updated_values = {'name': old_object.name, 'description': old_object.description,'price':old_object.price}
if dish_data is not None:
if dish_data.name is not None:
updated_values['name'] = dish_data.name
if dish_data.description is not None:
updated_values['description'] = dish_data.description
if dish_data.price is not None:
updated_values['price'] = dish_data.price
obj,created = Dish.objects.update_or_create(id=dish_id,defaults=updated_values)
ok = True
return UpdateDish(dish=obj, ok=ok)
class DeleteDish(graphene.Mutation):
class Arguments:
dish_id = graphene.Int(required=True)
deleted = graphene.Boolean()
dish_deleted_id = graphene.Int()
def mutate(self,info,dish_id):
Dish.objects.filter(id=dish_id).delete()
deleted = True
return DeleteDish(deleted=deleted,dish_deleted_id=dish_id)
class Mutation(graphene.AbstractType):
create_category = CreateCategory.Field()
update_category = UpdateCategory.Field()
delete_category = DeleteCategory.Field()
create_dish = CreateDish.Field()
update_dish = UpdateDish.Field()
delete_dish = DeleteDish.Field()
| {"/restaurant_graphql_api/schema.py": ["/restaurantapi/schema.py"], "/restaurantapi/schema.py": ["/restaurantapi/models.py"]} |
52,074 | samueldaviddelacruz/restaurant_graphql_CRUD | refs/heads/master | /restaurantapi/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-27 21:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=100)),
('Description', models.TextField(blank=True, null=True)),
('CreatedAt', models.DateTimeField(verbose_name='Category Creation date')),
],
),
migrations.CreateModel(
name='Dish',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.TextField()),
('Description', models.TextField(blank=True, null=True)),
('Price', models.DecimalField(decimal_places=2, default=0.0, max_digits=6)),
('Category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='restaurantapi.Category')),
],
),
]
| {"/restaurant_graphql_api/schema.py": ["/restaurantapi/schema.py"], "/restaurantapi/schema.py": ["/restaurantapi/models.py"]} |
52,075 | samueldaviddelacruz/restaurant_graphql_CRUD | refs/heads/master | /restaurantapi/migrations/0002_auto_20171227_2137.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-27 21:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurantapi', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='category',
old_name='Description',
new_name='description',
),
migrations.RenameField(
model_name='category',
old_name='Name',
new_name='name',
),
migrations.RenameField(
model_name='dish',
old_name='Category',
new_name='category',
),
migrations.RenameField(
model_name='dish',
old_name='Description',
new_name='description',
),
migrations.RenameField(
model_name='dish',
old_name='Name',
new_name='name',
),
migrations.RenameField(
model_name='dish',
old_name='Price',
new_name='price',
),
migrations.RemoveField(
model_name='category',
name='CreatedAt',
),
]
| {"/restaurant_graphql_api/schema.py": ["/restaurantapi/schema.py"], "/restaurantapi/schema.py": ["/restaurantapi/models.py"]} |
52,084 | Straeng/nibe2influx | refs/heads/master | /src/nibe2influx_runner.py | from nibe2influx.nibe2influx import main
class TestDatabase:
def persist(self, measurement):
print(measurement)
if __name__ == '__main__':
main(db=TestDatabase()) | {"/src/nibe2influx/__main__.py": ["/src/nibe2influx/nibe2influx.py"], "/src/nibe2influx/nibe2influx.py": ["/src/nibe2influx/nibeapi.py"]} |
52,085 | Straeng/nibe2influx | refs/heads/master | /src/nibe2influx/__main__.py | from .nibe2influx import main
main() | {"/src/nibe2influx/__main__.py": ["/src/nibe2influx/nibe2influx.py"], "/src/nibe2influx/nibe2influx.py": ["/src/nibe2influx/nibeapi.py"]} |
52,086 | Straeng/nibe2influx | refs/heads/master | /src/setup.py | from setuptools import setup
setup(
name="nibe2influx",
packages=["nibe2influx"],
version="0.1",
# Dependencies
install_requires=['requests_oauthlib>=0.8.0', 'influxdb>=4.1'],
package_data={
# Additional files to include
'': ['sensors.json', 'config.json.template'],
},
entry_points = {
"console_scripts": ["nibe2influx = nibe2influx.nibe2influx:main"]
},
author="Martin Sträng",
author_email="martin@tilljorden.com",
description="Read parameters from Nibe Uplink and store in InfluxDB.",
license="MIT",
keywords="",
url="",
) | {"/src/nibe2influx/__main__.py": ["/src/nibe2influx/nibe2influx.py"], "/src/nibe2influx/nibe2influx.py": ["/src/nibe2influx/nibeapi.py"]} |
52,087 | Straeng/nibe2influx | refs/heads/master | /src/nibe2influx/nibe2influx.py | import json
import time
import itertools
import os
import sys
from influxdb import InfluxDBClient
from .nibeapi import NibeApi
_CONFIG_DIR = sys.prefix+'/etc/nibe2influx/'
_CONFIG_FILE = _CONFIG_DIR+'config.json'
_SENSOR_FILE = _CONFIG_DIR+'sensors.json'
_TOKEN_FILE = _CONFIG_DIR+'token.json'
class InfluxDatabase():
def __init__(self, conf):
addr = conf['addr']
port = conf['port']
self.dbname = conf['dbname']
self.client = InfluxDBClient(addr, port)
self.client.create_database(self.dbname)
def persist(self, measurement):
self.client.write_points(measurement, database=self.dbname)
def update_measurement(measurements, sensors, apiresp):
for i in range(len(apiresp)):
if sensors[i]['id'] != apiresp[i]['parameterId']:
return False
raw_value = float(apiresp[i]['rawValue'])
scale = float(sensors[i]['scale'])
value = raw_value*scale
measurements[i]['fields']['value'] = value
return True
def config_template():
template = {
"nibeapi" : {
"client" : {
"client_id":"",
"client_secret":""
},
"system_id" : 1234,
"update_interval_seconds" : 30,
"redirect_uri" : ""
},
"influx" : {
"addr" : "127.0.0.1",
"port" : 8086,
"dbname" : "nibe"
}
}
with open(_CONFIG_FILE, 'w') as cf:
json.dump(template, cf, indent=4)
def sensor_template():
template = {
"measurement1": [
{"id" : 1234, "scale":0.1, "tags":{
"name1": "value1",
"name2": "value2",
}},
{"id" : 4321, "scale":1, "tags":{
"name1": "value1",
"name2": "value2",
}}
],
"measurement2": [
{"id" : 1234, "scale":0.1, "tags":{
"name1": "value1",
"name2": "value2",
}},
{"id" : 4321, "scale":1, "tags":{
"name1": "value1",
"name2": "value2",
}}
]
}
with open(_SENSOR_FILE, 'w') as sf:
json.dump(template, sf, indent=4)
def config_files():
config_ok = True
# Check config dir
if not os.path.isdir(_CONFIG_DIR):
try:
os.makedirs(_CONFIG_DIR)
print('Created {}'.format(_CONFIG_DIR))
except:
print('ERROR: Could not create {}'.format(_CONFIG_DIR))
# Check config file
try:
with open(_CONFIG_FILE) as cf:
conf = json.load(cf)
except:
print('{} not found. Creating template.'.format(_CONFIG_FILE))
config_template()
config_ok = False
try:
with open(_SENSOR_FILE) as sf:
sensors = json.load(sf)
except:
print('{} not found. Creating template.'.format(_SENSOR_FILE))
sensor_template()
config_ok = False
if config_ok:
return conf, sensors
else:
print('Update templates with proper data before running the application.')
exit(0)
def main(db=None):
conf, sensors = config_files()
idlists={}
measurements={}
for group in sensors:
# Generate list of IDs as required by Nibe API.
idlists[group] = [sensor['id'] for sensor in sensors[group]]
measurements[group] = []
# Generate structure required by InfluxDB.
for sensor in sensors[group]:
measurements[group].append({'measurement':group, 'fields':{'value':''}, 'tags':sensor['tags']})
update_interval = conf['nibeapi']['update_interval_seconds']
api = NibeApi(conf['nibeapi'], _TOKEN_FILE)
if db is None:
db = InfluxDatabase(conf['influx'])
for m in itertools.cycle(measurements):
apiresp = api.get_parameters(idlists[m])
if update_measurement(measurements[m], sensors[m], apiresp):
db.persist(measurements[m])
else:
print('ERROR: api returndata mismatch')
exit(1)
time.sleep(update_interval)
| {"/src/nibe2influx/__main__.py": ["/src/nibe2influx/nibe2influx.py"], "/src/nibe2influx/nibe2influx.py": ["/src/nibe2influx/nibeapi.py"]} |
52,088 | Straeng/nibe2influx | refs/heads/master | /src/nibe2influx/nibeapi.py | import json
from requests_oauthlib import OAuth2Session
class NibeApi:
'''
TODO: Currently assumes that authorization is already done and
there is a tokej.json file.
'''
_BASE_URL = 'https://api.nibeuplink.com'
_TOKEN_URL = _BASE_URL+'/oauth/token'
_API_URL = _BASE_URL+'/api/v1'
def __init__(self, conf, token_file):
self.sysid = conf['system_id']
self.token_file = token_file
client_conf = conf['client']
token = self.recall_token()
self.session = OAuth2Session(
client_id = client_conf['client_id'],
redirect_uri = conf['redirect_uri'],
auto_refresh_url = self._TOKEN_URL,
auto_refresh_kwargs = client_conf,
scope = [ 'READSYSTEM' ],
token = token,
token_updater = self.persist_token
)
if not token:
self.request_new_token(client_conf['client_secret'])
def persist_token(self, token):
with open(self.token_file, 'w') as tf:
json.dump(token, tf)
def recall_token(self):
try:
with open(self.token_file) as tf:
token = json.load(tf)
if 'access_token' in token and 'refresh_token' in token:
return token
else:
return None
except:
# TODO
print("ERROR: Currently requires a pre-existing token file.")
exit(1)
return None
def get(self, res, params):
url = '{}{}'.format(self._API_URL, res)
result = self.session.get(url, params=params, headers={})
return result.json()
def request_new_token(self, client_secret):
token = self.session.fetch_token(self._TOKEN_URL,
client_secret = client_secret,
authorization_response = '') #TODO
print(token)
self.persist_token(token)
return token
def get_categories(self):
return self.get('/systems/{}/serviceinfo/categories'.format(self.sysid), {})
def get_category(self, cid):
return self.get('/systems/{}/serviceinfo/categories/{}'.format(self.sysid, cid), {})
def get_parameters(self, paramlist):
return self.get('/systems/{}/parameters'.format(self.sysid), {'parameterIds':paramlist}) | {"/src/nibe2influx/__main__.py": ["/src/nibe2influx/nibe2influx.py"], "/src/nibe2influx/nibe2influx.py": ["/src/nibe2influx/nibeapi.py"]} |
52,092 | Talha-Altair/Highcharts_flask_csv-APP | refs/heads/main | /app.py | '''
Source:
Author: Altair
'''
from flask import Flask,render_template, jsonify, request
import random
import json
import data_c
app = Flask(__name__)
@app.route("/", methods=["GET","POST"])
def startpy():
result = {
"Greetings" : "Altair"
}
#return jsonify(result)
return render_template("ca-wildfires.html")
'''
http://0.0.0.0:3091/api/data
'''
@app.route("/api/data", methods=["GET"])
def api_get_data():
result = data_c.get_data()
# result_dict = {
# 'year' : year,
# 'pytorch' : pytorch,
# 'tensorFlow' : tensorFlow
# }
return jsonify(result)
'''
http://0.0.0.0:3091/api/add
http://0.0.0.0:3091/api/add?year=2017&ontario_tourist=20345&quebec_tourist=200
http://0.0.0.0:3000/api/add?year=2021&pytorch=180&tensorFlow=90
http://127.0.0.1:6969/api/add?year=&wildfires=
'''
@app.route("/api/add", methods=["GET"])
def api_add_data():
year = request.values.get('year')
wildfires = request.values.get('wildfires')
result = {
'year' : year,
'wildfires' : wildfires
}
result_data = data_c.add_row(year, wildfires)
return jsonify(result)
if __name__ == "__main__":
app.run(debug = True,port = 6969 )
| {"/app.py": ["/data_c.py"]} |
52,093 | Talha-Altair/Highcharts_flask_csv-APP | refs/heads/main | /data_c.py | import pandas as pd
def get_data():
df = pd.read_csv('fire.csv')
year = df['year'].tolist()
wildfires = df['wildfires'].tolist()
result_dict = {
'year' : year,
'wildfires' : wildfires
}
#print(result_dict)
return result_dict
def add_row (year, wildfires):
df = pd.read_csv('fire.csv')
new_row = {
'year' : year,
'wildfires' : wildfires
}
print(df)
df = df.append(new_row, ignore_index=True)
print(df)
df.to_csv('fire.csv')
if __name__ == "__main__":
get_data()
| {"/app.py": ["/data_c.py"]} |
52,094 | FToovvr/adnmb-quests-watcher | refs/heads/master | /commons/config.py | from typing import Optional, List
from dataclasses import dataclass
from pathlib import Path
from os.path import join
import re
import yaml
import anobbsclient
@dataclass(frozen=True)
class ClientConfig:
host: str
client_user_agent: str
client_appid: Optional[str]
user_userhash: str
def create_client(self) -> anobbsclient.Client:
return anobbsclient.Client(
user_agent=self.client_user_agent,
host=self.host,
appid=self.client_appid,
default_request_options={
'user_cookie': anobbsclient.UserCookie(userhash=self.user_userhash),
'login_policy': 'when_required',
'gatekeeper_page_number': 100,
},
)
@dataclass(frozen=True)
class DatabaseConfig:
host: str
dbname: str
user: str
password: str
@property
def connection_string(self) -> str:
# XXX: 也不知道够不够安全,不过反正也是自用
for x in [self.host, self.dbname, self.user, self.password]:
assert(re.search(r'\s', x) is None)
return f'dbname={self.dbname} user={self.user} password={self.password} host={self.host}'
@dataclass(frozen=True)
class PublishingConfig:
page_capacity: int
including: List[str]
@dataclass(frozen=True)
class Config:
board_id: int
trend_thread_id: Optional[int]
daily_qst_thread_id: Optional[int]
completion_registry_thread_id: int
database: DatabaseConfig
client: Optional[ClientConfig]
publishing: PublishingConfig
def load_config(path: str) -> Config:
path = Path(path)
with open(path, 'r') as config_file:
obj = yaml.load(config_file.read(), Loader=yaml.SafeLoader)
consts = obj['consts']
database = obj['database']
with open(join(path.parent, database['password-file']), 'r') as pw_file:
database_password = pw_file.read().strip()
database = DatabaseConfig(
host=database['host'],
dbname=database['dbname'],
user=database['user'],
password=database_password,
)
client = obj['client']
if client is not None:
with open(join(path.parent, client['file']), 'r') as client_file:
client_obj = yaml.load(
client_file.read(), Loader=yaml.SafeLoader)
client = ClientConfig(
host=client_obj['host'],
client_user_agent=client_obj['client']['user-agent'],
client_appid=client_obj['client']['appid'],
user_userhash=client_obj['user']['userhash'],
)
publishing = obj['publishing']
publishing = PublishingConfig(
page_capacity=publishing['page-capacity'],
including=publishing['including'],
)
return Config(
board_id=consts['board-id'],
trend_thread_id=consts['trend-thread-id'],
daily_qst_thread_id=consts['daily-qst-thread-id'],
completion_registry_thread_id=consts['completion-registry-thread-id'],
database=database,
client=client,
publishing=publishing,
)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,095 | FToovvr/adnmb-quests-watcher | refs/heads/master | /0_tick.py | #!/usr/bin/env python3
from datetime import datetime, timedelta, date
import os
import subprocess
import pprint
import time
from tendo import singleton
import psycopg2
import sys
os.chdir(sys.path[0]) # noqa
from commons.consts import local_tz, get_target_date
from commons.config import load_config
from models.activity import Activity
from models.publication_record import PublicationRecord
LOG_FILE_PATH_FORMAT = 'logs/%Y-%m-%d'
def round_to_minutes(datetime: datetime) -> datetime:
if not datetime:
return None
return datetime.replace(second=0, microsecond=0)
def main():
singleton.SingleInstance()
now = round_to_minutes(datetime.now(tz=local_tz))
today = now.date()
prepare_log_folder(today=today)
if len(sys.argv) > 1 and sys.argv[1] == '--test':
print('testing…')
config_file_name = './config.test.yaml'
for x in range(3, 0, -1):
print(x)
time.sleep(1)
print('start testing')
else:
config_file_name = './config.yaml'
config = load_config(config_file_name)
with psycopg2.connect(config.database.connection_string) as conn:
conn: psycopg2._psycopg.connection = conn
# 每五分钟采集一次
if now.minute % 5 == 0:
last_run_at_collect = round_to_minutes(
Activity.get_last_activity_run_at(conn, 'collect'))
if not last_run_at_collect or now >= last_run_at_collect + timedelta(minutes=5):
print('run 1_collect')
result = subprocess.run([
'./1_collect.py',
'-c', config_file_name,
])
assert(result.returncode == 0)
# 每单数小时 55 分检查一次完结情况
if now.hour % 2 == 1 and now.minute >= 55:
last_run_at_check_completed = round_to_minutes(
Activity.get_last_activity_run_at(conn, 'check_completed'))
if not last_run_at_check_completed or now >= last_run_at_check_completed + timedelta(hours=1):
print('run 2.6_check_completed')
result = subprocess.run([
'./2.6_check_completed.py',
'-c', config_file_name,
])
assert(result.returncode == 0)
target_date = get_target_date(now)
# 报告中午 12 点后发
if now.hour >= 12 and \
not PublicationRecord.is_report_published(
conn=conn, subject_date=target_date, report_type='trend'):
# 先检查一下报告的那天有没有那些串消失了
last_run_at_check_disappeared = round_to_minutes(
Activity.get_last_activity_run_at(conn, 'check_disappeared'))
# 为了防止发送失败导致此步骤频繁执行,每次执行间隔至少 1 小时
if not last_run_at_check_disappeared or now >= last_run_at_check_disappeared + timedelta(hours=1):
print('run 2.5_check_disappeared.py')
result = subprocess.run([
'./2.5_check_disappeared.py', target_date.isoformat(),
'-c', config_file_name,
])
assert(result.returncode == 0)
# 发布报告
print('run 3_generate_text_report.py')
result = subprocess.run([
'./3_generate_text_report.py', target_date.isoformat(),
'-c', config_file_name,
'--check-sage',
'--publish', '--notify-daily-qst',
])
assert(result.returncode == 0)
def prepare_log_folder(today: date):
yesterday = today - timedelta(days=1)
# 如果不存在,创建今日的日志文件夹
os.makedirs(today.strftime(LOG_FILE_PATH_FORMAT), exist_ok=True)
# 如果存在,归档昨日的日志
yesterday_log_folder = yesterday.strftime(LOG_FILE_PATH_FORMAT)
if os.path.isdir(yesterday_log_folder):
result = subprocess.run(
['tar', 'czf', f'{yesterday_log_folder}.tgz', yesterday_log_folder])
if result.returncode == 0:
subprocess.run(['/bin/rm', '-rf', yesterday_log_folder])
if __name__ == '__main__':
main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,096 | FToovvr/adnmb-quests-watcher | refs/heads/master | /1_collect.py | #!/usr/bin/env python3
from __future__ import annotations
from typing import Optional, Dict, List, Any
from dataclasses import dataclass, field
import traceback
import argparse
import sys
import os
from datetime import datetime, timedelta
import json
import logging
import logging.config
import traceback
import psycopg2
import anobbsclient
from anobbsclient.walk import create_walker, BoardWalkTarget, ReversalThreadWalkTarget
from commons.consts import local_tz
from commons.config import load_config
from models.activity import Activity, TotalBandwidthUsage, Stats
from models.collecting import DB
# 默认单线程
logging.config.fileConfig('logging.1_collect.conf')
@dataclass(frozen=True)
class Arguments:
config_file_path: str
def parse_args(args: List[str]) -> Arguments:
parser = argparse.ArgumentParser(
description='采集版块内容。',
)
parser.add_argument(
'-c', '--config', type=str, default='./config.yaml',
dest='config_file_path',
help='配置文件路径',
)
parsed = parser.parse_args(args)
return Arguments(
config_file_path=parsed.config_file_path,
)
def main():
args = parse_args(sys.argv[1:])
config = load_config(args.config_file_path)
with psycopg2.connect(config.database.connection_string) as conn_activity, \
psycopg2.connect(config.database.connection_string) as conn_db:
activity = Activity(conn=conn_activity,
activity_type='collect',
run_at=datetime.now(tz=local_tz))
db = DB(conn=conn_db,
completion_registry_thread_id=config.completion_registry_thread_id)
stats = Stats()
fetching_since = activity.should_collect_since
is_successful = False
message = None
try:
fetch_board(db=db, activity=activity, client=config.client.create_client(),
board_id=config.board_id, fetching_since=fetching_since, stats=stats)
is_successful = True
except:
exc_text = traceback.format_exc()
logging.critical(exc_text)
message = exc_text
finally:
activity.report_end(is_successful, message, stats)
if is_successful:
logging.info("成功结束")
else:
exit(1)
def fetch_board(db: DB, activity: Activity, client: anobbsclient.Client,
                board_id: int, fetching_since: datetime, stats: Stats):
    """Collect every thread on *board_id* modified since *fetching_since*,
    along with the replies in the target range, recording everything through
    *db* and accumulating request/bandwidth counters into *stats*.

    Two phases: first walk the board pages to list candidate threads, then
    for each thread decide — from its preview (the five replies shown on the
    board) and previously-recorded data — whether the thread itself must be
    walked in reverse to fetch the missing replies.
    """
    logger = logging.getLogger('FETCH')
    # Phase 1: walk board pages starting at page 1, stopping before threads
    # older than the cut-off.
    walker = create_walker(
        target=BoardWalkTarget(
            board_id=board_id,
            start_page_number=1,
            stop_before_datetime=fetching_since,
        ),
        client=client,
    )
    is_first_found_thread = True
    threads_on_board: List[anobbsclient.BoardThread] = []
    bandwidth_usage_for_board = TotalBandwidthUsage()
    for (pn, page, usage) in walker:
        logger.info(f'获取到版块第 {pn} 页。纳入串数 = {len(page)}')
        bandwidth_usage_for_board.add(usage)
        stats.board_request_count += 1
        threads_on_board += page
    stats.total_bandwidth_usage.add(bandwidth_usage_for_board.total)
    logger.info(f'完成获取版块。总共纳入串数 = {len(threads_on_board)},'
                + f'期间 (上传字节数, 下载字节数) = {bandwidth_usage_for_board.total}')
    now = datetime.now(tz=local_tz)
    # Phase 2: process each candidate thread.
    for (i, thread) in enumerate(threads_on_board):
        logger.debug(f'串 #{i}。串号 = {thread.id},'
                     + f'最后修改时间 = {thread.last_modified_time}')
        if is_first_found_thread:
            # The first thread seen is the most recently modified one; its
            # timestamp is the upper bound of this collection run.
            is_first_found_thread = False
            activity.report_collecting_range(
                since=fetching_since, until=thread.last_modified_time)
        is_thread_recorded = db.is_thread_recorded(thread.id)
        if not is_thread_recorded:
            stats.new_thread_count += 1
        # Record or update the thread itself.
        # (current_reply_count is recorded later together with the replies.)
        db.record_thread(thread, board_id=board_id, updated_at=now)
        if len(thread.replies) == 0:
            assert(thread.total_reply_count == 0)
            logger.debug(f'串 #{i} 暂无回应,到此结束')
            continue
        # How to decide whether a reply is a fetch target depends on whether
        # replies of this thread were recorded on an earlier run.
        latest_seen_reply_id = \
            db.try_find_thread_latest_seen_reply_id(thread_id=thread.id)
        has_old_records = latest_seen_reply_id is not None
        if has_old_records:
            # Lower bound: the largest reply id previously recorded.
            def is_target(x): return x.id > latest_seen_reply_id
            logger.debug(f'串 #{i} 是之前已经抓取过的串,'
                         + f'将会通过之前抓取到的最大串号作为范围的下界')
        else:
            # Lower bound: the configured cut-off time.
            def is_target(x): return x.created_at >= fetching_since
            logger.debug(f'串 #{i} 是之前曾未抓取过的串,'
                         + f'将会通过规定的下界时间作为范围的下界')
        new_responses_in_preview = list(
            [post for post in thread.replies if is_target(post)])
        if thread.total_reply_count <= 5 \
                or not is_target(thread.replies[0]):
            # Everything we need is already in the preview; no need to walk
            # into the thread itself.
            # TODO: deciding "no remaining replies" (total_reply_count <= 5)
            # should happen on the API side.
            if len(new_responses_in_preview) > 0:
                if is_thread_recorded:
                    stats.affected_thread_count += 1
                stats.new_post_count += len(new_responses_in_preview)
                db.record_thread_replies(thread=thread, replies=new_responses_in_preview,
                                         total_reply_count=thread.total_reply_count,
                                         updated_at=now)
            logger.debug(f'串 #{i} 由于全部需要抓取的回应已在预览之中,记录后到此结束。')
        else:
            # Walk the thread in reverse (newest page first).
            # Threads hold 19 replies per page.
            start_page_number = (thread.total_reply_count - 1) // 19 + 1
            logger.debug(f'串 #{i} 需要进入以抓取目标范围内的回应。' +
                         f'从回应总数推测出的当前页数 = {start_page_number}')
            if (thread.total_reply_count % 19) <= 5:
                # The newest page is fully covered by the preview, so the
                # walk skips it.
                # NOTE(review): when total_reply_count is an exact multiple
                # of 19 the modulo is 0, which also skips a *full* newest
                # page that the 5-reply preview cannot cover — confirm this
                # case cannot lose replies.
                logger.debug(f'串 #{i} 由于最新一页的回应已全部包含在预览中,抓取时会略过该页')
                start_page_number -= 1
            needs_gatekeeper_post_id = False
            if has_old_records:
                last_reply_count = \
                    db.get_thread_total_reply_count(thread_id=thread.id)
                if last_reply_count is not None:
                    last_page_count = (last_reply_count - 1) // 19 + 1
                else:
                    last_page_count = None
                    logger.warning(f'串 #{i} 存在曾抓取到的回应,但却没有记录回应总数')
                if (last_page_count is None or not client.thread_page_requires_login(last_page_count)) \
                        and client.thread_page_requires_login(start_page_number):
                    # The pages to fetch require login, but the previously
                    # seen pages did not — earlier data cannot tell whether
                    # the thread is page-capped, so fetch the gatekeeper
                    # page once to learn the gatekeeper post id.
                    needs_gatekeeper_post_id = True
                    logger.debug(f'串 #{i} 由于要抓取的内容需要登录,'
                                 + f'而之前抓取到的内容在需要登录之前,无法用以判断是否卡页,'
                                 + f'因而需要额外获取第 100 页来确认守门串号')
            elif client.thread_page_requires_login(start_page_number):
                # Never fetched before and the target pages require login:
                # same gatekeeper confirmation is needed.
                needs_gatekeeper_post_id = True
                logger.debug(f'串 #{i} 由于要抓取的内容需要登录,'
                             + f'而之前曾未抓取过内容,无法用以判断是否卡页,'
                             + f'因而需要额外获取第 100 页来确认守门串号')
            if needs_gatekeeper_post_id:
                # TODO: this chunk should live on the API side.
                (gatekeeper_page, usage) = client.get_thread_page(
                    id=thread.id, page=client.get_thread_gatekeeper_page_number())
                stats.total_bandwidth_usage.add(usage)
                stats.thread_request_count += 1
                gatekeeper_post_id = gatekeeper_page.replies[-1].id
                logger.debug(f'串 #{i} 确认守门串号。守门串号 = {gatekeeper_post_id}')
            else:
                gatekeeper_post_id = None
            if has_old_records:
                walker = create_walker(
                    target=ReversalThreadWalkTarget(
                        thread_id=thread.id,
                        start_page_number=start_page_number,
                        gatekeeper_post_id=gatekeeper_post_id,
                        stop_before_post_id=latest_seen_reply_id,
                        expected_stop_page_number=last_page_count,
                    ),
                    client=client,
                )
            else:
                walker = create_walker(
                    target=ReversalThreadWalkTarget(
                        thread_id=thread.id,
                        start_page_number=start_page_number,
                        gatekeeper_post_id=gatekeeper_post_id,
                        stop_before_datetime=fetching_since,
                    ),
                    client=client,
                )
            final_reply_count = None
            targets = []
            bandwidth_usage_for_thread = TotalBandwidthUsage()
            thread_walk_page_count = 0
            for (pn, page, usage) in walker:
                thread_walk_page_count += 1
                stats.thread_request_count += 1
                if client.thread_page_requires_login(pn):
                    stats.logged_in_thread_request_count += 1
                logger.debug(f'串 #{i} 页 {pn}。纳入回应数 = {len(page.replies)}')
                page: anobbsclient.ThreadPage = page
                bandwidth_usage_for_thread.add(usage)
                if final_reply_count is None:
                    # The first walked page is the newest one; its body
                    # carries the freshest total reply count.
                    final_reply_count = page.body.total_reply_count
                targets += page.replies
            targets += new_responses_in_preview
            now_after_fetching_inside_thread = datetime.now(tz=local_tz)
            db.record_thread_replies(thread=thread, replies=targets,
                                     total_reply_count=final_reply_count,
                                     updated_at=now_after_fetching_inside_thread)
            stats.total_bandwidth_usage.add(bandwidth_usage_for_thread.total)
            if len(targets) > 0:
                if is_thread_recorded:
                    stats.affected_thread_count += 1
                stats.new_post_count += len(targets)
            logger.debug(f'串 #{i} 已抓取到范围内所有新回应,记录后到此结束。'
                         + f'遍历访问页数 = {thread_walk_page_count},'
                         + f'期间 (上传字节数, 下载字节数) = {bandwidth_usage_for_thread.total}')
# Script entry point.
if __name__ == '__main__':
    main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,097 | FToovvr/adnmb-quests-watcher | refs/heads/master | /commons/consts.py | from datetime import datetime, timedelta, date
from dateutil import tz
# Time zone every timestamp in this project is interpreted in.
local_tz = tz.gettz('Asia/Shanghai')
ZWSP = '\u200b'  # zero-width space
WORD_JOINER = '\u2060'  # word joiner (zero-width no-break)
# Marker appended to truncated text.
OMITTING = ZWSP + "…"
def get_target_date(now: datetime = None) -> date:
    """Return the report's subject date: the day before *now*, where a "day"
    rolls over at 04:00 rather than midnight.

    When *now* is omitted, the current local time is used.
    """
    moment = datetime.now(tz=local_tz) if now is None else now
    # Shift back four hours so times before 04:00 count as the previous day.
    shifted = moment - timedelta(hours=4)
    return shifted.date() - timedelta(days=1)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,098 | FToovvr/adnmb-quests-watcher | refs/heads/master | /fun/generate_wordcloud.py | #!/usr/bin/env python3
from typing import Optional, Dict, List, Set, Callable
from dataclasses import dataclass
from collections import defaultdict, Counter
from operator import add
from functools import reduce
from datetime import datetime, date, timedelta
import hashlib
from random import Random
from pathlib import Path
import json
from statistics import mean, stdev, quantiles
import math
import psycopg2
from bs4 import BeautifulSoup
import jieba
# import pkuseg
import regex
from wordcloud import WordCloud
import os
import sys
sys.path.append(os.path.join(sys.path[0], '..')) # noqa
from commons.consts import get_target_date, local_tz
from commons.config import load_config
# seg = pkuseg.pkuseg()
def main():
    """Generate and save the word-cloud image for one day.

    The subject date is taken from argv[1] (ISO format) when given,
    otherwise it defaults to the usual reporting date.
    """
    os.chdir(sys.path[0])
    cli_args = sys.argv[1:]
    if cli_args:
        subject_date = date.fromisoformat(cli_args[0])
    else:
        subject_date = get_target_date(datetime.now(tz=local_tz))
    print(f"日期:{subject_date.isoformat()}")
    config = load_config('../config.yaml')
    stop_words = load_default_stop_words()

    def report_progress(total, i):
        return print(f"{i+1}/{total}") if i % 1000 == 0 else None

    with psycopg2.connect(config.database.connection_string) as conn:
        img = generate_wordcloud(conn, subject_date, stop_words, report_progress)
    out_path = Path(f'../report_out/wordcloud_{subject_date.isoformat()}.png')
    if out_path.exists():
        out_path.unlink()
    img.save(out_path)
def load_default_stop_words():
    """Load the bundled stop-word file(s), resolved relative to this script's
    directory so the result does not depend on the current working directory.

    Returns a set of stop words (always containing the single space ' ').
    """
    # BUG FIX: the original used filter() with os.path.join as the
    # predicate — the join result is always truthy, so the *relative* paths
    # were kept unchanged instead of being mapped onto the script directory.
    files = [os.path.join(sys.path[0], f) for f in [
        './stopwords.txt',
        # './stopwords_supplement.txt',
    ]]
    return load_stop_words_from_files(files)
def load_stop_words_from_files(stop_words_files: List[str]):
    """Read stop words from the given files, one word per line.

    Anything after '#' on a line is treated as a comment and dropped, and
    surrounding whitespace is stripped.  The returned set always contains
    the single space ' '.
    """
    words = {' '}
    for path in stop_words_files:
        with open(path) as fp:
            for line in fp.read().splitlines():
                words.add(line.split('#')[0].strip())
    return words
def generate_wordcloud(conn: psycopg2._psycopg.connection,
                       subject_date: date, stop_words: Optional[Set[str]] = None,
                       loading_progress_callback: Optional[Callable[[int, int], None]] = None):
    """Build the word-cloud image for *subject_date* and return it (PIL image).

    Word weights are TF-IDF scores computed against the accumulated per-day
    document counts (the "dc" store); a sorted dump of the scores is also
    written to ../report_out/.  *loading_progress_callback*, when given, is
    called as (total, index) while the day's posts are read.
    """
    dc = update_dc(conn, subject_date)
    # BUG FIX: the caller's stop_words used to be unconditionally overwritten
    # with the defaults; only fall back when none were supplied.
    if stop_words is None:
        stop_words = load_default_stop_words()
    adjust_dc(dc, stop_words)
    data_count = dc['count']
    words = []
    total = total_contents(conn, subject_date)
    for i, [_, content] in enumerate(each_content_on(conn, subject_date)):
        # Keep only words that already exist in the historical counts.
        words += filter(lambda word: word in data_count,
                        segment_content(content))
        if loading_progress_callback:
            loading_progress_callback(total, i)
    words = Counter(words)
    total_words_today = sum(words.values())
    max_tf_today = words.most_common(1)[0][1] / total_words_today

    def calculate_tf_idf(word, count):
        # Augmented term frequency (0.5 + 0.5·tf/max_tf) times IDF over the
        # accumulated day groups.
        def tf(count):
            return count / total_words_today
        if word in stop_words or word in ['No']:
            return 0
        # print(word, count, tf(count), max_tf_today, dc['n'], data_count[word])
        tf_idf = (0.5 + 0.5*(tf(count) / max_tf_today)) * \
            math.log10(dc['n']/(data_count[word]))
        return tf_idf
    tf_idfs = {word: calculate_tf_idf(word, count)
               for word, count in words.items()}
    # Dump the scores, highest first, for manual inspection.
    with open(os.path.join(sys.path[0], '../report_out', f'tf-idf_{subject_date.isoformat()}.txt'), 'w+') as xf:
        for [word, value] in sorted(tf_idfs.items(), key=lambda item: item[1], reverse=True):
            xf.write(f'{word} {value}\n')

    def md5_color_func(word: str = None, font_size=None, position=None,
                       orientation=None, font_path=None, random_state=None):
        # Deterministic hue derived from the word's MD5 so colors are stable
        # across runs.
        md5 = hashlib.md5(word.encode('utf-8')).hexdigest()
        x = int(md5[:6], base=16) / float(16**6 - 1) * 240
        return f'hsl({x}, 80%, 50%)'
    random_state = Random()
    random_state.seed("( ゚∀。)")
    wc = WordCloud(
        random_state=random_state,
        background_color='white',
        color_func=md5_color_func,
        font_path='./fonts/NotoSerifSC/NotoSerifSC-SemiBold.otf',
        width=800, height=600,
        scale=2,
    ).generate_from_frequencies(tf_idfs)
    # This folder finally has a use…
    return wc.to_image()
@dataclass
class NextDict():
    """Wrapper around the 'next' word-adjacency mapping.

    Values loaded from JSON are plain dicts; this wrapper lazily promotes
    them to ``defaultdict(int)`` on first access so counters can be
    incremented without existence checks.
    """
    _dict: Dict[str, Dict[str, int]]

    def __init__(self, orignal_next_dict: Dict[str, Dict[str, int]], *nargs, **kwargs):
        # Wraps the given mapping in place (no copy is made).
        self._dict = orignal_next_dict

    def __getitem__(self, key):
        # BUG FIX (dead code): an unused `s = super(NextDict, self)` binding
        # was removed; nothing else changed.
        if key in self._dict:
            if not isinstance(self._dict[key], defaultdict):
                # Promote a plain dict (e.g. fresh from JSON) on first use.
                self._dict[key] = defaultdict(int, **self._dict[key])
        else:
            self._dict[key] = defaultdict(int)
        return self._dict[key]

    def __setitem__(self, key, value):
        self._dict[key] = value

    def items(self):
        return self._dict.items()

    def get_dict(self):
        """Return the underlying mapping (used for JSON serialization)."""
        return self._dict
def update_dc(conn: psycopg2._psycopg.connection, subject_date: date):
    """Bring the persistent word-statistics store ("dc" = data count) up to
    date through *subject_date*, persist it to dc.json, and return it.

    The store holds:
      * 'n'     — how many groups (days) have been accumulated;
      * 'count' — in how many groups each word has appeared;
      * 'next'  — for each word, which words followed it and how often
                  (plus a '$total' entry per word).
    """
    dc_file_path = os.path.join(sys.path[0], 'dc.json')
    if not Path(dc_file_path).exists():
        dc = {}
        # How many groups so far.
        dc['n'] = 0
        # If grouping by day / by thread: in how many groups each word appeared.
        dc['count'] = defaultdict(int)
        # Which words followed each word, and how many times.
        dc['next'] = NextDict({})
        with conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''
                SELECT run_at FROM activity
                ORDER BY run_at ASC
                LIMIT 1
            ''')
            first_created_at: datetime = cur.fetchone()[0]
            # Days roll over at 04:00: an early-morning run still belongs to
            # the previous day.
            if first_created_at.hour < 4:
                first_created_at = first_created_at.date() - timedelta(days=1)
            else:
                first_created_at = first_created_at.date()
            # The first day is most likely incomplete, so start from the next.
            start_date = first_created_at + timedelta(days=1)
    else:
        with open(dc_file_path, 'r') as f:
            dc = json.load(f)
        start_date = date.fromisoformat(
            dc['last_updated_date']) + timedelta(days=1)
        # Re-wrap the plain JSON dicts in their runtime container types.
        dc['count'] = defaultdict(int, **dc['count'])
        dc['next'] = NextDict(dc['next'])
    print(f"dc start date: {start_date.isoformat()}")
    updated = False
    if subject_date >= start_date:
        # https://stackoverflow.com/a/24637447
        for current_date in [start_date + timedelta(days=x) for x in range(0, (subject_date-start_date).days + 1)]:
            updated = True
            total = total_contents(conn, current_date)
            # dc['n'] += total
            dc['n'] += 1
            # seen_words_per_thread = defaultdict(set)
            seen_words_today = set()
            for i, [thread_id, content] in enumerate(each_content_on(conn, current_date)):
                if i % 1000 == 0:
                    print(f'dc {current_date.isoformat()} {i+1}/{total}')
                words = segment_content(content)
                # NOTE(review): the inner loop below reuses `i`, clobbering
                # the outer progress index — the "i % 1000" progress print
                # above becomes unreliable after the first content; confirm
                # this is acceptable.
                for i, word in enumerate(words):
                    if word == ' ':
                        continue
                    # seen_words_per_thread[thread_id].add(word)
                    seen_words_today.add(word)
                    if i < len(words) - 1 and words[i+1] != ' ':
                        dc['next'][word][words[i+1]] += 1
                        dc['next'][word]['$total'] += 1
            # counts = Counter(
            #     reduce(add, map(lambda x: list(x), seen_words_per_thread.values())))
            counts = Counter(list(seen_words_today))
            dc['count'] = Counter(dc['count']) + counts
    dc['last_updated_date'] = subject_date.isoformat()
    if updated:
        # json can't serialize NextDict directly; unwrap it (and fall back to
        # __dict__ for anything else unexpected).
        def i_f___ing_hate_python(obj):
            if isinstance(obj, NextDict):
                return obj.get_dict()
            return obj.__dict__
        with open(dc_file_path, 'w+') as f:
            json.dump(dc, f, sort_keys=True, indent=4,
                      ensure_ascii=False, default=i_f___ing_hate_python)
    dc['count'] = Counter(**dc['count'])
    return dc
def adjust_dc(dc, stop_words: Optional[set] = None):
    """Post-process the dc store in place: drop stop words, then merge word
    pairs that co-occur unusually often (outliers in the 'next' counts) into
    compound entries, shifting weight from the parts to the compound.
    """
    if stop_words is None:
        stop_words = load_default_stop_words()
    dc_count = dc['count']
    for stop_word in stop_words:
        dc_count.pop(stop_word, None)
    # Snapshot before mutation so adjustments are computed from the
    # unmodified counts.
    dc_count_orig = dict(**dc_count)
    for word, next in dc['next'].items():
        # NOTE: `next` shadows the builtin within this loop body.
        if word in stop_words:
            continue
        outliers = find_outliers(next)
        outliers = list(filter(lambda x: x[0] not in stop_words, outliers))
        # if len(outliers) > 0:
        #     print(word, outliers)
        for [outlier_word, outliers_count] in outliers:
            # Credit the compound word with the share of `word`'s count that
            # flows into this pairing...
            dc_count[word+outlier_word] += dc_count_orig[word] * \
                (outliers_count/next['$total'])
            # ...and debit both constituents proportionally (never below 0).
            for x in [word, outlier_word]:
                dc_count[x] = max(0, dc_count[x] - dc_count_orig[x]
                                  * (outliers_count/dc['next'][x]['$total']))
                if dc_count[x] == 0:
                    dc_count.pop(x)
                elif int(dc_count[x]) == 0:
                    # workaround — presumably keeps sub-1 counts alive so
                    # later integer truncation doesn't drop the word; confirm.
                    dc_count[x] = 1
    dc['count'] = dc_count
def find_outliers(x: Dict[str, int]):
    """Return the (word, count) pairs in *x* whose count lies more than six
    standard deviations from the mean.

    Keys starting with '$' (bookkeeping entries such as '$total') are
    ignored; fewer than three real entries yields no outliers.
    """
    entries = [item for item in x.items() if not item[0].startswith('$')]
    if len(entries) < 3:
        return []
    counts = [count for _, count in entries]
    avg = mean(counts)
    spread = stdev(counts)
    # https://stackoverflow.com/a/2303583
    return [item for item in entries if abs(item[1] - avg) > 3 * 2 * spread]
def segment_content(content: str):
    """Strip HTML from *content*, keep only CJK/Latin character runs, and
    segment the remaining text into words with jieba.
    """
    plain = BeautifulSoup(content, features='html.parser').get_text()
    runs = regex.findall(
        r'[\p{Han}]+|[\p{Latin}][\p{Latin}-]*', plain)
    text = ' '.join(runs)
    if not text.strip():
        return []
    # words += filter(lambda w: w not in stop_words, jieba.lcut(text))
    return jieba.lcut(text)
    # return seg.cut(text)
def total_contents(conn: psycopg2._psycopg.connection, subject_date: date):
    """Count the posts created within *subject_date*'s 04:00-to-04:00 window."""
    query = r'SELECT count(id) FROM post WHERE in_boundaries(created_at, %s::timestamptz, %s::timestamptz)'
    with conn.cursor() as cur:
        cur.execute(query, get_range(subject_date))
        return cur.fetchone()[0]
def each_content_on(conn: psycopg2._psycopg.connection, subject_date: date):
    """Yield [thread_id, content] for every post in *subject_date*'s window.

    For a reply the thread id is its parent thread's id; for an OP (which
    has no parent) the post's own id is used.
    """
    query = r'SELECT id, parent_thread_id, content FROM post WHERE in_boundaries(created_at, %s::timestamptz, %s::timestamptz)'
    with conn.cursor() as cur:
        cur.execute(query, get_range(subject_date))
        for [post_id, parent_thread_id, content] in cur:
            yield [parent_thread_id or post_id, content]
def get_range(subject_date: date):
    """Return (start, end) timestamp strings bounding *subject_date*:
    04:00 (UTC+8) on that day through 04:00 (UTC+8) the next day.
    """
    next_day = subject_date + timedelta(days=1)
    return (f'{subject_date.isoformat()} 04:00+8',
            f'{next_day.isoformat()} 04:00+8')
# Script entry point.
if __name__ == '__main__':
    main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,099 | FToovvr/adnmb-quests-watcher | refs/heads/master | /commons/include_filters.py | from __future__ import annotations
from typing import Any, Type, Union, Literal, List, Dict
from dataclasses import dataclass, field
from abc import ABC, abstractmethod
from .thread_stats import ThreadStats, Counts
class IncludeRule(ABC):
    """Abstract predicate deciding whether a thread is included in a report.

    Concrete rules are constructed from plain parsed-YAML objects via
    `build` and evaluated via `check`.
    """
    @classmethod
    def build(cls, builder: IncludeRuleBuilder, args: Any) -> IncludeRule:
        # A dict supplies keyword arguments, a list positional ones.
        if isinstance(args, dict):
            return cls(**args)
        if isinstance(args, list):
            return cls(*args)
        raise ValueError(args)

    @abstractmethod
    def __str__(self) -> str:
        """Human-readable description of the rule (shown in the report text)."""
        raise NotImplementedError()

    @abstractmethod
    def check(self, thread: ThreadStats, ranking: int, counts: Counts, all_threads: List[ThreadStats]) -> bool:
        """Return True when *thread* (at position *ranking*) should be included."""
        raise NotImplementedError()
@dataclass(frozen=True)
class IncludeRuleBuilder:
    """Registry mapping rule names to IncludeRule classes, used to build
    rule trees from plain dict objects (e.g. parsed YAML).
    """
    rule_map: Dict[str, Type[IncludeRule]] = field(default_factory=dict)

    def register_rule(self, name: str, rule_class: Type[IncludeRule]):
        """Register *rule_class* under *name*; each name may be taken once."""
        assert(name not in self.rule_map)
        self.rule_map[name] = rule_class

    def build(self, root_rule_obj: Dict[str, Any]) -> IncludeRule:
        """Build a rule from a single-entry mapping {rule_name: args}."""
        if len(root_rule_obj) != 1:
            raise ValueError(root_rule_obj)
        name, args = next(iter(root_rule_obj.items()))
        return self.rule_map[name].build(self, args)


# Module-wide builder; the rule classes below register themselves on it.
include_rule_builder = IncludeRuleBuilder()
@dataclass(frozen=True)
class IncludeRuleRanking(IncludeRule):
    """Include a thread ranked within the top N (int value) or within a
    quartile band ('@q1'/'@q2'/'@q3').
    """
    verb: Union[Literal['<='], Literal['=='], Literal['>='],
                Literal['<'], Literal['>']]
    value: Union[int, Literal['@q1'], Literal['@q2'], Literal['@q3']]

    def __post_init__(self):
        if self.verb not in ['<=', '==', '>=', '<', '>']:
            raise KeyError(self.verb)
        # Only '<=' ("within the top …") is implemented so far.
        if self.verb != '<=':
            raise NotImplementedError(self.verb)
        if not isinstance(self.value, int):
            if self.value not in ['@q1', '@q2', '@q3']:
                raise KeyError(self.value)

    def __str__(self):
        if isinstance(self.value, int):
            return f"前 {self.value} 位"
        if self.value.startswith('@q'):
            # BUG FIX: `qn` is an int but was compared against the strings
            # '1'/'2'/'3', so every quartile value fell through to
            # assert(False).
            qn = int(self.value[2:])
            if qn == 1:
                percentage = '75%'
            elif qn == 2:
                percentage = '50%'
            elif qn == 3:
                percentage = '25%'
            else:
                assert(False)
            return f"前 {percentage}"
        assert(False)

    def check(self, thread: ThreadStats, ranking: int, counts: Counts, all_threads: List[ThreadStats]):
        if isinstance(self.value, int):
            if self.verb == '<=':
                if self.value >= len(all_threads):
                    return True
                # NOTE(review): the integer branch compares by character
                # count while the quartile branch below uses response
                # count — confirm the asymmetry is intended.
                return thread.increased_character_count >= all_threads[self.value].increased_character_count
            assert(False)
        if self.value.startswith('@q'):
            qn = int(self.value[2:])
            qn_new_responses = counts.thread_new_post_quartiles[qn-1]
            if self.verb == '<=':
                return thread.increased_response_count >= qn_new_responses
            assert(False)
        assert(False)


include_rule_builder.register_rule('ranking', IncludeRuleRanking)
@dataclass(frozen=True)
class IncludeRuleField(IncludeRule):
    """Include a thread when one of its numeric fields passes a threshold."""
    field_name: str
    verb: Union[Literal['<='], Literal['=='], Literal['>='],
                Literal['<'], Literal['>']]
    value: Any

    def __post_init__(self):
        if self.verb not in ['<=', '==', '>=', '<', '>']:
            raise KeyError(self.verb)
        # Only '>=' thresholds are supported so far.
        if self.verb != '>=':
            raise NotImplementedError(self.verb)
        if self.field_name not in [
            'increased_response_count',
            'increased_character_count',
        ]:
            raise NotImplementedError(self.field_name)

    def __str__(self):
        if self.field_name == 'increased_response_count':
            description = f"新增回应≥{self.value}"
            if self.value % 19 == 0:
                description += f"(满{self.value // 19}页)"
            return description
        if self.field_name == 'increased_character_count':
            return f"新增文本≥{self.value/1000:.2f}K"
        assert(False)

    def check(self, thread: ThreadStats, ranking: int, counts: Counts, all_threads: List[ThreadStats]):
        if self.verb != '>=':
            assert(False)
        return getattr(thread, self.field_name) >= self.value


include_rule_builder.register_rule('field', IncludeRuleField)
@dataclass(frozen=True)
class IncludeRuleCombinator(IncludeRule):
    """Base for rules combining several child rules (all/any)."""
    children: List[IncludeRule]

    @classmethod
    def build(cls, builder: IncludeRuleBuilder, args: Any) -> IncludeRule:
        # A combinator takes a list of child rule objects, each built
        # recursively through the builder.
        if isinstance(args, list):
            args = map(lambda r: builder.build(r), args)
            return cls(list(args))
        raise ValueError(args)

    def __post_init__(self):
        for child in self.children:
            assert(isinstance(child, IncludeRule))

    @property
    def _has_child_combinators(self):
        # Used by subclasses to decide whether their description needs
        # parentheses.  (Was `len(list(filter(...)))`; `any` expresses the
        # boolean intent directly and short-circuits.)
        return any(isinstance(c, IncludeRuleCombinator) for c in self.children)
class IncludeRuleAll(IncludeRuleCombinator):
    """Combinator that passes only when every child rule passes."""

    def check(self, thread: ThreadStats, ranking: int, counts: Counts, all_threads: List[ThreadStats]):
        # BUG FIX: `children` was referenced without `self.`, raising
        # NameError the first time an `all` rule was evaluated.
        for child in self.children:
            if not child.check(thread, ranking, counts, all_threads):
                return False
        return True

    def __str__(self):
        ret = ' 且 '.join(list(map(lambda c: str(c), self.children)))
        if self._has_child_combinators:
            ret = "(" + ret + ")"
        return ret


include_rule_builder.register_rule('all', IncludeRuleAll)
class IncludeRuleAny(IncludeRuleCombinator):
    """Combinator that passes when at least one child rule passes."""

    def check(self, thread: ThreadStats, ranking: int, counts: Counts, all_threads: List[ThreadStats]):
        # any() short-circuits exactly like the original early-return loop.
        return any(child.check(thread, ranking, counts, all_threads)
                   for child in self.children)

    def __str__(self):
        joined = ' 或 '.join(str(child) for child in self.children)
        if self._has_child_combinators:
            return "(" + joined + ")"
        return joined


include_rule_builder.register_rule('any', IncludeRuleAny)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,100 | FToovvr/adnmb-quests-watcher | refs/heads/master | /3_generate_text_report.py | #!/usr/bin/env python3
from __future__ import annotations
from typing import Any, Tuple, List, Dict, OrderedDict, Optional, Union, Literal
from dataclasses import dataclass, field
import argparse
from pathlib import Path
import sys
import os
from datetime import datetime, timedelta, time, date
import logging
import logging.config
import traceback
import re
from time import sleep
from io import BytesIO
import yaml
import requests
from bs4 import BeautifulSoup
import psycopg2
import anobbsclient
from anobbsclient.walk import create_walker, ReversalThreadWalkTarget
from commons.consts import local_tz, ZWSP, OMITTING, get_target_date
from commons.config import load_config, ClientConfig
from commons.thread_stats import ThreadStats, Counts
from commons.include_filters import IncludeRule, include_rule_builder, IncludeRuleRanking
from models.analyzing import Stats, DB
from models.publication_record import PublicationRecord
from commons.debugging import super_huge_thread_pg
from fun.generate_wordcloud import generate_wordcloud
# Version tag of the generated report's text format/layout.
FORMAT_VERSION = '3.1'
# Version of the character-counting method, plus its human-readable explanation.
CHARACTER_COUNT_METHOD_VERSION = '2'
CHARACTER_COUNT_METHOD_EXPLANATION = "除换行与一般空白外字符的个数"
@dataclass(frozen=True)
class Arugments:
    """Fully-resolved options for one report-generation run.

    NOTE(review): the class name is a typo for "Arguments"; kept as-is since
    it is part of this module's interface.
    """
    # Path to the YAML configuration file.
    config_file_path: str
    # The date the report covers.
    target_date: date
    # Verify the destination thread has not saged before posting.
    check_sage: bool
    # Thread that receives the trend report.
    trend_thread_id: int
    # Actually post the report (False = preview only).
    publish_on_trend_thread: bool
    # The "daily qst" thread that may be notified about a new report.
    daily_qst_thread_id: int
    # Where to post the "report published" notification, if anywhere.
    notify_target: Union[
        None,
        Literal['daily_qst_thread'],
        Literal['trend_thread'],
    ]
    # Attach the day's word-cloud image to the notification.
    notify_with_wordcloud: bool
    # How many ranked threads go on each report page.
    page_capacity: int
    # Predicate deciding which threads make it into the report.
    include_rule: IncludeRule
    # Skip the comparison with the previous day's numbers.
    no_compare: bool
    # PostgreSQL connection string.
    connection_string: str
    # Suppress preview output (used by the check_sage debugging scenario).
    # NOTE(review): "slience" is a typo for "silence"; kept for compatibility.
    force_slience: bool
    # Client configuration used to create the API client when needed.
    client_config: ClientConfig

    @property
    def requires_client(self) -> bool:
        # An API client is only needed to post the report or to check sage.
        return self.publish_on_trend_thread or self.check_sage
def parse_args(args: List[str]) -> Arugments:
    """Parse the command line into a fully-resolved `Arugments` value.

    Besides plain parsing this resolves defaults from the config file and
    applies the `--debugging-scenario` overrides.
    """
    parser = argparse.ArgumentParser(
        description="发布报告。"
    )
    parser.add_argument(
        '-c', '--config', type=str, default='./config.yaml',
        dest='config_file_path',
        help='配置文件路径',
    )
    parser.add_argument(
        type=date.fromisoformat, nargs='?',
        dest='target_date',
        help="要报告的日期",
    )
    parser.add_argument(
        '--check-sage', action='store_true',
        dest='check_sage',
        help="是否在发布前检查所要发到的串有无被下沉",
    )
    parser.add_argument(
        '--publish', action='store_true',
        dest='publish',
        help="是否要把报告发布到趋势串中",
    )
    parser.add_argument(
        '--notify-daily-qst', action='store_true',
        dest='notify_daily_qst_thread',
        help="是否要通知跑团日报新发布的报告",
    )
    parser.add_argument(
        '--notify-with-wordcloud', action='store_true',
        dest='notify_with_wordcloud',
        help="是否在新报告发布通知中附带词云"
    )
    parser.add_argument(
        '--including', type=str, default=None,
        dest='including',
        help="主题串包含进报告的条件,YAML 格式",
    )
    parser.add_argument(
        '--no-compare', action='store_true',
        dest='no_compare',
        help="报告不要包含与之前的数据的比较",
    )
    parser.add_argument(
        '--debugging-scenario', '-D', type=str, default=None,
        choices=['none', 'preview', 'preview-',
                 'publish_only', 'notify', 'notify-', 'check_sage'],
        dest='debugging_scenario',
        help="除错用。根据所输入的值,可能会修改其他参数的内容",
    )
    parsed = parser.parse_args(args)
    config = load_config(parsed.config_file_path)
    if parsed.target_date is None:
        parsed.target_date = get_target_date()
    if parsed.including is None:
        parsed.including = config.publishing.including
    else:
        # BUG FIX: `yaml.parse` returns a stream of low-level parser events,
        # not the loaded document; `yaml.load` with SafeLoader is what was
        # intended here.
        parsed.including = yaml.load(parsed.including, Loader=yaml.SafeLoader)
    parsed.including = include_rule_builder.build(parsed.including)
    force_slience = False
    notify_trend_thread_instead = False
    s = parsed.debugging_scenario or 'none'
    # BUG FIX: the scenarios below used to assign
    # `parsed.publish_on_trend_thread`, an attribute nothing reads — the
    # returned value is built from `parsed.publish`, so the overrides had
    # silently no effect.  They now set `parsed.publish`.
    if s.startswith('preview'):
        # Only preview the generated report.
        parsed.check_sage = False
        parsed.publish = False
    elif s.startswith('check_sage'):
        # Only perform the sage check.
        parsed.check_sage = True
        parsed.publish = False
        force_slience = True
    elif s.startswith('publish_only'):
        # Only publish the report, without notifying.
        parsed.check_sage = False
        parsed.publish = True
        parsed.notify_daily_qst_thread = False
    elif s.startswith('notify'):
        # Publish a notification, but into the same thread as the report.
        # NOTE(review): the original asserted `parsed.publish_on_thread is
        # not None`, an attribute that does not exist (AttributeError);
        # the assertion has been dropped.
        parsed.publish = True
        notify_trend_thread_instead = True
        parsed.check_sage = False
    else:
        assert(s == 'none')
    if s.endswith('-'):
        # Trailing '-' variants restrict the report to the single top thread.
        parsed.including = IncludeRuleRanking('<=', 1)
    if notify_trend_thread_instead:
        notify_target = 'trend_thread'
    else:
        notify_target = 'daily_qst_thread' if parsed.notify_daily_qst_thread else None
    return Arugments(
        config_file_path=parsed.config_file_path,
        target_date=parsed.target_date,
        check_sage=parsed.check_sage,
        trend_thread_id=config.trend_thread_id,
        publish_on_trend_thread=parsed.publish,
        daily_qst_thread_id=config.daily_qst_thread_id,
        notify_target=notify_target,
        notify_with_wordcloud=parsed.notify_with_wordcloud,
        page_capacity=config.publishing.page_capacity,
        include_rule=parsed.including,
        no_compare=parsed.no_compare,
        connection_string=config.database.connection_string,
        force_slience=force_slience,
        client_config=config.client,
    )
# Decorative divider pieces for the report text.  The interleaved ZWSPs
# presumably keep clients from collapsing/mangling the box-drawing runs —
# confirm against how the target board renders them.
MAIN_DIVIDER_PART = f"══{ZWSP}══{ZWSP}══"
META_MAIN_DIVIDER = f"{MAIN_DIVIDER_PART} META {MAIN_DIVIDER_PART}"
def main():
    """Generate the trend report for the target date and, depending on the
    arguments, print it, publish it to the trend thread, and/or post a
    notification about it.
    """
    args = parse_args(sys.argv[1:])
    if args.requires_client:
        client = args.client_config.create_client()
    else:
        client = None
    with psycopg2.connect(args.connection_string) as conn:
        if args.publish_on_trend_thread:
            publication_record = PublicationRecord(conn=conn,
                                                   subject_date=args.target_date, report_type='trend')
            attempts = publication_record.attempts
            # Do nothing if already published; give up after a few failures.
            if publication_record.is_done or attempts > 3:
                return
            publication_record.increase_attempts()
            uuid = publication_record.uuid
        else:
            # BUG FIX: `publication_record`/its `.uuid` used to be referenced
            # unconditionally below even though it was only bound when
            # publishing, so preview runs raised NameError.
            publication_record = None
            uuid = None
        logging.config.fileConfig('logging.3_generate_text_report.conf')
        logging.info(
            f"开始进行发布报告相关流程。UUID={uuid},数据库=PostgreSQL")
        if args.check_sage and check_sage(args.trend_thread_id, client):
            # BUG FIX: logging.warn is deprecated; use logging.warning.
            logging.warning("趋势串已下沉。本次终止")
            return
        if args.publish_on_trend_thread:
            logging.info("尚未发送回应请求以发布报告,将生成报告文本并尝试发送回应请求")
        pages = retrieve_data_then_generate_trend_report_text(
            conn=conn,
            daily_qst_thread_id=args.daily_qst_thread_id,
            date=args.target_date, uuid=uuid,
            rank_include_rule=args.include_rule,
            rank_page_capacity=args.page_capacity,
            should_compare_with_last_day=not args.no_compare,
        )
        if not args.publish_on_trend_thread:
            if not args.force_slience:
                print_report(pages)
            return
        logging.info(f"报告文本页数:{len(pages)}")
        publish(pages, args.trend_thread_id, uuid, client, publication_record)
        logging.info("已发送各页报告且找到报告各页对应的各回应")
        if args.notify_target is not None:
            if args.notify_target == 'daily_qst_thread':
                notify_thread_id = args.daily_qst_thread_id
            else:
                assert(args.notify_target == 'trend_thread')
                notify_thread_id = args.trend_thread_id
            notify(conn,
                   notify_thread_id, args.target_date,
                   client, publication_record,
                   args.notify_with_wordcloud)
    logging.info("成功结束")
def check_sage(trend_thread_id: int, client: anobbsclient.Client):
    """Return the thread's sage flag (True = the trend thread has saged)."""
    (trend_thread, _) = client.get_thread_page(trend_thread_id, page=1)
    return trend_thread.marked_sage
def print_report(pages: Tuple[str, str, str]):
    """Print every report page (title, name, content) to stdout."""
    for (title, name, content) in pages:
        lines = [
            "标题:" + title,
            "名称:" + name,
            content,
        ]
        print('\n'.join(lines))
def publish(pages: List[Tuple[str, str, str]], destination_thread_id: int, uuid: str,
            client: Optional[anobbsclient.Client],
            publication_record: PublicationRecord):
    """Post each report page as a reply to the destination thread and record
    which reply post carries which page.

    Pages already recorded as published are skipped, so a failed run can be
    resumed.  The process exits (code 1) when a freshly posted page cannot
    be located afterwards or its UUID/page number does not match.
    """
    publication_record.report_thread_id_and_reply_count(
        thread_id=destination_thread_id,
        reply_count=len(pages)
    )
    # NOTE(review): "first_rount" is a typo for "first_round" (local-only name).
    first_rount = True
    for post in publication_record.reply_posts:
        logging.info(f"处理发布第 {post.report_page_number} 页…")
        if post.reply_post_id is not None:
            # Already published in an earlier attempt.
            logging.info(f"本页已有发布成功的记录,跳过")
            continue
        if first_rount:
            first_rount = False
        else:
            # Board rate limit: wait between consecutive replies.
            logging.info(f"在发送报告前,由于发串间隔限制,将等待30秒")
            sleep(30)
        (title, name, content) = pages[post.report_page_number-1]
        try:
            client.reply_thread(content, to_thread_id=destination_thread_id,
                                title=title, name=name)
        except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as e:
            # The reply may still have gone through despite the timeout;
            # fall through to the lookup below to verify.
            logging.warning("请求服务器超时,将尝试检查是否成功发串")
            logging.warning(traceback.format_exc())
        except anobbsclient.ReplyException as e:
            logging.critical(
                f"服务器响应表示发布回应失败:error={e.raw_error};detail={e.raw_detail}")
            raise e
        except Exception as e:
            logging.critical(f"发布回应失败:{e}")
            logging.critical(traceback.format_exc())
            raise e
        else:
            logging.info("服务器响应表示发布回应成功")
        # Locate the reply we (hopefully) just posted via its embedded UUID.
        logging.info(f"将查找属于本页报告的回应")
        found_post = find_last_post_with_uuid(client, destination_thread_id)
        if found_post is None:
            logging.error("未找到任何带有 UUID 回应,本次终止")
            exit(1)
        (report_page_number, post_id, uuid, offset) = found_post
        if uuid != publication_record.uuid:
            logging.error(f"最后带有 UUID 的回应的 UUID 与本次的不匹配,本次终止。找到的 UUID={uuid}")
            exit(1)
        if report_page_number != post.report_page_number:
            logging.error("最后带有 UUID 的回应的页数与本次的不匹配,本次终止。" +
                          f"找到的页数={report_page_number}")
            exit(1)
        logging.info(f"找到本页报告对应回应,将记录。回应串号={post_id},偏移={offset}")
        publication_record.report_found_reply_post(
            report_page_number=report_page_number,
            post_id=post_id, offset=offset,
        )
def notify(conn: psycopg2._psycopg.connection,
           notify_thread_id: int, subject_date: date,
           client: anobbsclient.Client,
           publication_record: PublicationRecord,
           with_wordcloud: bool):
    """Post a "report is out" notification reply, linking all report pages,
    optionally attaching a word-cloud image."""
    # TODO: check whether the notification actually succeeded
    # Board rate limit: wait before posting another reply.
    logging.info(f"将发送报告出炉通知。由于发串间隔限制,将等待30秒")
    sleep(30)
    posts = publication_record.reply_posts
    # NOTE(review): "%-m"/"%-d" (no zero padding) is glibc-specific strftime
    # behavior — confirm the deployment platform supports it.
    content = subject_date.strftime(
        f"%Y年%-m月%-d日 跑团版 趋势日度报告:\n")
    content += '\n'.join(
        list(map(lambda x: f">>No.{x.reply_post_id}", posts))
    ) + '\n'
    # 19 replies per thread page → convert reply offsets to page numbers.
    min_reply_pn = (posts[0].reply_offset-1)//19+1
    max_reply_pn = (posts[-1].reply_offset-1)//19+1
    if min_reply_pn == max_reply_pn:
        content += f"(位于原串第{min_reply_pn}页)"
    else:
        content += f"(位于原串第{min_reply_pn}〜{max_reply_pn}页)"
    content += '\n'
    img_data = None
    if with_wordcloud:
        img = generate_wordcloud(conn=conn, subject_date=subject_date)
        with BytesIO() as img_data_buf:
            img.save(img_data_buf, format='PNG')
            img_data = img_data_buf.getvalue()
    client.reply_thread(
        to_thread_id=notify_thread_id,
        title="本期跑团版趋势报告出炉",
        name=subject_date.strftime("%Y年%-m月%-d日 号"),
        content=content,
        attachment=None if not with_wordcloud else anobbsclient.Attachment(
            name=f'词云_{subject_date.isoformat()}.png', file=img_data)
    )
def find_last_post_with_uuid(client: anobbsclient.Client, thread_id: int) -> Optional[Tuple[int, int, str, int]]:
    """Walk the thread backwards and find the newest reply embedding a
    report UUID.

    Returns
    -------
    (report_page_number, post_id, uuid, offset), where offset is the
    reply's 1-based position in the thread (19 replies per page);
    None when no reply carries a UUID.
    """
    # TODO: could predict the position from the previous reply and cut
    #       requests down to ~1 in most cases
    # TODO: if the thread turns out to be SAGEd, stop posting to it — or
    #       check for SAGE beforehand?
    (page_1, _) = client.get_thread_page(id=thread_id, page=1, for_analysis=1)
    page_1: anobbsclient.ThreadPage = page_1
    # TODO: this could be a property on the API-client side
    total_pages = (page_1.body.total_reply_count - 1) // 19 + 1
    # Compile once, outside the loops (it used to be recompiled per reply).
    uuid_rx = re.compile(
        r"(?:.*\n)+" + META_MAIN_DIVIDER + r"\n" +
        r"(?:.*\n)+Report ID = ([0-9a-f\-]+).*(?:\n.*)*",
        re.MULTILINE,
    )
    walker = create_walker(
        target=ReversalThreadWalkTarget(
            thread_id=thread_id,
            gatekeeper_post_id=None,
            start_page_number=total_pages,
        ),
        client=client,
    )
    for (pn, page, _) in walker:
        page: anobbsclient.ThreadPage = page
        for (i, post) in enumerate(reversed(page.replies)):
            # Explicit parser: silences bs4's parser-guessing warning and
            # matches the usage in commons/thread_stats.py.
            text = BeautifulSoup(post.content, features='html.parser').text
            result = uuid_rx.match(text)
            if result is None:
                continue
            uuid = result.group(1)
            # The page number is encoded in the reply's name field.
            report_pn = int(re.match(r"页 ❬(\d+) / \d+❭", post.name).group(1))
            return (report_pn, post.id, uuid, (pn-1)*19+1+(len(page.replies)-1-i))
    return None
def retrieve_data_then_generate_trend_report_text(
    conn: psycopg2._psycopg.connection,
    daily_qst_thread_id: int,
    date: datetime, uuid: str,
    rank_include_rule: IncludeRule,
    rank_page_capacity: int,
    should_compare_with_last_day: bool,
) -> List[Tuple[str, str, str]]:
    """Open a cursor on ``conn`` and render the trend report for ``date``.

    Returns the report pages as (title, name, content) triples.
    (Annotation fix: this returns a *list* of triples — `generate()` builds
    one entry per page — not a single triple.)
    """
    with conn.cursor() as cur:
        db = DB(cur=cur)
        return TrendReportTextGenerator(
            db=db,
            daily_qst_thread_id=daily_qst_thread_id,
            date=date,
            rank_include_rule=rank_include_rule,
            rank_page_capacity=rank_page_capacity,
            uuid=uuid,
            should_compare_with_last_day=should_compare_with_last_day,
        ).generate()
@dataclass(frozen=True)
class TrendReportTextGenerator:
    """Renders the multi-page text of the daily trend report.

    Thread data is loaded once in ``__post_init__``; ``generate`` then
    renders the pages.  All literal output strings are user-facing and are
    kept verbatim.
    """
    db: DB
    daily_qst_thread_id: int
    date: datetime
    rank_include_rule: IncludeRule
    rank_page_capacity: int
    uuid: str
    should_compare_with_last_day: bool
    threads: List[ThreadStats] = field(init=False)
    counts: Counts = field(init=False)

    def __post_init__(self):
        # Frozen dataclass: derived fields must go through object.__setattr__.
        object.__setattr__(self, 'threads',
                           self.db.get_daily_threads(self.date))
        object.__setattr__(self, 'counts', Counts(self.threads))

    def generate(self) -> List[Tuple[str, str, str]]:
        """Render all report pages.

        Returns
        -------
        A list of (title, name, content) triples, one per page.
        """
        trending_boards \
            = self._generate_trending_boards(self.rank_page_capacity)
        report_pages = []
        title = self.date.strftime("日度趋势 %Y-%m-%d")
        for (i, board) in enumerate(trending_boards):
            page_number = i + 1
            name = f"页 ❬{page_number} / {len(trending_boards)}❭"
            page = self._generate_page(
                board, page_number, len(trending_boards))
            report_pages.append([title, name, page])
        return report_pages

    def _generate_page(self, trending_board: str, page_number: int, total_page_number: int) -> str:
        """Assemble one page; page 1 carries the extra summary/misc sections."""
        content = self._generate_head(page_number, total_page_number) + '\n'
        if page_number == 1:
            daily_qst_reference = self._generate_daily_qst_reference()
            if daily_qst_reference is not None:
                content += daily_qst_reference + '\n'
            content += self._generate_summary() + '\n'
            content += "收录范围:" + str(self.rank_include_rule)
            content += '\n\n'
            content += '\n'.join([self._format_heading(" 说明 "), '', ''])
            content += f"「+X/Y」:\n 「X」代表总增量,「Y」代表PO增量。\n"
            content += '\n'
            content += f"文本统计方式:\n {CHARACTER_COUNT_METHOD_EXPLANATION}。\n"
            content += '\n'
        content += '\n'.join([self._format_heading(" 趋势 "), '', ''])
        content += trending_board + '\n'
        if page_number == 1:
            misc_content = self._generate_misc()
            if misc_content is not None:
                content += '\n'.join([self._format_heading(" 杂项 "), '', ''])
                content += misc_content + '\n'
        content += '\n'.join([self._format_heading(" META "), '', ''])
        content += self._generate_meta(page_number) + '\n'
        return content

    def _format_heading(self, name) -> str:
        """Wrap a section name with the divider decoration."""
        return f"{MAIN_DIVIDER_PART}{name}{MAIN_DIVIDER_PART}"

    def _generate_head(self, page_number: int, total_page_number: int) -> str:
        """Banner lines at the top of every page."""
        return '\n'.join([
            self.date.strftime(f"【 {ZWSP} 跑团版 趋势 日度报告〔%Y-%m-%d〕】"),
            f" {ZWSP} 第 ❬{page_number} / {total_page_number}❭ 页",
            f"统计范围:当日上午4时~次日上午4时前",
            '',
        ])

    def _generate_daily_qst_reference(self) -> Optional[str]:
        """Collect links to the day's periodical threads (daily digests).

        Returns None when none were published that day.
        """
        stuff = []
        # Daily quest-board digest ("跑团日报")
        if self.daily_qst_thread_id is not None:
            daily_qsts = self.db.get_responses_match(
                self.date, self.daily_qst_thread_id, r'^\[头条\]\s*?<br />\r?$')
            if len(daily_qsts) > 0:
                daily_qst = daily_qsts[-1]
                line1 = daily_qst[1].splitlines()[0]
                m = re.search(r'(day .+?)\s*?<br />', line1)
                if m is None:
                    issue_text = ''
                else:
                    issue = m.group(1)
                    if len(issue) > 10:
                        issue = issue[:10] + "…"
                    issue_text = f"〔{issue}〕"
                stuff.append(
                    f"跑团日报{issue_text}:>>No.{daily_qst[0]} (位于原串第{(daily_qst[2]-1)//19+1}页)")
        # "Daily Dove" (每日鸽报)
        if True:
            # Issues like "11.5期" exist, so several issues may appear in a
            # single day; keep the latest response per issue number.
            daily_dovess = self.db.get_responses_match(
                self.date, 36939614, r'^Daily Dove 每日鸽报.*?<br />\r?$')
            daily_dove_dict = OrderedDict()
            for daily_dove in daily_dovess:
                lines = daily_dove[1].splitlines()
                if len(lines) < 2:
                    continue
                line2 = lines[1]
                m = re.search(r'第(\S*?)期\s*?<br />', line2)
                if m is None:
                    issue = None
                else:
                    issue = m.group(1)
                daily_dove_dict[issue] = daily_dove
            for issue, daily_dove in daily_dove_dict.items():
                if issue is None:
                    issue_text = ''
                else:
                    issue_text = f"〔第{issue}期〕"
                stuff.append(
                    f"每日鸽报{issue_text}:>>No.{daily_dove[0]} (位于原串第{(daily_dove[2]-1)//19+1}页)")
        # Third-party recommendation paper (有趣团推荐报)
        if True:
            third_newspapers = self.db.get_responses_match(
                self.date, 37777146, r'^『.*?报.*?』.*?<br />\r?$')
            third_newspaper_name = None
            if len(third_newspapers) > 0:
                # Assume at most one issue per day.
                third_newspaper = third_newspapers[-1]
                line1 = third_newspaper[1].splitlines()[0]
                m = re.search(r'『(.*?报)(.*?)』:?(.*?)<br />', line1)
                if m is not None:  # always true
                    third_newspaper_name = m.group(1)
                    issue_text = ''
                    subhead = m.group(2)
                    if len(subhead) <= 5:
                        issue_text = subhead
                    else:
                        issue_text = subhead[:5] + "…"
                    issue_date = m.group(3)
                    if len(issue_date) > 0:
                        if issue_text != "":
                            issue_text += " "
                        # BUG FIX: this used to test len(issue) — a stale (or
                        # even unbound) variable left over from the section
                        # above — when deciding whether to truncate issue_date.
                        if len(issue_date) <= 5:
                            issue_text += issue_date
                        else:
                            issue_text += issue_date[:5] + "…"
                    if issue_text != "":
                        issue_text = f"〔{issue_text}〕"
                    stuff.append(
                        f"{third_newspaper_name}{issue_text}:>>No.{third_newspaper[0]} (位于原串第{(third_newspaper[2]-1)//19+1}页)")
        if len(stuff) == 0:
            return None
        return '\n'.join(["当日刊物:"] + stuff) + '\n'

    def _generate_summary(self) -> str:
        """One-line-per-metric summary, optionally with day-over-day deltas."""
        class AttrsNone:
            # Stand-in whose every attribute reads as None, so the delta
            # formatter can treat "no previous day" uniformly.
            def __getattribute__(self, _):
                return None
        counts_before = AttrsNone()
        if self.should_compare_with_last_day:
            one_day_before = self.date - timedelta(days=1)
            counts_before = Counts(self.db.get_daily_threads(one_day_before))
        count_texts = self.__format_counts(self.counts, counts_before)
        return '\n'.join(["统计范围内:"] + list(map(lambda x: f"{ZWSP} ∗ {x}", count_texts))) + '\n'

    def __format_counts(self, counts: Counts, counts_before: Counts) -> List[str]:
        return [
            f"总计出现主题串 {self.__format_value_with_delta(counts.threads, counts_before.threads)} 串",
            f"新增主题串 {self.__format_value_with_delta(counts.new_threads, counts_before.new_threads)} 串",
            f"新增回应 {self.__format_value_with_delta(counts.new_posts, counts_before.new_posts)} 条",
            f"主题串新增回应 {self.__format_q(counts.thread_new_post_quartiles, counts_before.thread_new_post_quartiles)}"
            # Not meaningful enough to show for now…
            # f"平均主题串新增回应 {counts.thread_new_post_average} 条,"
            # + f"中位 {counts.thread_new_post_median} 条,"
            # + f"S²={counts.thread_new_post_variance}"
        ]

    def __format_value_with_delta(self, value: int, old_value: Optional[int]) -> str:
        """Render ``value`` with an arrow-marked delta against ``old_value``."""
        if old_value is None:
            return str(value)
        delta = value - old_value
        if delta > 0:
            return f"{value}(↑{delta})"
        elif delta < 0:
            return f"{value}(↓{abs(delta)})"
        return f"{value}(→0)"

    def __format_q(self, q: List[float], old_q: List[float]) -> str:
        """Render quartiles Q1 / median / Q3, each with its delta."""
        if old_q is None:
            old_q = [None] * len(q)
        q_texts = [f"Q₁={self.__format_value_with_delta(q[0], old_q[0])}"]
        q_texts += [f"中位数={self.__format_value_with_delta(q[1], old_q[1])}"]
        q_texts += [f"Q₃={self.__format_value_with_delta(q[2], old_q[2])}"]
        return ' '.join(q_texts)

    def _generate_trending_boards(self, step: int) -> List[str]:
        """Split the ranked, rule-filtered threads into boards of ``step``."""
        included_threads = []
        # NOTE(review): nothing is ever appended to this list, so the
        # blue-text front section below is currently always empty —
        # possibly vestigial; confirm before removing.
        threads_with_new_blue_text = []
        for (i, thread) in enumerate(self.threads):
            ranking = i+1
            if self.rank_include_rule.check(thread, ranking, self.counts, self.threads):
                included_threads.append([ranking, thread])
        threads_with_new_blue_text = sorted(threads_with_new_blue_text,
                                            key=lambda x: x[1].created_at)
        included_threads = threads_with_new_blue_text + included_threads
        boards = []
        for i in range(0, len(included_threads), step):
            board = self._generate_trending_board(
                included_threads[i:i+step], i)
            boards.append(board)
        return boards

    def _generate_trending_board(self, threads: List[Tuple[int, ThreadStats]], i_start: int) -> str:
        """Render one board: one entry per (ranking, thread) pair."""
        lines = []
        for [ranking, thread] in threads:
            lines += [self.__generate_thread_entry(thread, ranking)]
        return '\n'.join(lines)

    def __generate_thread_entry(self, thread: ThreadStats, ranking: int) -> str:
        """Render a single thread entry: head line, sub-heads, link, preview."""
        # thread = super_huge_thread_pg  # DEBUGGING
        head = f"#{ranking}"
        padding = len(head) + 1
        if thread.is_new:
            head += f" [+{thread.increased_response_count}/{thread.increased_response_count_by_po} 回应 NEW!]"
        else:
            head += f" [+{thread.increased_response_count}/{thread.increased_response_count_by_po} ={thread.total_reply_count} 回应]"
        head += f" [@"
        if thread.is_new:
            # # TODO: should be more rigorous
            # if thread.created_at.day == self.date.day:
            #     head += "当日"
            # else:
            #     head += "次日"
            head += thread.created_at.strftime('%m-%d')
        else:
            head += thread.created_at.strftime('%Y-%m-%d')
        head += (thread.created_at.strftime(' %H:%M')
                 if thread.is_new else '') + "]"
        subhead_lines = []
        subhead_1 = []
        # Round the cookie count down to a multiple of 5 to avoid implying
        # false precision.
        approx_distinct_cookie_count = thread.distinct_cookie_count//5*5
        if approx_distinct_cookie_count != 0:
            subhead_1 += [f"(参与饼干≥{approx_distinct_cookie_count})"]
        else:
            subhead_1 += [f"(参与饼干≥1)"]
        character_count = "(+" + \
            f"{thread.increased_character_count/1000:.2f}K" + "/"
        if thread.increased_character_count_by_po != 0:
            character_count += f"{thread.increased_character_count_by_po/1000:.2f}K"
        else:
            character_count += "0"
        character_count += " 文本)"
        subhead_1 += [character_count]
        subhead_lines += [' '.join(subhead_1)]
        if not thread.is_disappeared:
            blue_text = thread.blue_text
            if blue_text is not None:
                blue_text = blue_text.strip()
                if len(blue_text) > 8:
                    blue_text = blue_text[:8] + OMITTING
                if thread.are_blue_texts_new:
                    subhead_lines += [f"(新蓝字!「{blue_text}」)"]
                else:
                    subhead_lines += [f"(蓝字「{blue_text}」)"]
            preview = thread.generate_summary(free_lines=3)
        else:
            subhead_lines += ["(已消失)"]
            preview = None
        return '\n'.join(
            [head]
            + list(map(lambda x: f'{ZWSP} ' * padding + x, subhead_lines))
            + [f">>No.{thread.id}"]
            + ([preview] if preview is not None else [])
            + [ZWSP.join([f"━━━━"]*4), '']
        )

    def _generate_misc(self) -> Optional[str]:
        """Optional miscellany section (dice-tail statistics); None if empty."""
        entries = list(filter(lambda x: x is not None, [
            self._generate_tail_frequencies_report(),
            self._generate_consecutive_tails_report(),
        ]))
        if len(entries) > 0:
            return '\n'.join(entries)
        return None

    def _generate_tail_frequencies_report(self) -> Optional[str]:
        """Frequency table of the last digit of "r"-roll results."""
        (count, tail_frequencies) = self.db.get_tail_frequencies(self.date)
        if count == 0:
            return None
        text = f"统计范围内,「r」串尾出目频率 (n={count}):\n"
        # Show digit 0 last so the display order is 1..9, 0 after reversal.
        if 0 in tail_frequencies:
            tail_frequencies.move_to_end(0)
        f = list(reversed(tail_frequencies.items()))
        f_max_min = [max(f, key=lambda x: x[1])[0],
                     min(f, key=lambda x: x[1])[0]]
        f = list(map(lambda x: (
            "{}={:05.2f}%*" if x[0] in f_max_min else "{}={:05.2f}% ").format(x[0], x[1]*100), f))
        lines = []
        for i in range(0, 10, 4):
            lines += [' '.join(f[i:i+4])]
        lines[-1] = lines[-1].rstrip()
        lines[-1] += "(*最高/最低)"
        return text + '\n'.join(lines) + '\n'

    def _generate_consecutive_tails_report(self) -> Optional[str]:
        """Counts of post IDs ending with runs of >= 3 identical digits."""
        lucky_numbers = self.db.get_consecutive_tail_counts(self.date, 3)
        if len(lucky_numbers) == 0:
            return None
        lines = []
        for (n, count, zero_count) in lucky_numbers:
            text = "{} 连号 {} 次 ({:.2f}‰),".format(
                n, count, count / self.counts.new_posts * 1000)
            if zero_count > 0:
                text += "其中全 0 有 {} 次 ({:.2f}‰)".format(
                    zero_count, zero_count / self.counts.new_posts * 1000)
            else:
                text += "其中没有全 0"
            lines += [text]
        return '\n'.join(["统计范围内,串尾连号次数:", ";\n".join(lines) + "。", ''])

    def _generate_meta(self, page_number: int) -> str:
        """Trailing META block; bandwidth figures appear on page 1 only."""
        stats = self.db.get_meta_stats(self.date)
        lines = []
        if page_number == 1:
            lines += [
                f"统计期间:共上传 {stats.total_bandwidth_usage[0]:,} 字节,"
                + f"下载 {stats.total_bandwidth_usage[1]:,} 字节。", '',
            ]
        lines += [
            f'文本统计方式 Version = {CHARACTER_COUNT_METHOD_VERSION} ', '']
        lines += [f'Format Version = {FORMAT_VERSION}', '']
        lines += [f"Report ID = {self.uuid} # 定位用", '']
        return '\n'.join(lines)
# Allow running this module as a script.
if __name__ == '__main__':
    main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,101 | FToovvr/adnmb-quests-watcher | refs/heads/master | /2.6_check_completed.py | #!/usr/bin/env python3
from typing import List
from dataclasses import dataclass
import traceback
import logging
import logging.config
import argparse
from datetime import datetime, date
import sys
from pathlib import Path
import re
import psycopg2
import anobbsclient
from commons.consts import local_tz
from commons.config import load_config
from models.activity import Activity, Stats
from models.collecting import DB
logging.config.fileConfig('logging.2.6_check_status_of_completed_threads.conf')
# FIXME: 遇到被删的串不会记录被删,导致会一直检查下去
@dataclass(frozen=True)
class Arguments:
config_file_path: str
def parse_args(args: List[str]) -> Arguments:
parser = argparse.ArgumentParser(
description='检查申请完结但尚未记录有添加蓝字的主串内容是否有变化。',
)
parser.add_argument(
'-c', '--config', type=str, default='./config.yaml',
dest='config_file_path',
help='配置文件路径',
)
parsed = parser.parse_args(args)
return Arguments(
config_file_path=parsed.config_file_path,
)
def main():
    """Entry point: open an activity record, scan completed threads, and
    persist the run's outcome."""
    args = parse_args(sys.argv[1:])
    config = load_config(args.config_file_path)
    client = config.client.create_client()
    # Two connections: one for the activity log, one for data writes.
    with psycopg2.connect(config.database.connection_string) as conn_activity, \
            psycopg2.connect(config.database.connection_string) as conn_db:
        activity = Activity(conn=conn_activity,
                            activity_type='check_completed',
                            run_at=datetime.now(tz=local_tz))
        db = DB(conn=conn_db,
                completion_registry_thread_id=config.completion_registry_thread_id)
        stats = Stats()
        message = None
        is_successful = True
        try:
            scan_finished_threads(db, client, stats)
        except:
            # Capture any failure so it is persisted with the activity record.
            exc_text = traceback.format_exc()
            logging.critical(exc_text)
            message = exc_text
            is_successful = False
        finally:
            activity.report_end(is_successful, message, stats)
        if is_successful:
            logging.info("成功结束")
        else:
            exit(1)
def scan_finished_threads(db: DB, client: anobbsclient.Client, stats: Stats):
    """Re-fetch page 1 of registered-as-completed threads that still lack
    blue-text completion marks, so late-added marks are not missed."""
    # Pick threads that are registered as completed but: not yet blue-text
    # marked, not seen today, and not deleted.  Re-scanning guards against
    # threads slipping through when the blue text was added late.
    # (If a thread was never registered AND marked late, nothing can be done.)
    for id in db.get_thread_ids_in_completion_registry_thread_without_blue_texts():
        [page, usage] = client.get_thread_page(id=id, page=1,
                                               for_analysis=True)
        stats.total_bandwidth_usage.add(usage)
        db.record_thread(thread=page, board_id=int(page._raw['fid']),
                         updated_at=datetime.now(tz=local_tz))
# Allow running this module as a script.
if __name__ == '__main__':
    main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,102 | FToovvr/adnmb-quests-watcher | refs/heads/master | /models/publication_record.py | from typing import Optional, List
from dataclasses import dataclass, field
from datetime import datetime, date
import uuid
import json
import psycopg2
@dataclass(frozen=True)
class PublishedPost:
    """One page of a published report and where (if anywhere) it landed."""
    # 1-based page number within the report.
    report_page_number: int
    # Post ID of the reply carrying this page; None if not yet published.
    reply_post_id: Optional[int]
    # 1-based position of that reply within the destination thread.
    reply_offset: Optional[int]
@dataclass(frozen=True)
class PublicationRecord:
    """Tracks the publication state of one day's report in PostgreSQL.

    All persistence goes through database stored procedures/functions;
    this class is a thin wrapper keyed by (subject_date, report_type).
    """
    conn: psycopg2._psycopg.connection
    subject_date: date
    report_type: str
    # Report UUID; generated database-side when not supplied.
    uuid: Optional[str] = None
    # Database row id, resolved in __post_init__.
    _id: int = field(init=False)

    @staticmethod
    def is_report_published(conn: psycopg2._psycopg.connection, subject_date: date, report_type: str) -> bool:
        """Whether a report of this type for this date is already published."""
        with conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''SELECT * FROM is_report_published(%s, %s)''',
                        (subject_date, report_type))
            return cur.fetchone()[0]

    def __post_init__(self):
        # Resolve (or create) the database record and reconcile the UUID.
        assert(self.report_type in ['trend', 'new_threads'])
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''SELECT * FROM get_publication_record_id_and_create_record_if_needed(%s, %s, %s)''',
                        (self.subject_date, self.report_type, self.uuid))
            object.__setattr__(self, '_id', cur.fetchone()[0])
            cur.execute(r'''SELECT * FROM get_publication_record_uuid(%s)''',
                        (self._id,))
            uuid = cur.fetchone()[0]
            if (not self.uuid):
                # Frozen dataclass: adopt the database-side UUID.
                object.__setattr__(self, 'uuid', uuid)
            else:
                assert(self.uuid == uuid)

    @property
    def is_done(self) -> bool:
        """True when every page of the report has a recorded reply post."""
        posts = self.reply_posts
        if len(posts) == 0:
            return False
        for post in posts:
            if post.reply_post_id is None:
                return False
        return True

    @property
    def attempts(self) -> int:
        """Number of publication attempts recorded so far."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''SELECT * FROM get_publication_record_attempts(%s)''',
                        (self._id,))
            return cur.fetchone()[0]

    def increase_attempts(self):
        """Record one more publication attempt."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''CALL increase_publication_record_attempts(%s)''',
                        (self._id,))

    @property
    def reply_posts(self) -> List[PublishedPost]:
        """Per-page publication status, in the order returned by the database."""
        posts = []
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''SELECT * FROM get_publication_pages_response_info(%s)''',
                        (self._id,))
            for row in cur:
                posts.append(PublishedPost(
                    report_page_number=row[0],
                    reply_post_id=row[1],
                    reply_offset=row[2],
                ))
        return posts

    def report_thread_id_and_reply_count(self, thread_id: int, reply_count: int):
        """Record the destination thread and how many pages will be posted."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''CALL report_publication_destination_thread_id_and_page_count(%s, %s, %s)''',
                        (self._id, thread_id, reply_count))

    def report_found_reply_post(self, report_page_number: int, post_id: int, offset: int):
        """Record the reply post found to carry the given report page."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''CALL report_found_publication_page(%s, %s, %s, %s)''',
                        (self._id, report_page_number, post_id, offset))
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,103 | FToovvr/adnmb-quests-watcher | refs/heads/master | /commons/debugging.py | from datetime import datetime
import sys
sys.path.append("..") # noqa
from models.analyzing import ThreadStats
super_huge_thread_pg = ThreadStats(
id=123456789,
created_at=datetime(2006, 1, 2, 15, 4, 5, 999999),
is_new=False,
is_disappeared=False,
title=None, name=None,
raw_content="测试撑爆"*100+'\nfoo\n<font color="blue">' + "撑爆" * 100 + '</font>',
increased_response_count=987654,
total_reply_count=1234567,
increased_response_count_by_po=123456,
distinct_cookie_count=654321,
increased_character_count=100000,
increased_character_count_by_po=100000,
blue_text="撑爆" * 100,
are_blue_texts_new=True,
)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,104 | FToovvr/adnmb-quests-watcher | refs/heads/master | /2.5_check_disappeared.py | #!/usr/bin/env python3
from typing import List
from dataclasses import dataclass
import traceback
import logging
import logging.config
import argparse
from datetime import datetime, date, timedelta
import sys
from pathlib import Path
import psycopg2
import anobbsclient
from anobbsclient.walk import create_walker, BoardWalkTarget
from commons.consts import local_tz, get_target_date
from commons.config import load_config, ClientConfig
from models.activity import Activity, Stats
from models.collecting import DB
logging.config.fileConfig('logging.2.5_check_status_of_threads.conf')
@dataclass(frozen=True)
class Arguments:
config_file_path: str
since: datetime
board_id: int
completion_registry_thread_id: int # 其实不需要
connection_string: str
client_config: ClientConfig
def parse_args(args: List[str]) -> Arguments:
parser = argparse.ArgumentParser(
description="检查截止到指定位置活动过的主题串是否仍然存在。"
)
parser.add_argument(
'-c', '--config', type=str, default='./config.yaml',
dest='config_file_path',
help='配置文件路径',
)
parser.add_argument(
'since', type=str, nargs='?',
help='\n'.join([
"截止到的日期或日期+时间,格式为 RFC 3339。",
"省缺则为四个小时前的前一天的上午4时",
]),
)
parsed = parser.parse_args(args)
config = load_config(parsed.config_file_path)
if parsed.since is None:
parsed.since = get_target_date().isoformat()
parsed.since = parsed.since.strip()
if 'T' in parsed.since or ' ' in parsed.since:
parsed.since = datetime.fromisoformat(
parsed.since).replace(tzinfo=local_tz)
else:
parsed.since = datetime.fromisoformat(
f'{parsed.since} 04:00:00').replace(tzinfo=local_tz)
return Arguments(
config_file_path=parsed.config_file_path,
since=parsed.since,
board_id=config.board_id,
completion_registry_thread_id=config.completion_registry_thread_id,
connection_string=config.database.connection_string,
client_config=config.client,
)
def main():
    """Entry point: open an activity record, rescan the board, and persist
    the run's outcome."""
    args = parse_args(sys.argv[1:])
    client = args.client_config.create_client()
    # Two connections: one for the activity log, one for data writes.
    with psycopg2.connect(args.connection_string) as conn_activity, \
            psycopg2.connect(args.connection_string) as conn_db:
        activity = Activity(conn=conn_activity,
                            activity_type='check_disappeared',
                            run_at=datetime.now(tz=local_tz))
        db = DB(conn=conn_db,
                completion_registry_thread_id=args.completion_registry_thread_id)
        stats = Stats()
        message = None
        is_successful = True
        try:
            rescan_board(args, db, client, stats)
        except:
            # Capture any failure so it is persisted with the activity record.
            exc_text = traceback.format_exc()
            logging.critical(exc_text)
            message = exc_text
            is_successful = False
        finally:
            activity.report_end(is_successful, message, stats)
        if is_successful:
            logging.info("成功结束")
        else:
            exit(1)
def rescan_board(args: argparse.Namespace, db: DB, client: anobbsclient.Client, stats: Stats):
    """Walk the board back to ``args.since`` and flag threads that were seen
    in that window but no longer appear on the board (disappeared today).

    A thread that disappeared today but last floated up before ``since``
    cannot be detected this way.
    """
    thread_ids_seen_today = set(db.get_thread_ids_seen_since(args.since))
    walker = create_walker(
        target=BoardWalkTarget(
            start_page_number=1,
            board_id=args.board_id,
            stop_before_datetime=args.since,
        ),
        client=client,
    )
    for (_, page, usage) in walker:
        page: List[anobbsclient.BoardThread] = page
        now = datetime.now(tz=local_tz)
        stats.board_request_count += 1
        stats.total_bandwidth_usage.add(usage)
        for thread in page:
            # Still visible → not disappeared.
            thread_ids_seen_today.discard(thread.id)
            db.record_thread(thread, board_id=args.board_id, updated_at=now)
            db.report_is_thread_disappeared(thread.id, now, False)
    # BUG FIX: ``now`` used to be bound only inside the walker loop, so an
    # empty walk raised NameError below; bind a fresh timestamp explicitly.
    now = datetime.now(tz=local_tz)
    for not_found_thread_id in thread_ids_seen_today:
        # Only update threads not already known to be gone.
        if not db.is_thread_disappeared(not_found_thread_id):
            logging.info(f"发现 {not_found_thread_id} 消失")
            db.report_is_thread_disappeared(
                not_found_thread_id, now, True)
# Allow running this module as a script.
if __name__ == '__main__':
    main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,105 | FToovvr/adnmb-quests-watcher | refs/heads/master | /commons/thread_stats.py | from typing import Optional, Union, List
from dataclasses import dataclass
from datetime import datetime
import statistics
from bs4 import BeautifulSoup
import regex
from .consts import ZWSP, WORD_JOINER, OMITTING
@dataclass(frozen=True)
class ThreadStats:
    """Per-thread activity statistics within one reporting window."""
    id: int
    created_at: datetime
    is_new: bool          # created within the window (per report usage)
    is_disappeared: bool  # no longer visible on the board
    title: Optional[str]
    name: Optional[str]
    raw_content: str      # OP content as raw HTML
    total_reply_count: int
    increased_response_count: int        # new replies in the window
    increased_response_count_by_po: int  # ...of which by the PO
    distinct_cookie_count: int
    increased_character_count: int
    increased_character_count_by_po: int
    blue_text: Optional[str]  # blue (highlighted) text, if any
    are_blue_texts_new: bool

    @property
    def content(self) -> str:
        """OP content with HTML stripped."""
        return BeautifulSoup(self.raw_content, features='html.parser').get_text()

    @staticmethod
    def make_text_unsearchable(text: str) -> str:
        """Insert invisible separators into the text so report content does
        not pollute search results (ZWSP inside Han runs, word joiners
        inside Latin runs)."""
        def insert_zwsps_fn(match_obj):
            return ZWSP.join(list(match_obj.group(0)))
        text = regex.sub(r'\p{han}+', insert_zwsps_fn, text)

        def insert_word_joiner_fn(match_obj):
            return WORD_JOINER.join(list(match_obj.group(0)))
        text = regex.sub(r'\p{latin}+', insert_word_joiner_fn, text)
        return text

    def generate_summary(self, free_lines: int) -> str:
        """Build a short preview of the thread within a ``free_lines`` budget.

        Title and name (when present) each consume one budget line; content
        is wrapped into 16-character chunks, each costing one line, and is
        truncated with an ellipsis when the budget runs out.
        """
        # TODO: inserting the ZWSPs might fit better at the call site?
        lines = []
        if self.title is not None and len(self.title) > 0:
            title = self.title.replace(ZWSP, '')
            if len(title) > 15:  # just in case
                title = title[:14] + OMITTING
            free_lines -= 1
            lines += [
                f"标题:{ThreadStats.make_text_unsearchable(title)}"]
        if self.name is not None and len(self.name) > 0:
            name = self.name.replace(ZWSP, '')
            if len(name) > 15:  # just in case
                name = name[:14] + OMITTING
            free_lines -= 1
            lines += [
                f"名称:{ThreadStats.make_text_unsearchable(name)}"]
        for content_line in self.content.split('\n'):
            if free_lines == 0:
                lines += [OMITTING]
                break
            content_line = content_line.rstrip()
            line_to_add = ""
            # Wrap into 16-character chunks, spending one budget line each.
            for line_part in [content_line[i: i+16] for i in range(0, len(content_line), 16)]:
                if free_lines == 0:
                    line_to_add += OMITTING
                    break
                line_to_add += line_part.replace(ZWSP, '')
                free_lines -= 1
            lines += [
                ThreadStats.make_text_unsearchable(line_to_add)]
        # Drop trailing blank lines.
        while True:
            if len(lines) == 0:
                break
            if lines[-1].strip() == "":
                lines.pop()
            else:
                break
        return "\n".join(lines)
@dataclass  # (frozen=True)
class Counts:
    """Aggregate per-period statistics over a list of ``ThreadStats``.

    Attributes:
        threads: total number of threads seen.
        new_threads: number of threads first seen this period.
        new_posts: total new responses across all threads.
        thread_new_post_average: mean new responses per thread.
        thread_new_post_quartiles: Q1/Q2/Q3 of new-response counts
            (rendered as ``int`` when the value is exact).
        thread_new_post_variance: sample variance of new-response counts.
    """

    threads: int
    new_threads: int
    new_posts: int
    thread_new_post_average: int
    thread_new_post_quartiles: List[Union[float, int]]
    thread_new_post_variance: float

    def __init__(self, threads: "List[ThreadStats]"):
        self.threads = len(threads)
        self.new_threads = sum(1 for t in threads if t.is_new)
        new_post_counts = [t.increased_response_count for t in threads]
        self.new_posts = sum(new_post_counts)
        if self.threads == 0:
            self.thread_new_post_average = 0  # or should this be None?
            self.thread_new_post_quartiles = [0] * 3
            self.thread_new_post_variance = 0
            return
        self.thread_new_post_average = self.new_posts / self.threads
        if self.threads == 1:
            # statistics.quantiles/variance require >= 2 data points;
            # previously a single-thread period raised StatisticsError.
            only = new_post_counts[0]
            self.thread_new_post_quartiles = [only] * 3
            self.thread_new_post_variance = 0
            return
        q = statistics.quantiles(new_post_counts)
        # Render exact quartiles as ints. Bug fix: the original tested
        # `x.is_integer` (a bound method, always truthy), which truncated
        # *every* quartile, including fractional ones.
        q = [int(x) if x.is_integer() else x for x in q]
        self.thread_new_post_quartiles = q
        self.thread_new_post_variance = statistics.variance(new_post_counts)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,106 | FToovvr/adnmb-quests-watcher | refs/heads/master | /models/activity.py | from typing import Optional, List
from dataclasses import dataclass, field
import logging
from datetime import datetime
import psycopg2
import anobbsclient
@dataclass
class TotalBandwidthUsage:
    """Accumulates per-request bandwidth usage pairs and reports their sum."""

    # Each entry is an (uploaded, downloaded) pair; either component may be None.
    usages: "List[anobbsclient.BandwidthUsage]" = field(default_factory=list)

    def add(self, new_usage: "anobbsclient.BandwidthUsage"):
        """Record one request's (uploaded, downloaded) usage."""
        self.usages.append(new_usage)

    @property
    def total(self) -> "anobbsclient.BandwidthUsage":
        """Sum of all recorded usages; None components count as 0."""
        uploaded = sum(up or 0 for [up, _] in self.usages)
        downloaded = sum(down or 0 for [_, down] in self.usages)
        return [uploaded, downloaded]
@dataclass
class Stats:
    """Mutable counters accumulated over one collecting run and reported
    via ``Activity.report_end``.

    NOTE(review): the count attributes previously carried no type
    annotations, so ``@dataclass`` treated them as plain class attributes
    rather than fields. Annotating them makes them proper per-instance
    dataclass fields (with the same defaults) while keeping ``Stats()``
    working unchanged for existing callers.
    """

    new_thread_count: int = 0
    affected_thread_count: int = 0
    new_post_count: int = 0
    board_request_count: int = 0
    thread_request_count: int = 0
    logged_in_thread_request_count: int = 0
    # Per-instance accumulator; default_factory avoids sharing across runs.
    total_bandwidth_usage: TotalBandwidthUsage = field(
        default_factory=TotalBandwidthUsage)
@dataclass(frozen=True)
class Activity:
    """One recorded run of a collecting/checking script.

    Constructing an instance immediately inserts a new activity row in the
    database (see ``__post_init__``) and stores the generated id in
    ``activity_id``. All SQL goes through stored functions/procedures.
    """

    # Open PostgreSQL connection used for every statement.
    conn: psycopg2._psycopg.connection
    # Free-form activity type tag (stored verbatim), e.g. 'legacy' in migrations.
    activity_type: str
    # Wall-clock time of the run; None lets the DB side choose.
    run_at: Optional[datetime] = None
    logger: Optional[logging.Logger] = field(
        default_factory=lambda: logging.getLogger('DB'))  # TODO: rename to 'Activity'?
    # Assigned by the DB in __post_init__; not a constructor argument.
    activity_id: int = field(init=False)

    @staticmethod
    def get_last_activity_run_at(conn: psycopg2._psycopg.connection, activity_type: str):
        """Return the ``run_at`` of the most recent activity of this type."""
        with conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT get_last_activity_run_at(%s)',
                        (activity_type,))
            return cur.fetchone()[0]

    def __post_init__(self):
        # Insert the activity row and capture its id. `object.__setattr__`
        # is required because this dataclass is frozen.
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT * FROM create_new_activity_and_return_id(%s, %s)',
                        (self.activity_type, self.run_at))
            object.__setattr__(self, 'activity_id', cur.fetchone()[0])
        if self.logger:
            self.logger.info(f'已开始新活动。活动 id = {self.activity_id},'
                             + f'活动类型 = {self.activity_type}')

    @staticmethod
    def never_collected(conn: psycopg2._psycopg.connection) -> bool:
        """True when no collecting run has ever completed."""
        with conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT * FROM never_collected()')
            return cur.fetchone()[0]

    @property
    def should_collect_since(self) -> datetime:
        """Lower bound for the next collection window."""
        # XXX: calling this used to also update `fetched_since`;
        # `fetched_since` is now updated later, in `report_end`.
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT * FROM should_collect_since()')
            return cur.fetchone()[0]

    def report_collecting_range(self, since: datetime, until: datetime):
        """Record the [since, until) window this activity fetched."""
        if self.logger:
            self.logger.info(
                f'正在汇报本次活动抓取时间范围。活动 ID = {self.activity_id},'
                + f'此下限 = {since},此上限 = {until}')
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'CALL report_collecting_range(%s, %s, %s)',
                        (self.activity_id, since, until))
        if self.logger:
            self.logger.info(f'已汇报本次活动抓取时间范围。活动 ID = {self.activity_id}')

    def report_end(self, is_successful: bool, message: Optional[str], stats: Stats):
        """Finalize the activity row with its outcome and counters."""
        total_usage = stats.total_bandwidth_usage.total
        if self.logger:
            self.logger.info(
                f'正在汇报本次活动结果。活动 ID = {self.activity_id},成功 = {is_successful},'
                + f'上传字节数 = {total_usage[0]},下载字节数 = {total_usage[1]},'
                + f'新记录串数 = {stats.new_thread_count},有新增回应串数 = {stats.affected_thread_count},'
                + f'新记录回应数 = {stats.new_post_count},'
                + f'请求版块页面次数 = {stats.board_request_count},请求串页面次数 = {stats.thread_request_count},'
                + f'以登录状态请求串页面次数 = {stats.logged_in_thread_request_count}')
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            # 11 positional arguments; keep the tuple below in the same order
            # as the report_end stored procedure expects.
            cur.execute(r'CALL report_end(' + ', '.join(['%s']*11) + ')',
                        (
                            self.activity_id,
                            is_successful, message,
                            total_usage[0], total_usage[1],
                            stats.new_thread_count, stats.affected_thread_count,
                            stats.new_post_count,
                            stats.board_request_count, stats.thread_request_count,
                            stats.logged_in_thread_request_count,
                        ))
        if self.logger:
            self.logger.info(
                f'已汇报本次活动结果。活动 ID = {self.activity_id}')
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,107 | FToovvr/adnmb-quests-watcher | refs/heads/master | /db/migrate.py | #!/usr/bin/env python3
from typing import Optional
import sys
import json
import sqlite3
import psycopg2
import psycopg2.extras
from datetime import datetime, date
from dateutil import tz
import sys
sys.path.append("..") # noqa
from models.publication_record import PublicationRecord
from models.activity import Activity, Stats, TotalBandwidthUsage
# Timezone applied when converting epoch timestamps to aware datetimes (ts2dt).
local_tz = tz.gettz('Asia/Shanghai')

# Board id of the quest board; every migrated thread is recorded under it.
QST_BOARD_ID = 111
def ts2dt(ts):
    """Convert an epoch timestamp to an aware datetime in `local_tz`;
    falsy input (None/0) maps to None."""
    if not ts:
        return None
    return datetime.fromtimestamp(ts, tz=local_tz)
def count_rows(conn_s3: sqlite3.Connection, table: str):
    """Return the number of rows in `table`.

    XXX: the table name is interpolated unescaped — acceptable here because
    it only ever comes from our own code, never from user input.
    """
    [row_count] = conn_s3.execute(f'SELECT count(*) FROM {table}').fetchone()
    return row_count
# Require exactly two CLI arguments: <sqlite-db-path> <postgres-dsn>.
if len(sys.argv) != 3:
    # Fix: use sys.exit instead of the bare exit() builtin — exit() is
    # injected by the `site` module and is not guaranteed to exist when
    # running with `python -S` or in frozen/embedded interpreters.
    sys.exit(1)
def main():
    """Migrate the legacy sqlite database (argv[1]) into PostgreSQL (argv[2])."""
    conn_s3 = sqlite3.connect(sys.argv[1])
    with psycopg2.connect(sys.argv[2]) as conn_pg:
        conn_pg: psycopg2._psycopg.connection = conn_pg
        with conn_pg.cursor() as cur_pg:
            cur_pg: psycopg2._psycopg.cursor = cur_pg
            # Tell DB-side procedures/triggers that this session is a migration.
            cur_pg.execute(
                r'''SELECT set_config('fto.MIGRATING', %s::text, FALSE)''', (True,))
            cur_pg.execute(
                r'''SELECT set_config('fto.COMPLETION_REGISTRY_THREAD_ID', %s::text, FALSE)''', (22762342,))
        # Migrations run in this fixed order.
        for migrate_fn in [
            migrate_activity_table,
            migrate_publication_tables,
            migrate_thread_table,
            migrate_post_table,
        ]:
            print(f"start: {migrate_fn.__name__}")
            migrate_fn(conn_s3, conn_pg)
            print(f"done: {migrate_fn.__name__}")
    # Dump any async notifications the server raised during the migration.
    print(conn_pg.notifies)
    # psycopg2's `with connection` commits but does not close — close explicitly.
    conn_pg.close()
    conn_s3.close()
def migrate_activity_table(conn_s3: sqlite3.Connection, conn_pg: psycopg2._psycopg.connection):
    """Replay each legacy sqlite `activity` row as a new 'legacy' Activity.

    While replaying, asserts that the PostgreSQL side reproduces the
    collection-window chaining: each activity's `should_collect_since`
    equals the previous *successful* activity's upper bound.
    """
    n = count_rows(conn_s3, 'activity')
    # Upper bound of the most recent successful fetch, for the chaining check.
    last_fetched_until: datetime = None
    for i, [
        _,  # id,
        run_at, fetched_since, ensured_fetched_until, is_successful, message,
        uploaded_bytes, downloaded_bytes, newly_recorded_thread_count, affected_thread_count,
        newly_recorded_post_count, requested_board_page_count, requested_thread_page_count, logged_in_thread_request_count,
    ] in enumerate(conn_s3.execute(r'SELECT * FROM activity')):
        # Constructing the Activity inserts the row; logger=None keeps it quiet.
        activity = Activity(
            conn=conn_pg, activity_type='legacy', run_at=ts2dt(run_at), logger=None)
        if last_fetched_until is None:
            assert(Activity.never_collected(conn=conn_pg))
            assert(activity.should_collect_since)
        else:
            assert(not Activity.never_collected(conn=conn_pg))
            assert(activity.should_collect_since == last_fetched_until)
        activity.report_collecting_range(since=ts2dt(
            fetched_since), until=ts2dt(ensured_fetched_until))
        if ensured_fetched_until and is_successful:
            last_fetched_until = ts2dt(ensured_fetched_until)
        # Rebuild the run's counters from the legacy columns.
        stats = Stats()
        stats.new_thread_count = newly_recorded_thread_count
        stats.affected_thread_count = affected_thread_count
        stats.new_post_count = newly_recorded_post_count
        stats.board_request_count = requested_board_page_count
        stats.thread_request_count = requested_thread_page_count
        stats.logged_in_thread_request_count = logged_in_thread_request_count
        stats.total_bandwidth_usage.add([uploaded_bytes, downloaded_bytes])
        activity.report_end(is_successful=bool(is_successful), message=message,
                            stats=stats)
        if i % 100 == 0:
            # Progress indicator only.
            print(f"activity: {i+1}/{n} {ts2dt(run_at)}")
def remove_field(misc_fields: Optional[str], field_name: str) -> Optional[str]:
    """Drop `field_name` from a JSON-object string.

    Returns the re-serialized object, or None when the input is None or the
    object becomes empty after removal.
    """
    if misc_fields is None:
        return None
    fields = json.loads(misc_fields)
    fields.pop(field_name, None)
    if not fields:
        return None
    return json.dumps(fields)
def find_updated_at(conn_s3: sqlite3.Connection, id: int, updated_at: Optional[datetime]) -> datetime:
    """Return `updated_at` if given; otherwise derive one from legacy data.

    Fallback: the earliest activity whose `ensured_fetched_until` lies after
    the thread's first recorded post's `created_at` (or the thread's own
    `created_at` when it has no posts).

    NOTE(review): the inner scalar subqueries have ORDER BY but no LIMIT;
    sqlite takes the first row of a scalar subquery, so ASC yields the
    *earliest* created_at — confirm that is the intended anchor.
    NOTE(review): if no activity matches, `row` is None and `row[0]` raises
    TypeError — presumably unreachable for data being migrated.
    """
    if updated_at:
        return updated_at
    row = conn_s3.execute(r'''
        SELECT ensured_fetched_until FROM activity
        WHERE ensured_fetched_until > COALESCE(
            (SELECT created_at FROM post WHERE parent_thread_id = ? ORDER BY id ASC),
            (SELECT created_at FROM thread WHERE id = ?)
        )
        ORDER BY ensured_fetched_until ASC
        LIMIT 1
    ''', (id, id)).fetchone()
    return ts2dt(row[0])
def migrate_thread_table(conn_s3: sqlite3.Connection, conn_pg: psycopg2._psycopg.connection):
    """Migrate threads: first all historical revisions, then current state.

    First loop: replays `thread_old_revision` rows (ordered by thread id,
    then revision age) so PostgreSQL sees each OP revision in order. A
    revision's `updated_at` is the previous revision's
    `not_anymore_at_least_after`, or (for the first revision) a timestamp
    derived via `find_updated_at`.

    Second loop: records each thread's latest state plus its
    is_disappeared flag.
    """
    with conn_pg.cursor() as cur_pg:
        # (previous thread id, previous revision's not_anymore_at_least_after)
        last = [None, None]
        for [
            id, content,
            name, email, title,
            created_at, user_id, attachment_base, attachment_extension,
            misc_fields,
            not_anymore_at_least_after,
        ] in conn_s3.execute(r'''
            SELECT
                thread_old_revision.id, thread_old_revision.content,
                thread_old_revision.name, thread_old_revision.email, thread_old_revision.title,
                thread.created_at, thread.user_id, thread.attachment_base, thread.attachment_extension,
                thread.misc_fields,
                not_anymore_at_least_after
            FROM thread_old_revision
            LEFT JOIN thread ON thread_old_revision.id = thread.id
            ORDER BY thread_old_revision.id ASC, not_anymore_at_least_after ASC
        '''):
            updated_at = None
            if last[0] == id:
                # Same thread as the previous row: chain from that revision.
                updated_at = ts2dt(last[1])
            else:
                updated_at = find_updated_at(conn_s3, id, None)
            # 'fid' lives in its own column on the PG side; drop it from misc.
            misc_fields = remove_field(misc_fields, 'fid')
            cur_pg.execute(r'''
                CALL record_thread(''' + ', '.join([r'%s']*13) + r''')
            ''', (
                id, QST_BOARD_ID, ts2dt(created_at), user_id, content,
                attachment_base or '', attachment_extension or '',
                name or '', email or '', title or '',
                misc_fields or psycopg2.extras.Json(None),
                None,
                updated_at,
            ))
            last = [id, not_anymore_at_least_after]

        for [
            id, created_at, user_id, content, current_reply_count,
            attachment_base, attachment_extension,
            name, email, title, misc_fields,
            latest_checked_at, is_disappeared,
            current_revision_checked_at,
        ] in conn_s3.execute(r'''
            SELECT thread.*,
                checked_at AS latest_checked_at, is_disappeared,
                MAX(not_anymore_at_least_after) AS current_revision_checked_at
            FROM thread
            LEFT JOIN thread_extra ON thread.id = thread_extra.id
            LEFT JOIN thread_old_revision ON thread.id = thread_old_revision.id
            GROUP BY thread.id
        '''):
            misc_fields = remove_field(misc_fields, 'fid')
            cur_pg.execute(r'''
                CALL record_thread(''' + ', '.join([r'%s']*13) + r''')
            ''', (
                id, QST_BOARD_ID, ts2dt(created_at), user_id, content,
                attachment_base or '', attachment_extension or '',
                name or '', email or '', title or '',
                misc_fields or psycopg2.extras.Json(None),
                current_reply_count,
                find_updated_at(conn_s3, id, ts2dt(
                    current_revision_checked_at)),
            ))
            cur_pg.execute(r'''
                CALL report_is_thread_disappeared(%s, %s, %s)
            ''', (id, bool(is_disappeared), ts2dt(latest_checked_at)))
def migrate_post_table(conn_s3: sqlite3.Connection, conn_pg: psycopg2._psycopg.connection):
    """Migrate every legacy `post` row via the record_response procedure."""
    with conn_pg.cursor() as cur_pg:
        n = count_rows(conn_s3, 'post')
        for i, [
            id, parent_thread_id, created_at, user_id, content,
            attachment_base, attachment_extension,
            name, email, title, _,  # misc_fields,
        ] in enumerate(conn_s3.execute(r'SELECT * FROM post')):
            cur_pg.execute(r'''
                CALL record_response(''' + ', '.join([r'%s']*12) + r''')
            ''', (
                id, parent_thread_id, ts2dt(created_at), user_id, content,
                attachment_base or '', attachment_extension or '',
                name or '', email or '', title or '',
                # The pre-migration code accidentally stored the parent
                # thread's misc_fields as the response's, so drop them here.
                psycopg2.extras.Json(None),
                None,
            ))
            if i % 100 == 0:
                # Progress indicator only.
                print(f"post: {i+1}/{n} {ts2dt(created_at)}")
def migrate_publication_tables(conn_s3: sqlite3.Connection, conn_pg: psycopg2._psycopg.connection):
    """Replay legacy publishing traces as PublicationRecords.

    For each `publishing_trace` row, recreates the record (type 'trend'),
    replays its attempt count, then re-registers each published reply post.
    Asserts the replay matches the legacy data.
    """
    for [
        record_id,
        _date, _type, uuid, attempts, to_thread_id,
        page_count
    ] in conn_s3.execute(r'''
        SELECT publishing_trace.*, count(published_post.trace_id)
        FROM publishing_trace
        LEFT JOIN published_post ON published_post.trace_id = publishing_trace.id
        GROUP BY publishing_trace.id
    '''):
        attempts = int(attempts)
        _date = date.fromisoformat(_date)
        assert(not PublicationRecord.is_report_published(
            conn_pg, _date, 'trend'))
        # Constructing the record registers it on the PG side.
        record = PublicationRecord(
            conn=conn_pg, subject_date=_date,
            report_type='trend', uuid=uuid,
        )
        # Replay attempts one by one so PG-side counters match.
        for _ in range(0, attempts):
            record.increase_attempts()
        assert(attempts == record.attempts)
        record.report_thread_id_and_reply_count(to_thread_id, page_count)
        for [
            _, _,  # page_id, record_id,
            page_number, reply_post_id, reply_offset
        ] in conn_s3.execute(r'SELECT * FROM published_post WHERE trace_id = ?', (record_id,)):
            record.report_found_reply_post(
                page_number, reply_post_id, reply_offset)
        assert(record.is_done)
if __name__ == '__main__':
main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,108 | FToovvr/adnmb-quests-watcher | refs/heads/master | /models/analyzing.py | from typing import Optional, Union, Tuple, List, Dict, OrderedDict
from dataclasses import dataclass
from datetime import datetime, timedelta, date
from bs4 import BeautifulSoup
import psycopg2
import anobbsclient
import sys
sys.path.append("..") # noqa
# pylint: disable=import-error
from commons.consts import local_tz
from commons.thread_stats import ThreadStats, Counts
@dataclass(frozen=True)
class Stats:
    # Aggregate (uploaded, downloaded) bandwidth for the reporting window.
    total_bandwidth_usage: anobbsclient.BandwidthUsage
@dataclass(frozen=True)
class DB:
    """Read-only query helpers for building the daily report.

    Every query delegates to a stored function on the PostgreSQL side; this
    class only shapes the results. A reporting "day" runs from 04:00 local
    time to 04:00 the next day (see `_get_boundaries`).
    """

    # Open cursor; the caller owns its lifetime.
    cur: psycopg2._psycopg.cursor

    @staticmethod
    def format_blue_texts(blue_texts: Optional[str]):
        """Strip HTML from the first collected blue text, or pass None through.

        NOTE(review): despite the `Optional[str]` annotation, `blue_texts[0]`
        suggests the DB actually returns an array of strings — confirm.
        """
        if blue_texts is None:
            return None
        return BeautifulSoup(blue_texts[0], 'html.parser').get_text()

    def get_daily_threads(self, date: datetime) -> List[ThreadStats]:
        """Return per-thread statistics for the given reporting day."""
        lower_bound, upper_bound = self._get_boundaries(date)

        self.cur.execute(r'''SELECT * FROM get_daily_threads_report(%s, %s)''',
                         (lower_bound, upper_bound))
        rows = self.cur.fetchall()

        threads: List[ThreadStats] = []
        for [
            id, parent_board_id,  # TODO: not yet clear where parent_board_id should be filtered; currently only 111 (quest board) exists
            created_at, is_new, is_disappeared,
            title, name, content,
            total_response_count,
            increased_response_count,
            increased_response_count_by_po,
            distinct_cookie_count,
            increased_character_count,
            increased_character_count_by_po,
            blue_texts,
            are_blue_texts_new,
        ] in rows:
            threads.append(ThreadStats(
                id=id,
                created_at=created_at,
                is_new=is_new,
                is_disappeared=is_disappeared,
                title=title,
                name=name,
                raw_content=content,
                total_reply_count=total_response_count,
                increased_response_count=increased_response_count,
                increased_response_count_by_po=increased_response_count_by_po,
                distinct_cookie_count=distinct_cookie_count,
                increased_character_count=increased_character_count,
                increased_character_count_by_po=increased_character_count_by_po,
                blue_text=DB.format_blue_texts(blue_texts),
                are_blue_texts_new=are_blue_texts_new,
            ))

        return threads

    def get_responses_match(self, date: datetime, in_thread_id: int, content_pattern: str) -> List[Tuple[int, str, int]]:
        """Return that day's responses in a thread matching a regex pattern.

        Returns
        -------
        [0] : int
            Id of the matching response.
        [1] : str
            Content of the matching response.
        [2] : int
            Offset of the matching response within the thread.
            May have shifted forward if later responses were deleted
            between posting and this run.
        """
        lower_bound, upper_bound = self._get_boundaries(date)
        self.cur.execute(r'''SELECT * FROM get_responses_match(%s, %s, %s, %s)''',
                         (in_thread_id, content_pattern, lower_bound, upper_bound))
        return self.cur.fetchall()

    def get_daily_qst(self, date: datetime, daily_qst_thread_id: int) -> Optional[Tuple[int, int]]:
        """Locate that day's daily quest report post, if any.

        Returns
        -------
        [0] : int
            Id of that day's report post.
        [1] : int
            Offset of the report post within the thread.
            May have shifted forward if later responses were deleted
            between posting and this run.
        """
        lower_bound, upper_bound = self._get_boundaries(date)
        self.cur.execute(r'''SELECT * FROM get_daily_qst_id_and_position(%s, %s, %s)''',
                         (daily_qst_thread_id, lower_bound, upper_bound))
        row = self.cur.fetchone()
        return None if row is None else (row[0], row[1])

    def get_meta_stats(self, date: datetime) -> Stats:
        """Return aggregate bandwidth usage for the reporting day."""
        lower_bound, upper_bound = self._get_boundaries(date)
        self.cur.execute(r'''SELECT * FROM get_meta_stats(%s, %s)''',
                         (lower_bound, upper_bound))
        row = self.cur.fetchone()
        return Stats((row[0], row[1]))

    def get_tail_frequencies(self, date: datetime) -> Tuple[int, Dict[int, float]]:
        """Return (total count, {tail digit: relative frequency}) for the day."""
        # TODO: keeping the ordering here is actually unnecessary
        lower_bound, upper_bound = self._get_boundaries(date)
        self.cur.execute(r'''SELECT * FROM get_tail_count(%s, %s)''',
                         (lower_bound, upper_bound))
        rows = self.cur.fetchall()
        counts = OrderedDict({r[0]: r[1] for r in rows})
        sum_count = sum(counts.values())
        frequencies = OrderedDict((tail, float(count) / sum_count)
                                  for tail, count in counts.items())
        return (sum_count, frequencies)

    def get_consecutive_tail_counts(self, date: datetime, n: int) -> Tuple[int, int, int]:
        """
        Returns
        -------
        [0]
            Number of consecutive digits.
        [1]
            Count of post ids with such tails.
        [2]
            Count of those whose tail digits are zeros.

        NOTE(review): despite the annotation this returns `fetchall()`,
        i.e. a list of such rows — confirm which the callers expect.

        TODO
        ----
        Keeping the ordering here is unnecessary.
        """
        lower_bound, upper_bound = self._get_boundaries(date)
        self.cur.execute(r'''SELECT * FROM get_count_of_tail_numbers_with_consecutive_digits(%s, %s, %s)''',
                         (n, lower_bound, upper_bound))
        return self.cur.fetchall()

    def _get_boundaries(self, date: date) -> Tuple[datetime, datetime]:
        """Map a calendar date to its [04:00, next-day 04:00) local window."""
        lower_bound = datetime.fromisoformat(
            f"{date.isoformat()} 04:00:00").replace(tzinfo=local_tz)
        upper_bound = lower_bound + timedelta(days=1)
        return (lower_bound, upper_bound)
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,109 | FToovvr/adnmb-quests-watcher | refs/heads/master | /models/collecting.py | from typing import Any, Optional, List, Dict
from dataclasses import dataclass, field
import logging
from datetime import datetime, timedelta
import json
import traceback
import psycopg2
import psycopg2.extras
import anobbsclient
import sys
sys.path.append("..") # noqa
# pylint: disable=import-error
from commons.consts import local_tz
@dataclass(frozen=True)
class DB:
    """Write/read helpers used while collecting threads and responses.

    All statements delegate to stored functions/procedures on the
    PostgreSQL side. ``__post_init__`` configures the session so that
    DB-side logic knows this is a live collection (not a migration).
    """

    # Open PostgreSQL connection used for every statement.
    conn: psycopg2._psycopg.connection
    # Thread id of the completion registry; exposed to the DB via set_config.
    completion_registry_thread_id: int
    logger: logging.Logger = field(
        default_factory=lambda: logging.getLogger('DB'))

    def __post_init__(self):
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            # Live collection session — migration shortcuts stay disabled.
            cur.execute(
                r'''SELECT set_config('fto.MIGRATING', %s::text, FALSE)''', (False,))
            cur.execute(
                r'''SELECT set_config('fto.COMPLETION_REGISTRY_THREAD_ID', %s::text, FALSE)''',
                (self.completion_registry_thread_id,))

    def try_find_thread_latest_seen_reply_id(self, thread_id: int) -> Optional[int]:
        """Id of the latest response already recorded for a thread, if any."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'''SELECT find_thread_latest_seen_response_id(%s)''',
                        (thread_id,))
            return cur.fetchone()[0]

    def record_thread(self, thread: anobbsclient.ThreadPage, board_id: int, updated_at: datetime):
        """Insert or update a thread's OP record.

        NOTE
        ----
        The thread's total response count is *not* updated here.
        """
        self.logger.info(f'正在记录/更新串信息。串号 = {thread.id}')

        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            # 13 positional arguments, in the order record_thread expects.
            cur.execute(r'CALL record_thread(' + ', '.join(['%s'] * 13) + r')', (
                thread.id, board_id, thread.created_at, thread.user_id, thread.content,
                thread.attachment_base or '', thread.attachment_extension or '',
                thread.name or '', thread.email or '', thread.title or '',
                DB.__extract_misc_fields(thread) or psycopg2.extras.Json(None),
                thread.total_reply_count, updated_at,
            ))

        self.logger.info(f'已记录/更新串信息。串号 = {thread.id}')

    def record_thread_replies(self, thread: anobbsclient.BoardThread, replies: List[anobbsclient.Post],
                              total_reply_count: int, updated_at: datetime):
        """Record newly fetched responses of a thread.

        The previous implementation inserted the collected responses in a
        single transaction; inserting them one by one in chronological
        order works just as well, so no transaction is used.

        Parameters
        ----------
        thread : anobbsclient.BoardThread
            The OP as seen on the board page.
        replies : List[anobbsclient.Post]
            All responses to record this time.
        total_reply_count : int
            Total response count of the thread.
            Passed separately because it may change mid-fetch; the value
            observed at the end of fetching is used.
        updated_at : datetime
            Timestamp forwarded to record_response for each inserted row.
        """
        self.logger.info(f'正在记录串中新抓取到的回应。串号 = {thread.id},'
                         + f'新记录回应数 = {len(replies)},更新的回应总数 = {total_reply_count}')

        # Insert in ascending id order, i.e. chronologically.
        replies = sorted(replies, key=lambda r: r.id)

        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'CALL update_thread_extra_current_reply_count(%s, %s)',
                        (thread.id, total_reply_count))
            for post in replies:
                cur.execute(r'CALL record_response(' + ', '.join(['%s'] * 12) + r')', (
                    post.id, thread.id, post.created_at, post.user_id, post.content,
                    post.attachment_base or '', post.attachment_extension or '',
                    post.name or '', post.email or '', post.title or '',
                    DB.__extract_misc_fields(
                        post) or psycopg2.extras.Json(None),
                    updated_at)
                )

        self.logger.info(f'已记录串中新抓取到的回应。串号 = {thread.id}')

    def get_thread_total_reply_count(self, thread_id: int) -> int:
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT get_thread_total_response_count(%s)',
                        (thread_id,))
            return cur.fetchone()[0]

    def is_thread_recorded(self, thread_id: int) -> bool:
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT is_thread_in_database(%s)',
                        (thread_id,))
            return cur.fetchone()[0]

    @staticmethod
    def __extract_misc_fields(post: anobbsclient.Post) -> Optional[Dict[str, Any]]:
        """Collect the post's leftover raw fields not stored in columns.

        Drops well-known keys, flags that carry no information in their
        default state ('sage'/'admin' when unset, status 'n') and 'fid'.

        NOTE(review): despite the annotation, this returns a JSON *string*
        (json.dumps) or None — confirm what record_thread/record_response
        expect, since the fallback uses psycopg2.extras.Json.
        """
        post_raw: Dict[str, Any] = post.raw_copy()
        for key in ['id', 'img', 'ext', 'now', 'userid', 'name', 'email', 'title', 'content',
                    'replys', 'replyCount', 'remainReplys']:
            post_raw.pop(key, None)
        if not post.marked_sage:
            post_raw.pop('sage', None)
        if not post.marked_admin:
            post_raw.pop('admin', None)
        if post_raw.get('status', None) == 'n':
            post_raw.pop('status')
        post_raw.pop('fid', None)
        if len(post_raw) == 0:
            return None
        return json.dumps(post_raw)

    def get_thread_ids_seen_since(self, datetime: datetime) -> List[int]:
        """Return ids of threads that have been seen since the given time.

        NOTE(review): the parameter name shadows the ``datetime`` class —
        kept for interface compatibility (callers may pass it by keyword).
        """
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT get_thread_ids_seen_since(%s)',
                        (datetime,))
            return cur.fetchone()[0]

    def is_thread_disappeared(self, thread_id: int) -> bool:
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'SELECT is_thread_disappeared(%s)',
                        (thread_id,))
            return cur.fetchone()[0]

    def report_is_thread_disappeared(self, thread_id: int, checked_at: datetime, value: bool):
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(r'CALL update_thread_extra_is_disappeared(%s, %s, %s)',
                        (thread_id, checked_at, value))

    def get_thread_ids_in_completion_registry_thread_without_blue_texts(self) -> List[int]:
        """Ids of registry-listed threads that still lack blue texts."""
        with self.conn.cursor() as cur:
            cur: psycopg2._psycopg.cursor = cur
            cur.execute(
                r'SELECT * FROM get_thread_ids_in_completion_registry_thread_without_blue_texts()')
            return cur.fetchone()[0]
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,110 | FToovvr/adnmb-quests-watcher | refs/heads/master | /db/_test_remove_contents_spaces.py | #!/usr/bin/env python3
import re
import psycopg2
from bs4 import BeautifulSoup
def main():
    """Cross-check the DB-side character counter against a local count.

    For each post, compares count_content_characters_works(content) with the
    length of the BeautifulSoup-stripped, whitespace-removed content, printing
    the first mismatches and stopping after 10 of them.
    """
    with open('password.secret', 'r') as password_file:
        password = password_file.read().strip()
    conn: psycopg2._psycopg.connection = psycopg2.connect(
        f'dbname=adnmb_qst_watcher user=postgres password={password} host=pi')
    cur: psycopg2._psycopg.cursor = conn.cursor()

    # Number of mismatches found so far.
    x = 0

    cur.execute(r'SELECT id, content FROM post')
    for i, [id, content] in enumerate(cur):
        if i % 1000 == 0:
            # Progress indicator only.
            print(i+1)
        # Separate cursor: the outer one is still streaming the SELECT.
        with conn.cursor() as cur2:
            cur2.execute(
                r'''SELECT count_content_characters_works(%s)''', (content,))
            [length_pg] = cur2.fetchone()
        no_spaces_content_bs = re.sub(
            r'\s', '', BeautifulSoup(content, 'html.parser').get_text())
        if len(no_spaces_content_bs) != length_pg:
            print([i+1, x+1, 'length', len(no_spaces_content_bs), length_pg,
                   id, content])
            x += 1
            if x == 10:
                break
if __name__ == '__main__':
main()
| {"/0_tick.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/publication_record.py"], "/1_collect.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/fun/generate_wordcloud.py": ["/commons/consts.py", "/commons/config.py"], "/commons/include_filters.py": ["/commons/thread_stats.py"], "/3_generate_text_report.py": ["/commons/consts.py", "/commons/config.py", "/commons/thread_stats.py", "/commons/include_filters.py", "/models/analyzing.py", "/models/publication_record.py", "/commons/debugging.py", "/fun/generate_wordcloud.py"], "/2.6_check_completed.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/debugging.py": ["/models/analyzing.py"], "/2.5_check_disappeared.py": ["/commons/consts.py", "/commons/config.py", "/models/activity.py", "/models/collecting.py"], "/commons/thread_stats.py": ["/commons/consts.py"], "/db/migrate.py": ["/models/publication_record.py", "/models/activity.py"], "/models/analyzing.py": ["/commons/consts.py", "/commons/thread_stats.py"], "/models/collecting.py": ["/commons/consts.py"]} |
52,133 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/pruning/network_slimming/network_slimming.py | # Copyright (c) 2018 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
from chainerpruner import Graph
from chainerpruner.rebuild import rebuild
from chainerpruner.masks import NormMask
def pruning(model, args, target_conv_layers, threshold, default=None):
    """Apply mask and rebuild for Network Slimming.

    Args:
        model (torch.nn.Module, chainer.Chain): target model.
        args: dummy inputs of target model.
        target_conv_layers (list[str]):
        threshold (float, dict): mask threshold for BatchNorm2d.weight.
        default (float, Optional): default threshold (available only if threshold is dict).

    Returns:
        dict: pruning runtime information
    """
    graph = Graph(model, args)
    mask = NormMask(model, graph, target_conv_layers,
                    threshold=threshold, default=default,
                    mask_layer='batchnorm')
    # Dict values evaluate left to right, so masking runs before rebuild.
    return {
        'mask': mask(),
        'rebuild': rebuild(model, graph, target_conv_layers),
    }
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,134 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/rebuild/pytorch/modules/linear.py | # Copyright (c) 2018 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
import torch
import torch.nn as nn
from chainerpruner.rebuild.rebuildlink import RebuildLink
from chainerpruner.rebuild.utils import log_shape
class RebuildLinear(RebuildLink):
    """Shrinks an nn.Linear whose inputs come from a pruned upstream layer.

    Only the passive direction is implemented: when channels of a preceding
    conv are removed, the matching input columns of the Linear weight are
    removed here.
    """

    def passive_rebuild(self, linear, mask):
        # Affected when the previous layer is a conv (translated from the
        # original Japanese comment: "conv-linearだと影響を受ける").
        self.logger.debug(log_shape(linear.weight.data, mask))
        input_shape = self.node.input_shape[0]
        if len(input_shape) == 4 and input_shape[1] == len(mask):
            # prev node is conv: conv-fc
            # Weight columns are grouped per input channel: view as
            # (out, C, H, W), drop masked channels, flatten back.
            n_out, n_in = linear.weight.shape
            w = linear.weight.data.clone().reshape(n_out, *input_shape[1:])
            w = w[:, mask, :, :]
            w = w.reshape(n_out, -1)
        else:
            # conv-gap-fc, conv-view(flatten)-fc
            n_out, n_in = linear.weight.shape
            # NOTE(review): assumes input_shape[1] is an exact multiple of
            # len(mask) -- confirm against the graph builder.
            pixels_per_channel = input_shape[1] // len(mask)
            assert mask.dim() == 1
            # TODO(tkat0) refactor
            # convert channel mask to pixel-level mask
            # NOTE(review): uint8 boolean indexing is deprecated in newer
            # torch versions; torch.bool is the modern dtype. Confirm the
            # minimum supported torch version before changing.
            flatten_mask = torch.zeros((n_in,), dtype=torch.uint8, device=mask.device, requires_grad=False)
            for i, m in enumerate(mask):
                m = int(m)
                if m == 1:
                    begin, end = i * pixels_per_channel, (i + 1) * pixels_per_channel
                    flatten_mask[begin:end] = m
            w = linear.weight.data[:, flatten_mask].clone()
        linear.weight.data = w

    def reinitialize(self, link: nn.Linear):
        # Re-initialization is not supported for Linear; kept for interface
        # parity with the other RebuildLink subclasses.
        # _, in_size = link.W.shape
        # link._initialize_params(in_size)
        raise NotImplementedError()

    def update_attributes(self, link: nn.Linear):
        # Sync the bookkeeping attributes with the (possibly shrunk) weight.
        out_features, in_features = link.weight.shape
        link.in_features = in_features
        link.out_features = out_features
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,135 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /tests/rebuild/pytorch/test_rebuild.py | import numpy as np
from matplotlib import pyplot as plt
import pytest
from chainerpruner import Pruner, Graph
from chainerpruner.masks import NormMask
import torchvision.models as models
import torch
from torch import nn
from torch.nn import functional as F
# Set True to also export ONNX snapshots in _test_model (slower; off by default).
enable_save = False
def _test_model(tmpdir, model_class, x, target_layers, percent, options=None, train=False, save=False):
    """End-to-end pruning smoke test shared by the test cases below.

    Builds the model, prunes `target_layers` with a norm-based mask at ratio
    `percent`, runs a forward pass on the rebuilt model, round-trips the
    pruned weights through state_dict save/load, and optionally exports ONNX
    before and after pruning.
    """
    # save initialized model
    if not options:
        options = dict()
    model = model_class(**options)
    if train:
        model.train()
    else:
        model.eval()
    if save:
        PATH = str(tmpdir.join('model.onnx'))
        torch.onnx.export(model, x, PATH, verbose=False,
                          input_names=['input'],
                          output_names=['output'])

    # pruning with Pruner
    graph = Graph(model, x)
    PATH = str(tmpdir.join('model.png'))
    graph.plot()
    plt.savefig(PATH)
    mask = NormMask(model, graph, target_layers, percent=percent)
    pruner = Pruner(model, x, target_layers, mask)
    pruner.apply_mask()
    info = pruner.apply_rebuild()
    # The rebuilt (physically smaller) model must still accept the input.
    model(x)
    PATH = str(tmpdir.join('model.pth'))
    torch.save(model.state_dict(), PATH)

    # load pruned weight and run
    # NOTE(review): a fresh model_class() has the original (unpruned) layer
    # shapes -- confirm load_state_dict is expected to succeed here.
    model = model_class(**options)
    model.load_state_dict(torch.load(PATH))
    if train:
        model.train()
    else:
        model.eval()
    model(x)

    # save pruned onnx
    if save:
        PATH = str(tmpdir.join('model_pruned.onnx'))
        torch.onnx.export(model, x, PATH, verbose=False,
                          input_names=['input'],
                          output_names=['output'])
def test_resnet18(tmpdir):
    """Prune two conv layers of a ResNet-18 and verify the rebuilt model runs."""
    dummy_input = torch.randn((1, 3, 32, 32), requires_grad=False)
    layers_to_prune = [
        'layer1.0.conv1',
        'layer4.1.conv1',
    ]
    _test_model(tmpdir, models.resnet18, dummy_input, layers_to_prune,
                percent=0.8, save=enable_save)
def test_mobilenetv2(tmpdir):
    """Prune the stem conv of a MobileNetV2 and verify the rebuilt model runs."""
    dummy_input = torch.randn((1, 3, 32, 32), requires_grad=False)
    layers_to_prune = ['features.0.0']
    _test_model(tmpdir, models.mobilenet_v2, dummy_input, layers_to_prune,
                percent=0.7, save=enable_save)
def test_no_target_layers():
    """Rebuilding with an empty target-layer list must raise ValueError."""
    x = torch.randn((1, 3, 32, 32), requires_grad=False)

    model = models.resnet18()
    model.eval()
    model(x)

    percent = 0.8
    target_layers = []  # empty!

    graph = Graph(model, x)
    mask = NormMask(model, graph, target_layers, percent=percent)
    pruner = Pruner(model, x, target_layers, mask)
    pruner.apply_mask()

    # BUG FIX: the original raised ValueError itself immediately after
    # apply_rebuild() and then caught it, so the test passed even when
    # apply_rebuild() did NOT raise. pytest.raises asserts the call raises.
    with pytest.raises(ValueError):
        pruner.apply_rebuild()
def test_block(tmpdir):
    """Prune the pointwise conv inside a depthwise-separable block and rebuild."""
    class Block(nn.Module):
        # Depthwise conv + BN + ReLU followed by pointwise conv + BN + ReLU.
        def __init__(self, in_channels, out_channels, ksize, stride, pad):
            super(Block, self).__init__()
            # groups=in_channels makes convDW a depthwise convolution.
            self.convDW = nn.Conv2d(in_channels, in_channels, ksize, stride=stride, padding=pad, groups=in_channels, bias=False)
            self.bnDW = nn.BatchNorm2d(in_channels)
            self.convPW = nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False)
            self.bnPW = nn.BatchNorm2d(out_channels)

        def forward(self, x):
            inplace = True
            # inplace = False TODO(tkat0) can't trace
            h = F.relu(self.bnDW(self.convDW(x)), inplace=inplace)
            h = F.relu(self.bnPW(self.convPW(h)), inplace=inplace)
            return h

    class SimpleNet(nn.Module):
        # Two stacked depthwise-separable blocks: 3 -> 7 -> 15 channels.
        def __init__(self):
            super(SimpleNet, self).__init__()
            self.conv1 = Block(3, 7, 3, 1, 0)
            self.conv2 = Block(7, 15, 3, 1, 0)

        def forward(self, x):
            h = self.conv1(x)
            h = self.conv2(h)
            return h

    model_class = SimpleNet
    x = torch.randn((1, 3, 32, 32), requires_grad=False)
    target_layers = ['conv1.convPW']
    percent = 0.7
    # Sanity check: the untouched model runs before we prune it.
    model_class()(x)
    _test_model(tmpdir, model_class, x, target_layers, percent, options=None, save=enable_save)
def test_upsample(tmpdir):
    """Prune a conv whose output flows through nn.Upsample before the next conv."""
    class SimpleNet(nn.Module):
        # conv -> 2x upsample -> conv; checks mask propagation across Upsample.
        def __init__(self):
            super(SimpleNet, self).__init__()
            self.conv1 = nn.Conv2d(3, 7, 1, stride=1, padding=0, bias=False)
            self.up = nn.Upsample(scale_factor=2)
            self.conv2 = nn.Conv2d(7, 15, 1, stride=1, padding=0, bias=False)

        def forward(self, x):
            h = self.conv1(x)
            h = self.up(h)
            h = self.conv2(h)
            return h

    model_class = SimpleNet
    x = torch.randn((1, 3, 32, 32), requires_grad=False)
    target_layers = ['conv1']
    percent = 0.7
    # Sanity check: the untouched model runs before we prune it.
    model_class()(x)
    _test_model(tmpdir, model_class, x, target_layers, percent, options=None, save=enable_save)
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,136 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/pruning/network_slimming/pytorch.py | # Copyright (c) 2018 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
import logging
from torch import nn
logger = logging.getLogger(__name__)
class Lasso():
    """Lasso (L1) regularization on BatchNorm2d scale factors for a PyTorch model."""

    def __init__(self, model, rate, target_bn_layers=None):
        """Lasso regularization for PyTorch model.

        Args:
            model (nn.Module): model whose BatchNorm2d weights are regularized.
            rate (float): Coefficient for Lasso.
            target_bn_layers (list[str]): Node names that apply Lasso.
                When None/empty, every BatchNorm2d in the model is targeted.

        Raises:
            AttributeError: if target_bn_layers is given but none of the
                names resolves to a BatchNorm2d in the model.
        """
        self._model = model
        self._rate = rate
        self._target_layers = target_bn_layers

        if self._target_layers:
            # Fail fast: at least one requested layer must exist and actually
            # be a BatchNorm2d.
            count = 0
            for name, node in self._model.named_modules():
                if name in self._target_layers and isinstance(node, nn.BatchNorm2d):
                    count += 1
            if count == 0:
                raise AttributeError('target_layers={} does not exist in the model '
                                     'or does not BatchNorm2d'.format(self._target_layers))

    def __call__(self):
        """Add the L1 sub-gradient (rate * sign(weight)) to each target BN weight grad.

        Must be called after backward() so node.weight.grad exists.

        Returns:
            list[str]: Node names that actually applied Lasso.
        """
        info = list()
        for name, node in self._model.named_modules():
            if self._target_layers and name not in self._target_layers:
                continue
            if isinstance(node, nn.BatchNorm2d):
                info.append(name)
                # In-place update of the gradient, not the weight itself.
                node.weight.grad.data.add_(self._rate * node.weight.data.sign())
        return info
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,137 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/rebuild/pytorch/same_io_channels_layers.py | # Copyright (c) 2018 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
import torch.nn as nn
# Layer types whose input channel count is NOT tied to their output channel
# count (pruning one side does not force the other to shrink).
_disconnected_io_channels_layers = [
    nn.Conv1d,
    nn.Conv2d,
    nn.Linear,
    nn.ConvTranspose1d,
    nn.ConvTranspose2d,
]


def is_connected_io_channels(node):
    """Return True if the number of input and output channels of node is linked.

    Grouped/depthwise Conv2d is the special case: even though a plain Conv2d
    decouples its input and output channels, groups > 1 ties them together.

    Args:
        node: a graph node exposing `type` (the module class) and, for
            Conv2d, `link.groups`.

    Returns:
        bool
    """
    if node.type == nn.Conv2d and node.link.groups > 1:
        # Depthwise Convolution: in/out channels move together.
        return True
    # Layers in the list decouple input from output; everything else is
    # assumed connected by default.
    return node.type not in _disconnected_io_channels_layers
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,138 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/pruning/psfp/chainer.py | # Copyright (c) 2018 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
import chainer
from chainer.training import extension
from chainerpruner.pruning.psfp.psfp import ProgressiveSoftFilterPruning
class ProgressiveSoftFilterPruningExtension(extension.Extension):
    """Chainer trainer extension wrapping ProgressiveSoftFilterPruning.

    Applies one pruning step per epoch and (optionally) physically rebuilds
    the model when training finishes.
    """

    name = 'ProgressiveSoftFilterPruning'
    trigger = (1, 'epoch')  # invoked once per epoch by the trainer
    priority = chainer.training.PRIORITY_WRITER

    def __init__(self, model, args, target_layers,
                 pruning_rate, stop_trigger, pruning_rate_decay=1 / 8, rebuild=True):
        # rebuild: if True, finalize() shrinks the model after training.
        self._rebuild = rebuild
        self.core = ProgressiveSoftFilterPruning(
            model, args, target_layers,
            pruning_rate, stop_trigger, pruning_rate_decay)

    def __call__(self, trainer: chainer.training.Trainer):
        updater = trainer.updater
        epoch = updater.epoch
        iteration = updater.iteration
        # NOTE(review): `self.trigger_type` is not defined in this class and
        # is not set in the visible code -- confirm it is injected elsewhere,
        # otherwise this line raises AttributeError at runtime.
        step = iteration if self.trigger_type == 'iteration' else epoch
        self.core(step)

    def finalize(self):
        # Rebuild once training ends so soft-pruned filters are removed for real.
        if self._rebuild:
            self.core.rebuild()
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,139 | ahmad-abdellatif/ChainerPruner | refs/heads/master | /chainerpruner/pruning/network_slimming/__init__.py | from chainerpruner.pruning.network_slimming.network_slimming import pruning
| {"/chainerpruner/pruning/network_slimming/__init__.py": ["/chainerpruner/pruning/network_slimming/network_slimming.py"]} |
52,158 | dev216/opap | refs/heads/master | /opapmain.py | #!/usr/bin/python2.7
__author__ = 'nassos'
import os
import sys
#import urllib.request
import http.client
import json
import csv
import time
import random
from progmenu import ProgramMenu
from collections import OrderedDict
# OPAP REST Services
# {game} proto
# {extension} json
# http://applications.opap.gr/DrawsRestServices/{game}/last.{extension}
# http://applications.opap.gr/DrawsRestServices/{game}/{draw_number}.{extension}
def draw_json_data_to_tuple(draw_json_data):
    """Flatten a draw JSON payload into a (drawNo, drawTime, results) tuple."""
    draw = draw_json_data['draw']
    return (draw['drawNo'], draw['drawTime'], draw['results'])
def print_proto_draw(draw_request_response):
    """Print a draw's time, number and results, one value per line."""
    draw = draw_request_response['draw']
    for field in ('drawTime', 'drawNo', 'results'):
        print(draw[field])
def fetch_proto_last_draw():
    """Fetch the latest 'proto' draw from the OPAP REST service.

    Returns:
        dict: parsed JSON payload describing the last draw
            (keys under 'draw': drawNo, drawTime, results, ...).

    Raises:
        http.client.HTTPException / OSError: on network failure.
        json.JSONDecodeError: if the response body is not valid JSON.
    """
    conn = http.client.HTTPConnection("applications.opap.gr")
    try:
        request_str = "/DrawsRestServices/{0}/last.{1}".format("proto", "json")
        # BUG FIX: the original header value was "text\json" -- a backslash
        # typo for the intended "text/json" media type.
        conn.request("GET", request_str, None, {"Accept": "text/json"})
        last_draw_response = conn.getresponse()
        last_draw_data = last_draw_response.read().decode('utf-8')
        return json.loads(last_draw_data)
    finally:
        # Close the connection even when the request or the parse fails
        # (the original leaked it on any exception).
        conn.close()
def fetch_proto_draw(draw_number):
    """Fetch one 'proto' draw by its draw number from the OPAP REST service.

    Args:
        draw_number (int): the draw to fetch (echoed to stdout as progress).

    Returns:
        dict: parsed JSON payload describing that draw.

    Raises:
        http.client.HTTPException / OSError: on network failure.
        json.JSONDecodeError: if the response body is not valid JSON.
    """
    print(draw_number)  # progress feedback for the bulk-download loop
    conn = http.client.HTTPConnection("applications.opap.gr")
    try:
        request_str = "/DrawsRestServices/proto/{0}.json".format(draw_number)
        # BUG FIX: the original header value was "text\json" -- a backslash
        # typo for the intended "text/json" media type.
        conn.request("GET", request_str, None, {"Accept": "text/json"})
        draw_response = conn.getresponse()
        draw_data = draw_response.read().decode('utf-8')
        return json.loads(draw_data.strip())
    finally:
        # Close the connection even when the request or the parse fails
        # (the original leaked it on any exception).
        conn.close()
def create_drawsdata_file():
    """Create (or truncate) 'protodraws.csv' for writing.

    Returns:
        file object opened in 'w' mode, or None when opening fails.
    """
    try:
        return open("protodraws.csv", "w")
    except IOError:
        return None
def print_statistics(winning_columns):
    """Tally 4-digit head/tail parts of winning columns and print candidates.

    For every winning column, count occurrences of its first four and last
    four digits, then collect every 4-digit string that never appeared and
    hand 10 of them to the candidate generator.

    Args:
        winning_columns: iterable of digit strings (each at least 4 chars).
    """
    winning_columns_4digit_parts_dict = OrderedDict()
    not_winning_columns_4digit_parts = []

    for a_winning_column in winning_columns:
        head_part = a_winning_column[0:4]
        tail_part = a_winning_column[-4:]
        if head_part in winning_columns_4digit_parts_dict.keys():
            winning_columns_4digit_parts_dict[head_part] += 1
        else:
            winning_columns_4digit_parts_dict[head_part] = 1
        if tail_part in winning_columns_4digit_parts_dict.keys():
            winning_columns_4digit_parts_dict[tail_part] += 1
        else:
            # BUG FIX: the original wrote the count under `head_part` here,
            # so a tail part not seen before was never recorded at all.
            winning_columns_4digit_parts_dict[tail_part] = 1

    # NOTE(review): range starts at 1, so '0000' is never considered as a
    # candidate part -- confirm that is intended.
    for a_num in range(1, 10000):
        four_digit_str = "{0:04d}".format(a_num)
        if four_digit_str not in winning_columns_4digit_parts_dict.keys():
            not_winning_columns_4digit_parts.append(four_digit_str)

    print_not_winning_columns_4digit_parts(not_winning_columns_4digit_parts, 10)
def print_not_winning_columns_4digit_parts(not_winning_columns_4digit_parts,numbers_to_generate):
    """Randomly assemble 7-digit candidate numbers from never-seen 4-digit parts.

    A candidate is built from a head part and a tail part that overlap on one
    digit (head's last char == tail's first char), producing
    head[0:4] + tail[1:4] -- seven digits total. Each part is used at most once.

    Args:
        not_winning_columns_4digit_parts: 4-digit strings that never appeared
            in any winning column.
        numbers_to_generate: how many candidates to build; ignored unless 1..10.

    Returns:
        list[str]: the generated candidates (also printed as a side effect).
    """
    print(not_winning_columns_4digit_parts)
    tmplen = len(not_winning_columns_4digit_parts)
    generated_numbers = []
    generated_numbers_parts = []  # parts already consumed, never reused
    # NOTE(review): if the pool is small or lacks compatible pairs, the inner
    # while loop can spin for a very long time (or forever) -- confirm.
    if numbers_to_generate >= 1 and numbers_to_generate <= 10:
        while len(generated_numbers) < numbers_to_generate:
            index = random.randint(0,tmplen-1)
            head_part = not_winning_columns_4digit_parts[index]
            head_part_last_char = head_part[-1]
            if head_part not in generated_numbers_parts:
                generated_numbers_parts.append(head_part)
                tail_part_found = False
                while tail_part_found == False:
                    index = random.randint(0,tmplen-1)
                    tail_part = not_winning_columns_4digit_parts[index]
                    if tail_part in generated_numbers_parts:
                        continue
                    else:
                        tail_part_first_char = tail_part[0]
                        # Accept only tails that chain onto the head's last digit.
                        if head_part_last_char == tail_part_first_char:
                            generated_numbers_parts.append(tail_part)
                            generated_numbers.append("{0}{1}{2}".format(head_part[0:3],head_part_last_char,tail_part[-3:]))
                            tail_part_found = True
                        else:
                            continue
            else:
                continue
    print(generated_numbers)
    return generated_numbers
# Interactive entry point: show the menu, then dispatch on the chosen action.
if __name__ == "__main__":
    try:
        print("OPAP PROTO PROGRAM")
        progMenu = ProgramMenu()
        progMenu.print_menu()
        option_selected = input('Select an option: ')
        if progMenu.validate_option(option_selected):
            action_str = progMenu.get_action_str(option_selected)
            action_num = progMenu.get_action_num(option_selected)
            print(action_str)
            if action_num == 1:
                # Action 1: fetch and display the most recent draw.
                lastdraw_json_data = fetch_proto_last_draw()
                print_proto_draw( lastdraw_json_data )
            elif action_num == 2:
                # Action 2: append any draws missing from protodraws.csv.
                lastdraw_json_data = fetch_proto_last_draw()
                print("Last 'Proto' Draw Number: ", lastdraw_json_data['draw']['drawNo'])
                drawdat_fh = None
                try:
                    drawdat_fh = open("protodraws.csv")
                except IOError:
                    print("'PROTO' draws' data CSV file does not exist.")
                    drawsfile_exists = False
                    # Create the CSV file.
                    drawdat_fh = create_drawsdata_file()
                if drawdat_fh == None:
                    raise SystemExit
                reader = csv.reader(drawdat_fh)
                draw_data = []
                for draw_row in reader:
                    draw_data.append(draw_row)
                # NOTE(review): on a freshly created (empty) CSV, draw_data is
                # empty and draw_data[-1] raises IndexError -- confirm intended.
                last_available_draw_number = int(draw_data[-1][0])
                #print(int(last_available_draw_data[0]))
                if int( last_available_draw_number < lastdraw_json_data['draw']['drawNo'] ):
                    if drawdat_fh != None:
                        drawdat_fh.close()
                    # Reopen in append mode so only the missing draws are added.
                    drawdat_fh = open("protodraws.csv","a",newline='')
                    writer = csv.writer(drawdat_fh)
                    # print("Available 'PROTO' draws: {0}".format(len(draw_data)))
                    tmp_counter = 0
                    # (Removed dead commented-out bulk-download code for draws
                    # -715..0 and 1..end; see version control history.)
                    for a_draw_num in range( (last_available_draw_number+1), (lastdraw_json_data['draw']['drawNo']) + 1, 1 ):
                        draw_json_data = fetch_proto_draw(a_draw_num)
                        writer.writerow( draw_json_data_to_tuple(draw_json_data))
                        tmp_counter += 1
                        # Throttle: pause 5 seconds after every 250 requests.
                        if tmp_counter == 250:
                            time.sleep(5)
                            tmp_counter = 0
                    drawdat_fh.close()
                else:
                    drawdat_fh.close()
                    print("Draws are already updated.")
                # (Removed a second large block of dead commented-out download
                # code duplicating the loop above; see version control history.)
            elif action_num == 3:
                # Action 3: collect unique winning columns and print statistics.
                drawdat_fh = open("protodraws.csv","r")
                reader = csv.reader(drawdat_fh)
                winning_columns = []
                for draw_line_row in reader:
                    # Column 2 holds the drawn digits as a stringified list;
                    # keep only the digit characters.
                    winning_column_digits_list_str = draw_line_row[2]
                    winning_column_str = ""
                    for a_char in winning_column_digits_list_str:
                        if a_char.isdigit():
                            winning_column_str += a_char
                    if winning_column_str not in winning_columns:
                        winning_columns.append(winning_column_str)
                drawdat_fh.close()
                print(len(winning_columns))
                print_statistics(winning_columns)
            else:
                print("SANITY CHECK ERROR")
    except SystemExit:
        print("Exiting script...")
        sys.exit()
52,159 | dev216/opap | refs/heads/master | /progmenu.py | from xml.etree.ElementPath import _SelectorContext
__author__ = 'nassos'
class ProgramMenu:
    """Tiny numeric console menu: options are shown (and selected) 1-based."""

    def __init__(self):
        # Internal keys are 0-based; the user-facing numbering is key + 1.
        self.options = {
            0: "Get Last Draw",
            1: "Update Draws",
            2: "Print statistics",
        }
        self.options_count = len(self.options.keys())

    def validate_option(self, option_selected):
        """Return True iff option_selected is a digit string in [1, options_count]."""
        if not option_selected.isdigit():
            return False
        return int(option_selected) in range(1, self.options_count + 1)

    def print_menu(self):
        """Print one '<number>: <label>' line per option."""
        lines = ["{0}: {1}".format(key + 1, label) for key, label in self.options.items()]
        print("\n".join(lines))

    def get_action_str(self, option_selected):
        """Return the label for a valid selection, or None."""
        if not self.validate_option(option_selected):
            return None
        return self.options[int(option_selected) - 1]

    def get_action_num(self, option_selected):
        """Return the 1-based action number for a valid selection, or None."""
        if not self.validate_option(option_selected):
            return None
        return int(option_selected)
| {"/opapmain.py": ["/progmenu.py"]} |
52,177 | abolfazlvakily/django_resume_maker | refs/heads/master | /MySite/resume/views.py | from django.views.generic import ListView
from .models import Skill
class Resume(ListView):
    """List all Skill rows (Django picks resume/skill_list.html-style defaults)."""
    # Plain ListView: no queryset/template overrides, so all conventions apply.
    model = Skill
| {"/MySite/resume/views.py": ["/MySite/resume/models.py"], "/MySite/resume/urls.py": ["/MySite/resume/views.py"], "/MySite/resume/models.py": ["/MySite/resume/enums.py"]} |
52,178 | abolfazlvakily/django_resume_maker | refs/heads/master | /MySite/resume/urls.py | from django.urls import path
from .views import Resume
from django.conf.urls.static import static
from django.conf import settings
# URL namespace for reversing, e.g. reverse('resume:skill_list').
app_name = 'resume'

# Single route at the app root, plus media-file serving in development
# (static() returns an empty list when DEBUG is off).
urlpatterns = [
    path('', Resume.as_view(), name='skill_list'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {"/MySite/resume/views.py": ["/MySite/resume/models.py"], "/MySite/resume/urls.py": ["/MySite/resume/views.py"], "/MySite/resume/models.py": ["/MySite/resume/enums.py"]} |
52,179 | abolfazlvakily/django_resume_maker | refs/heads/master | /MySite/resume/enums.py | from django.utils.translation import ugettext as _
class Priorities(object):
    """Integer constants for a skill's display priority (stored in the DB)."""
    High = 1
    Medium = 2
    Low = 3
# Choices tuple for model fields: (stored integer, translated label).
PRIORITIES = (
    (Priorities.High, _('High priority')),
    (Priorities.Medium, _('Medium priority')),
    (Priorities.Low, _('Low priority'))
)
| {"/MySite/resume/views.py": ["/MySite/resume/models.py"], "/MySite/resume/urls.py": ["/MySite/resume/views.py"], "/MySite/resume/models.py": ["/MySite/resume/enums.py"]} |
52,180 | abolfazlvakily/django_resume_maker | refs/heads/master | /MySite/resume/models.py | from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from ckeditor.fields import RichTextField
from .enums import Priorities, PRIORITIES
class Skill(models.Model):
    """A single CV/skill entry owned by a Django auth user."""

    # Each system user can have several skills (translated from Persian).
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, related_name='has', db_column='user_id',
                                verbose_name=_('end_user'))
    title = models.CharField(max_length=150, db_column='title', verbose_name=_('title'))
    description = RichTextField()
    priority = models.IntegerField(choices=PRIORITIES, default=Priorities.Medium, db_column='priority',
                                   verbose_name=_('priority'))
    image = models.ImageField(db_column='image', verbose_name=_('image'), upload_to='Images')
    created_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # Admin/list display name.
        return self.title

    class Meta:
        ordering = ['created_date']
        verbose_name = _('CV')
        verbose_name_plural = _('CVs')
        db_table = 'CVs'
| {"/MySite/resume/views.py": ["/MySite/resume/models.py"], "/MySite/resume/urls.py": ["/MySite/resume/views.py"], "/MySite/resume/models.py": ["/MySite/resume/enums.py"]} |
52,190 | 5l1v3r1/Discord-Nitro-Generator | refs/heads/main | /core/checker.py | import asyncio
import aiohttp
from aiohttp_socks import ProxyConnector
from core.logging import Logging
from typing import Tuple
class Checker:
    """Validates potential Discord Nitro gift codes through SOCKS proxies."""

    def __init__(self) -> None:
        # %s placeholder is filled with the candidate gift code.
        self.url = 'https://discord.com/api/v9/entitlements/gift-codes/%s?with_application=true&with_subscription_plan=true'

    async def check(self, code: str, proxy: Tuple[int, str, int]) -> Tuple[str, bool]:
        """ Check a potential Nitro gift code.

        Args:
            code: 24-character candidate gift code.
            proxy: (socks_version, host, port) tuple.

        Returns:
            (code, True) when valid, (code, False) when invalid,
            (code, None) on rate limit, unknown response, or any error.
        """
        url = self.url % code
        # NOTE(review): `type` shadows the builtin; it holds the SOCKS version.
        type, proxy_host, proxy_port = proxy

        try:
            async with aiohttp.ClientSession(connector=ProxyConnector.from_url('socks%d://%s:%s' % (type, proxy_host, proxy_port))) as session:
                async with session.get(url=url) as response:
                    json_data = await response.json()

                    # Code is valid
                    if response.status == 200:
                        Logging.success(
                            f'valid code: {code}'
                        )
                        return (code, True)

                    # Non-200: inspect the API's message field.
                    message = json_data['message']

                    # We have been rate limited by discord, need to wait a given time
                    if message == 'You are being rate limited.':
                        to_wait = json_data['retry_after'] # need to wait that time
                        Logging.info(
                            f'{proxy_host}:{proxy_port} have been rate limited, waiting {to_wait}s.'
                        )
                        await asyncio.sleep(to_wait + 1) # wait until we get "unbanned" by Discord, adding a second for security
                        return (code, None)

                    # Code is invalid
                    elif message == 'Unknown Gift Code':
                        Logging.info(
                            f'invalid code: {code}'
                        )
                        return (code, False)

                    else:
                        Logging.info(
                            f'unknown message (code: {code}): {message}'
                        )
        except Exception:
            # NOTE(review): broad swallow hides proxy/network errors; the
            # caller only ever sees (code, None) for them.
            pass

        return (code, None)
52,191 | 5l1v3r1/Discord-Nitro-Generator | refs/heads/main | /main.py | """
TODO:
- socks4 / socks5 / http proxies support (only 4 for the moment)
- send the code it to a webhook
- remove dead proxies
"""
import itertools
import asyncio
import re
from core.logging import Logging
from core.checker import Checker
from core.generator import Generator
async def main():
    """Load SOCKS4 proxies from proxies.txt, then loop forever generating and checking codes."""
    Logging.console.print(
        '''
    [red]+[bright_black] -- [magenta]Nitro Generator & Checker[/magenta] --[/bright_black]+[/red]
    [red]+[bright_black]- -- [bright_blue]twitter.com/toastakerman[/bright_blue] --[/bright_black]+[/red]
    [red]+[bright_black]- -- -- [cyan]github.com/traumatism[/cyan] --[/bright_black]+[/red]
    '''
    )

    # load proxies from 'proxies.txt'
    with open('proxies.txt', 'r') as file:
        proxies = set([
            (4, str(line.split(':')[0]), int(line.split(':')[1])) # (type: int, ip_address: str, port: int)
            for line in [x.strip() for x in file.readlines()]
            if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', line) # validate the line
        ])

    Logging.success(f'loaded {len(proxies)} proxies.')

    # Round-robin over the proxy pool forever.
    proxies = itertools.cycle(proxies)

    while 1:
        codes = Generator.generate() # generate 1k potential gift codes.
        Logging.success(f'generated {len(codes)} potential gift codes.')

        checker = Checker() # initialize the checker class

        # load the futures
        futures = []

        for code in codes:
            # NOTE(review): idiomatic spelling is next(proxies).
            proxy = proxies.__next__() # choose the proxy
            futures.append(asyncio.create_task(
                checker.check(code, proxy)
            ))

        Logging.info(
            f'executing {len(futures)} tasks asynchronously...'
        )

        # execute the futures
        await asyncio.gather(
            *futures
        )

asyncio.run(
    main()
)
52,192 | 5l1v3r1/Discord-Nitro-Generator | refs/heads/main | /core/generator.py | import string
import random
from typing import List
class Generator:
    """Produces random 24-character alphanumeric strings (potential gift codes)."""

    @staticmethod
    def generate(limit: int = 1000) -> List[str]:
        """Generate `limit` unique potential nitro codes."""
        alphabet = string.ascii_letters + string.digits
        unique_codes: List[str] = []
        # Keep drawing until we have exactly `limit` distinct codes;
        # duplicates (astronomically unlikely) are simply discarded.
        while len(unique_codes) < limit:
            candidate = ''.join(random.choices(alphabet, k=24))
            if candidate not in unique_codes:
                unique_codes.append(candidate)
        return unique_codes
52,193 | 5l1v3r1/Discord-Nitro-Generator | refs/heads/main | /core/logging.py | from os import stat
from rich.console import Console
class Logging:
    """Thin wrapper around rich's Console for colored, prefixed log lines."""

    # Shared console instance used by every static helper below.
    console = Console()

    @staticmethod
    def print(
        content: str,
        prefix: str,
        prefix_color: str
    ) -> None:
        """Print `content` behind a colored one-character prefix tag."""
        Logging.console.print(
            f'[{prefix_color}][{prefix}][/{prefix_color}] {content}'
        )

    @staticmethod
    def success(content: str) -> None:
        # Green '+' prefix.
        Logging.print(
            content=content,
            prefix='+',
            prefix_color='bright_green'
        )

    @staticmethod
    def info(content: str) -> None:
        # Blue '*' prefix.
        Logging.print(
            content=content,
            prefix='*',
            prefix_color='bright_blue'
        )

    @staticmethod
    def error(content: str) -> None:
        # Yellow '-' prefix (styled as a warning color despite the name).
        Logging.print(
            content=content,
            prefix='-',
            prefix_color='bright_yellow'
        )
52,217 | Vianpyro/minecraft_with_python | refs/heads/main | /classes/datapack.py | from .workspace import Workspace
from ..utilities.create_file import *
from ..utilities.create_pack_meta import *
from ..utilities.make_directory import *
from shutil import rmtree
from time import time
import os
# Datapack data categories this compiler knows how to emit ('tags' is handled separately in Datapack.compile).
data_types = ['advancements', 'dimension_type', 'dimension', 'item_modifiers', 'loot_tables', 'functions', 'predicates', 'recipes']
class Datapack:
    """
    Datapacks can be placed in the .minecraft/saves/(world)/datapacks folder of a world. Each data pack is either a sub-folder or a .zip file \
    within the datapacks folder. After it is in the folder, a data pack is enabled for that world when the world is reloaded or loaded.
    Data packs load their data based on the load order. This order can be seen and altered by using the /datapack command and is stored in the \
    level.dat file.
    The player can also select data packs at the world creation screen by clicking the Data Packs button and dragging-and-dropping their data \
    pack folders/zip-files there. This is similar to the Resource Pack selection screen, and allows the player to enable data packs before \
    the world is generated, and easily customize the load order too.
    """
    def __init__(
        self, title: str='MyAmazingDatapack', path: str=os.getcwd(),
        author:str='MCWPY', pack_meta: dict=None, workspaces: (Workspace, list)=None,
        auto_compile: bool=False, compile_as_zip: bool=False, auto_replace: bool=False
    ):
        """
        Initialisation of the Datapack.
        Datapacks can be used to override or add new advancements, dimensions, functions, loot tables, predicates, recipes, structures, tags, \
        world generation settings, and biomes without any code modification.

        :param author: The author of the datapack.
        :param auto_compile: Should the Datapack be compiled automatically when it is defined?
        :param auto_replace: Should the Datapack automatically replace a previous version?
        :param compile_as_zip: Should the Datapack be compiled into a zip file?
        :param pack_meta: The content of the "pack.mcmeta".
        :param path: The title where the datapack should be generated.
        :param title: The title of the datapack.
        :param workspaces: The content of the Datapack.
        """
        # NOTE(review): path's default os.getcwd() is evaluated once at class
        # definition time, not per call -- confirm this is intended.
        self.author = author
        self.auto_compile = auto_compile
        self.auto_replace = auto_replace
        # NOTE(review): compile_as_zip is stored but never used below.
        self.compile_as_zip = compile_as_zip
        self.pack_meta = pack_meta
        # Normalize the path so it always ends with exactly one os separator.
        self.path = '' if path in ['', None] else str(str(path[:-1] + os.path.sep) if path[-1] in [os.path.sep, '/'] else path + os.path.sep)
        self.title = title
        # Always hold a list, even when a single Workspace (or None) was given.
        self.workspaces = [workspaces] if not isinstance(workspaces, list) else workspaces

        # Test if this Datapack exists already.
        self.exists = os.path.exists(self.path + self.title)
        if self.exists and not self.auto_replace:
            # Interactive confirmation before overwriting an existing pack.
            self.auto_replace = input(f'{self.title} already exists, do you want to replace it? [yes/no]: ')[0].lower() == 'y'

        # Test if every element of the workspaces list is a Workspace.
        if not all(isinstance(w, Workspace) or w == None for w in self.workspaces):
            raise TypeError('Workspaces has to be instances of the Workspaces class!')

        if self.auto_compile:
            self.compile()

    def generate(self) -> None:
        """Compile the Datapack, timing the run and reporting progress on stdout."""
        time_stamp = time()
        print(f'Compiling the Datapack in "{self.path}" as "{self.title}" this might take a few seconds...')
        self.compile()
        # NOTE(review): ':0.1' formats to one significant digit; ':.1f'
        # (one decimal place) was probably intended.
        print(f'Successfuly generated the datapack in {time() - time_stamp:0.1} seconds :)')

    def compile(self) -> None:
        """
        This method compiles the data entered by the user to create a Minecraft Datapack.

        Walks every workspace, writing .mcfunction files for 'functions',
        .json files for the other data_types and for 'tags', then wires
        minecraft:load / minecraft:tick to the workspace's load/main
        functions and finally writes pack.mcmeta.

        :return: None or Error
        """
        # Remove the old Datapack.
        if self.exists:
            if self.auto_replace:
                rmtree(self.path + self.title)
                print(f'Successfuly removed the Datapack "{self.title}"')
            else:
                print(f'Failed to remove the Datapack "{self.title}"')

        # Generate the new Datapack.
        if not self.exists or self.auto_replace:
            make_directory(self.title, self.path) # Create the Datapack's folder.
            if all(w == None for w in self.workspaces):
                print('No content was generated!')
            else:
                make_directory('data', f'{self.path}{self.title}/')
                # NOTE(review): main_function_list is never used afterwards.
                main_function_list = list()
                for workspace in [w for w in self.workspaces if w is not None]:
                    ws = workspace.read()
                    # Create a new directory for this workspace.
                    make_directory(workspace.title, f'{self.path}{self.title}/data/')
                    for key in ws:
                        directory_name = f'{self.path}{self.title}/data/{workspace.title}/{key}/'
                        if key in data_types and ws[key] != None:
                            # Create the key folder.
                            make_directory(key, f'{self.path}{self.title}/data/{workspace.title}/')
                            if key == 'functions':
                                # Functions are written as raw .mcfunction files.
                                for function_file_name in ws[key]:
                                    create_file(
                                        f'{function_file_name}.mcfunction', directory_name,
                                        ws[key][function_file_name]
                                    )
                            else:
                                # Everything else is emitted as unvalidated JSON.
                                print('Be careful, this kind of file is not verified by this program and may contain some errors:', key)
                                for json_file_name in ws[key]:
                                    create_file(
                                        f'{json_file_name}.json', directory_name,
                                        ws[key][json_file_name]
                                    )
                            print(f'Successfuly generated {key} files.')
                        elif key == 'tags':
                            # Tags live one level deeper: tags/<registry>/<file>.json.
                            make_directory(directory_name)
                            for e in ws[key]:
                                if e in ['blocks', 'entity_types', 'fluids', 'game_events', 'items']:
                                    make_directory(e, directory_name)
                                    for f in ws[key][e]:
                                        create_file(
                                            f'{f}.json', f'{directory_name}/{e}/',
                                            ws[key][e][f]
                                        )
                        elif ws[key] == None:
                            print(f'No file was generated for "{key}".')
                        else:
                            print(f'Failed to create {key} files: {key} is not supported (yet?).')

                    # Create the main(tick) and load files.
                    if not os.path.exists(f'{self.path}{self.title}/data/minecraft/'):
                        make_directory('minecraft', f'{self.path}{self.title}/data/')
                    if not os.path.exists(f'{self.path}{self.title}/data/minecraft/tags/'):
                        make_directory('tags', f'{self.path}{self.title}/data/minecraft/')
                    if not os.path.exists(f'{self.path}{self.title}/data/minecraft/tags/functions/'):
                        make_directory('functions', f'{self.path}{self.title}/data/minecraft/tags/')

                    # Create the load and tick files.
                    if os.path.exists(f'{self.path}{self.title}/data/{workspace.title}/functions/load.mcfunction'):
                        create_file(
                            'load.json', f'{self.path}{self.title}/data/minecraft/tags/functions/',
                            f'{{"values": ["{workspace.title}:load"]}}'
                        )
                    if os.path.exists(f'{self.path}{self.title}/data/{workspace.title}/functions/main.mcfunction'):
                        create_file(
                            'tick.json', f'{self.path}{self.title}/data/minecraft/tags/functions/',
                            f'{{"values": ["{workspace.title}:main"]}}'
                        )

            # Create the "pack.mcmeta" file.
            if self.pack_meta == None:
                print('No "pack.mcmeta" was generated!')
            else:
                create_file(
                    'pack.mcmeta', f'{self.path}{self.title}{os.path.sep}',
                    str(create_pack_meta(
                        self.pack_meta['minecraft_version'],
                        self.pack_meta['description'],
                        self.author
                    ))
                )
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,218 | Vianpyro/minecraft_with_python | refs/heads/main | /__init__.py | # -*- coding: utf-8 -*-
# Package metadata consumed by tooling and users of the library.
__version__ = "dev-1.0"
__author__ = "Vianpyro"
from .classes.datapack import *
from .classes.workspace import *
from .utilities.create_file import *
from .utilities.create_pack_meta import *
from .utilities.import_from_file import *
from .utilities.make_directory import * | {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,219 | Vianpyro/minecraft_with_python | refs/heads/main | /utilities/create_pack_meta.py | def create_pack_meta(minecraft_version: str = '1.6.1', description: str = None, author: str = None) -> str:
'''
This function helps with creating the string of a "pack.mcmeta" file ready to be written.
:param minecraft_version: The version of Minecraft in which the user's server runs.
:param description: A short description of the uses of the Datapack the user wants to make.
:param author: The user's name to write into the "pack.mcmeta".
:return: String of a "pack.mcmeta" file.
'''
default_pack_format = 7
minecraft_version_to_pack_format = {
'1.6.1': 1, '1.6.2': 1, '1.6.4': 1,
'1.7.2': 1, '1.7.4': 1, '1.7.5': 1, '1.7.6': 1, '1.7.7': 1, '1.7.8': 1, '1.7.9': 1, '1.7.10': 1,
'1.8': 1, '1.8.1': 1, '1.8.2': 1, '1.8.3': 1, '1.8.4': 1, '1.8.5': 1, '1.8.6': 1, '1.8.7': 1, '1.8.8': 1, '1.8.9': 1,
'1.9': 2, '1.9.1': 2, '1.9.2': 2, '1.9.3': 2, '1.9.4': 2,
'1.10': 2, '1.10.1': 2, '1.10.2': 2,
'1.11': 3, '1.11.1': 3, '1.11.2': 3,
'1.12': 3, '1.12.1': 3, '1.12.2': 3,
'1.13': 4, '1.13.1': 4, '1.13.2': 4,
'1.14': 4, '1.14.1': 4, '1.14.2': 4, '1.14.3': 4, '1.14.4': 4,
'1.15': 5, '1.15.1': 5, '1.15.2': 5,
'1.16': 5, '1.16.1': 5,
'1.16.2': 6, '1.16.3': 6, '1.16.4': 6, '1.16.5': 6,
'1.17': 7, '1.17+': 7
}
if minecraft_version in minecraft_version_to_pack_format:
pack_format = minecraft_version_to_pack_format[minecraft_version]
else:
raise Warning(
f'This version of Minecraft seems to have no pack format defined:\nSet to {default_pack_format} by default.')
description = description
author = author
return str(
{
"pack": {
"author": author,
"description": description,
"pack_format": pack_format
}
}
).replace("'", '"')
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,220 | Vianpyro/minecraft_with_python | refs/heads/main | /utilities/create_file.py | def create_file(name, path:str='', content:(str, list, dict)='') -> None:
'''
This functions creates a file.
:param name: The name of the file to create.
:param path: The path where the file has to be created.
:param content: The content to write in the created file.
:return: None or OS-Error if the file could not be created.
'''
with open(f'{path}{name}', 'w') as f:
if isinstance(content, str):
f.write(content)
elif isinstance(content, list):
for line in content:
f.write(f'{line}\n')
elif isinstance(content, dict):
for line in str(content).replace("'", '"').lower():
f.write(f'{line}')
else:
raise TypeError(f'Argument "content" must be of type "str" or "list" not {type(content)}!')
print(f'Successfuly created the file "{name}".')
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,221 | Vianpyro/minecraft_with_python | refs/heads/main | /tests.py | import minecraft_with_python as mcwpy
import unittest
import os
class Tests(unittest.TestCase):
    """Unit tests for the Datapack and Workspace classes."""

    def test_class_datapack_path(self):
        # A Datapack path is either empty ("") or ends with the OS separator.
        sep = os.path.sep
        self.assertEqual(mcwpy.Datapack(path='').path, '')
        self.assertEqual(mcwpy.Datapack(path=None).path, '')
        self.assertEqual(mcwpy.Datapack(path='~/home').path[-1], sep)
        self.assertEqual(mcwpy.Datapack(path='~/home/').path[-2:], 'e' + sep)

    def test_class_datapack_workspaces(self):
        # The workspaces attribute is always a list.
        self.assertIsInstance(mcwpy.Datapack(workspaces=[]).workspaces, list)

    def test_class_workspace_title(self):
        # Titles are lowercased with spaces replaced by underscores;
        # empty/None titles fall back to 'mcwpy'.
        self.assertEqual(mcwpy.Workspace(title='MCWPY is AmAzInG').title, 'mcwpy_is_amazing')
        self.assertEqual(mcwpy.Workspace(title='mcwpy_is_amazing').title, 'mcwpy_is_amazing')
        self.assertEqual(mcwpy.Workspace(title='mcwpy-is-amazing').title, 'mcwpy-is-amazing')
        self.assertEqual(mcwpy.Workspace(title=None).title, 'mcwpy')
        self.assertEqual(mcwpy.Workspace(title='').title, 'mcwpy')
        self.assertEqual(mcwpy.Workspace().title, 'mcwpy')

    def test_class_workspace_content(self):
        # Content defaults to a dict whether None or {} is supplied.
        self.assertIsInstance(mcwpy.Workspace(content=None).content, dict)
        self.assertIsInstance(mcwpy.Workspace(content={}).content, dict)
if __name__ == '__main__':
unittest.main()
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,222 | Vianpyro/minecraft_with_python | refs/heads/main | /utilities/import_from_file.py | def import_from_file(path, extension='mcfunction') -> (str, None):
'''
This functions help with importing files instead of writing them.
The files has to exist on the user's computer to be imported.
:param path: The path where the resource has to be find.
:param extension: The extension of the resource [e.g. ".mcfunction", ".json", ".dat"].
:return: None or OS-Error if the resource can not be found or read, a string containing the content of the imported file otherwise.
'''
try:
with open(f'{path}.{extension}', 'r') as f:
r = [line.replace('\n', '') for line in f if line not in ['', ' ', '\n']]
return r
except:
raise ValueError(f'Could not read the file {path}.{extension}')
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,223 | Vianpyro/minecraft_with_python | refs/heads/main | /classes/workspace.py | import warnings
data_types = ['advancements', 'dimension_type', 'dimension', 'item_modifiers', 'loot_tables', 'functions', 'predicates', 'recipes']
class Workspace():
    """
    A Workspace is where the content of the Datapack will be written.

    A Datapack may contain several Workspaces; each maps a data category
    (e.g. 'functions') to named files and their content lines.
    """
    def __init__(self, title: str='mcwpy', content: dict=None):
        """
        Initialize the Workspace.

        :param title: Workspace title, normalized to lowercase with spaces
            replaced by underscores; None or too-short titles fall back to
            'mcwpy'. NOTE(review): one-character titles also fall back --
            confirm `> 1` is intended rather than `>= 1`.
        :param content: Pre-existing content mapping, or None for empty.
        """
        self.title = title.replace(' ', '_').lower() if title is not None and len(title) > 1 else 'mcwpy'
        # Fresh dict per instance (avoids a shared mutable default).
        self.content = dict() if content is None else content
        if not self.title == title:
            # Fires whenever normalization changed or replaced the given title.
            warnings.warn('Workspace titles has to be strings without capital letters.', SyntaxWarning)

    def add(self, file_name: str=None, file_format: str=None, content: (list, str)=None) -> None:
        """
        Register a file's content under a data category.

        :param file_name: File name; defaults to 'function<N>' where N is the
            number of categories currently present.
        :param file_format: Data category, pluralized automatically;
            defaults to 'functions'.
        :param content: A single line or list of lines; None entries dropped.
        """
        name = file_name if file_name is not None else f'function{len(self.content)}'
        data = (file_format if file_format[-1] == 's' else f'{file_format}s') if file_format is not None else 'functions'
        # Normalize to a list and drop None entries uniformly.
        # BUG FIX: the None-filter previously applied only when appending to
        # an existing file, so a first add(content=None) stored [None].
        entries = content if isinstance(content, list) else [content]
        entries = [e for e in entries if e is not None]
        if data not in self.content:
            self.content[data] = dict()
        if name in self.content[data]:
            self.content[data][name].extend(entries)
        else:
            self.content[data][name] = entries

    def read(self) -> dict:
        """Return the raw content mapping."""
        return self.content
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,224 | Vianpyro/minecraft_with_python | refs/heads/main | /utilities/make_directory.py | import os
def make_directory(name, path: str='') -> None:
    """
    Create a directory named *name* under *path*.

    :param name: The name of the directory to create.
    :param path: Prefix prepended verbatim to *name* (include a trailing
        separator yourself, e.g. 'parent/').
    :return: None.
    :raises OSError: If the directory could not be created (already exists,
        missing parent, permissions, ...); the original error is chained.
    """
    try:
        os.mkdir(f'{path}{name}')
    except OSError as error:
        # BUG FIX: keep the real cause chained -- the original re-raise
        # discarded it, hiding *why* mkdir failed.
        raise OSError(f'Failed to create the directory "{name}".') from error
    else:
        print(f'Successfuly created the directory "{name}".')
| {"/classes/datapack.py": ["/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/make_directory.py"], "/__init__.py": ["/classes/datapack.py", "/classes/workspace.py", "/utilities/create_file.py", "/utilities/create_pack_meta.py", "/utilities/import_from_file.py", "/utilities/make_directory.py"]} |
52,227 | siddharth-rawal/django-proof | refs/heads/master | /myapp/models.py | from django.contrib.auth.models import User
from django.db import models
import os
from django.conf import settings
def user_profile_image_path( instance, filename ):
    # NOTE(review): both arguments are ignored and only STATIC_URL is joined
    # (with nothing) -- as a Django `upload_to` callable this would drop the
    # filename entirely. It also appears unused: UserProfile.profile_picture
    # passes a plain string to upload_to. Confirm intent before relying on it.
    return os.path.join( settings.STATIC_URL)
class UserProfile(models.Model):
    """Per-user profile data, linked one-to-one to Django's auth User."""
    user = models.OneToOneField(User)
    # Optional contact / demographic fields (all nullable).
    address = models.CharField(max_length=140, blank=True, null=True)
    # NOTE(review): an IntegerField drops leading zeros and caps length;
    # phone numbers are usually stored as CharField -- confirm.
    phonenumber = models.IntegerField(blank=True, null=True)
    gender = models.CharField(max_length=140, blank=True, null=True)
    profile_picture = models.ImageField(upload_to='static/images/user/', null=True, blank=True)

    class Meta:
        # Explicit table name instead of the default app_model name.
        db_table = "user_profile"

    def __unicode__(self):
        # Python 2 style string representation.
        return u'Profile of user: %s' % self.user.username
| {"/myapp/views.py": ["/myapp/models.py"], "/myapp/url.py": ["/myapp/forms.py"], "/myapp/forms.py": ["/myapp/models.py"]} |
52,228 | siddharth-rawal/django-proof | refs/heads/master | /myapp/views.py | from django.shortcuts import get_object_or_404, redirect, render, render_to_response
from django.http import HttpResponseRedirect
from django.conf import settings
from forms import RegistrationForm, UpdateUserModelFields, UpdateUserProfileModelFields
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.template import RequestContext
from django.contrib.auth.models import User
from myapp.models import UserProfile
import pdb
#def login(request):
# if this is a POST request we need to process the form data
#if request.method == 'POST':
# create a form instance and populate it with data from the request:
# form = LoginForm(request.POST)
# check whether it's valid:
# if form.is_valid():
# process the data in form.cleaned_data as required
# ...
# redirect to a new URL:
# form.save()
# success message
# messages.info(request, 'Domain added successfully')
# return HttpResponseRedirect('/polls/domainlist/')
# if a GET (or any other method) we'll create a blank form
#else:
# form = LoginForm()
#return render(request, 'user/login.html', {'form': form})
@login_required(login_url="/myapp/login/")
def home(request):
return render(request,"user/home.html")
@login_required(login_url="/myapp/login/")
def editprofile(request):
user_t = get_object_or_404(User, id=request.user.id)
try:
userprofile_t = get_object_or_404(UserProfile, id=request.user.userprofile.id)
except:
userprofile_t = UserProfile()
form1 = UpdateUserModelFields(request.POST or None, instance=user_t)
try:
form2 = UpdateUserProfileModelFields(instance=userprofile_t)
except:
form2 = UpdateUserProfileModelFields()
if request.method == 'POST':
form1 = UpdateUserModelFields(request.POST or None, instance=user_t)
try:
form2 = UpdateUserProfileModelFields(request.POST or None, request.FILES, instance=userprofile_t)
except:
form2 = UpdateUserProfileModelFields()
if form1.is_valid() and form2.is_valid():
form1.save()
new_entry = form2.save(commit=False)
new_entry.user = request.user
new_entry.save()
# success message
messages.success(request, 'Profile updated successfully')
# Save was successful, so redirect to another page
return HttpResponseRedirect('/myapp/editprofile/')
return render(request,"user/editprofile.html", {'form1' : form1, 'form2' : form2})
def registration(request):
    """Render the sign-up form and create a new User on valid POST.

    Already-authenticated users are redirected straight to the home page.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect('/myapp/home')
    args = {}
    # CSRF token for the template's form tag.
    args.update(csrf(request))
    args['form'] = RegistrationForm()
    # 2nd time around (form submission)
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        args['form'] = form
        if form.is_valid():
            # create_user hashes the password; the returned object is unused.
            user = User.objects.create_user(
                username=form.cleaned_data['username'],
                last_name=form.cleaned_data['last_name'],
                first_name=form.cleaned_data['first_name'],
                password=form.cleaned_data['password1'],
                email=form.cleaned_data['email']
            )
            #form.save()
            return HttpResponseRedirect('/myapp/login')
    # form with no input (GET) or invalid POST falls through to re-render.
    # pdb.set_trace()
    #return render(request, 'user/registration.html', {'form' : form})
    return render(request, 'user/registration.html', args)
| {"/myapp/views.py": ["/myapp/models.py"], "/myapp/url.py": ["/myapp/forms.py"], "/myapp/forms.py": ["/myapp/models.py"]} |
52,229 | siddharth-rawal/django-proof | refs/heads/master | /myapp/url.py | from django.conf.urls import patterns, include, url
from django.contrib.auth import views
from django.contrib import admin
from myapp.forms import LoginForm
from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
# Legacy Django 1.x URLconf (`patterns()` and string view references were
# removed in later Django versions).
urlpatterns = patterns('',
    #url(r'^login/', 'myapp.views.login', name='login'),
    # Built-in auth login view with a custom template and Bootstrap form.
    url(r'^login/$', views.login, {'template_name': 'user/login.html', 'authentication_form': LoginForm}),
    url(r'^registration/$', 'myapp.views.registration', name='registration'),
    #url('^registration/', CreateView.as_view(template_name='user/registration.html', form_class=UserCreationForm, success_url='/')),
    url(r'^home/$', 'myapp.views.home', name='home'),
    url(r'^editprofile/$', 'myapp.views.editprofile', name='editprofile'),
    # Log out, then send the user back to the login page.
    url(r'^logout/$', views.logout, {'next_page': '/myapp/login'}),
) | {"/myapp/views.py": ["/myapp/models.py"], "/myapp/url.py": ["/myapp/forms.py"], "/myapp/forms.py": ["/myapp/models.py"]} |
52,230 | siddharth-rawal/django-proof | refs/heads/master | /myapp/forms.py | from django import forms
from django.contrib.auth.models import User
from myapp.models import UserProfile
from django.contrib.auth.forms import AuthenticationForm
#from django.views.generic.edit import CreateView
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponseRedirect
#class LoginForm(forms.ModelForm):
#domain = forms.CharField(error_messages={'required': 'domain required'})
# email = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Email', 'class': 'form-control'}))
# password = forms.CharField(widget=forms.PasswordInput(attrs={'placeholder': 'Password', 'class': 'form-control'}))
# class Meta:
# model = User
# fields = ['email', 'password']
# If you don't do this you cannot use Bootstrap CSS
class LoginForm(AuthenticationForm):
    """Auth login form with Bootstrap-friendly widget attributes."""
    username = forms.CharField(required=True, max_length=30, widget=forms.TextInput(attrs={'placeholder': 'Username', 'class': 'form-control', 'name': 'username'}))
    password = forms.CharField(max_length=30, widget=forms.PasswordInput(attrs={'placeholder': 'Password', 'class': 'form-control', 'name': 'password'}))

    class Meta:
        model = User
class RegistrationForm(UserCreationForm):
    """Sign-up form: UserCreationForm plus name/email, styled for Bootstrap."""
    email = forms.EmailField(required = True, widget=forms.TextInput(attrs={'placeholder': 'Email', 'class': 'form-control'}))
    first_name = forms.CharField(required = True, widget=forms.TextInput(attrs={'placeholder': 'First Name', 'class': 'form-control'}))
    last_name = forms.CharField(required = True, widget=forms.TextInput(attrs={'placeholder': 'Last Name', 'class': 'form-control'}))
    username = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Username', 'class': 'form-control'}))
    password1 = forms.CharField(required=True, widget=forms.PasswordInput(attrs={'placeholder': 'Password', 'class': 'form-control'}))
    password2 = forms.CharField(required=True, widget=forms.PasswordInput(attrs={'placeholder': 'Confirm password', 'class': 'form-control'}))

    class Meta:
        model = User
        fields = ('last_name', 'first_name', 'username', 'email', 'password1', 'password2')
class UpdateUserModelFields(forms.ModelForm):
    """Edit form for the built-in User fields (used on the profile page)."""
    email = forms.EmailField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Email', 'class': 'form-control'}))
    first_name = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'First Name', 'class': 'form-control'}))
    last_name = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Last Name', 'class': 'form-control'}))
    username = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Username', 'class': 'form-control'}))

    class Meta:
        model = User
        fields = ('last_name', 'first_name', 'username', 'email')
class UpdateUserProfileModelFields(forms.ModelForm):
    """Edit form for the extra UserProfile fields (address, phone, etc.)."""
    GENDER_CHOICES = (
        ('male', 'Male'),
        ('female', 'Female'),
    )
    address = forms.CharField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Address', 'class': 'form-control'}))
    phonenumber = forms.IntegerField(required=True, widget=forms.TextInput(attrs={'placeholder': 'Phone Number', 'class': 'form-control'}))
    gender = forms.ChoiceField(widget=forms.RadioSelect, choices=GENDER_CHOICES)
    profile_picture = forms.ImageField(required=True)
    # gender = forms.CharField(required=True, widget=forms.RadioChoiceInput(attrs={'placeholder': 'Username', 'class': 'form-control'}))

    class Meta:
        model = UserProfile
        fields = ('address', 'phonenumber', 'gender', 'profile_picture')
| {"/myapp/views.py": ["/myapp/models.py"], "/myapp/url.py": ["/myapp/forms.py"], "/myapp/forms.py": ["/myapp/models.py"]} |
52,241 | techpool/skfgi_notifier | refs/heads/master | /server.py | # Let's get this party started!
import falcon
import scrap
import json
import migrate
import clock
from query import query, querytojson
# Falcon follows the REST architectural style, meaning (among
# other things) that you think in terms of resources and state
# transitions, which map to HTTP verbs.
class NotificationResource(object):
    """Read-only endpoint exposing stored notices as JSON."""

    def on_get(self, req, resp):
        """Handle GET: return every notice, or the `limit` most recent ones."""
        resp.status = falcon.HTTP_200  # default status, set explicitly
        try:
            limit = int(req.params['limit'])
        except (KeyError, ValueError):
            # Missing or non-numeric `limit` parameter.
            limit = None
        if limit:
            query_result = query.get_n_recent_notice(limit)
        else:
            # Absent, malformed, or zero limit -> return everything.
            query_result = query.get_all_notice()
        resp.body = str(querytojson.json_wrapper(query_result))
class ScrapNow(object):
    """Endpoint that triggers an immediate scrape of the notice board."""

    def on_post(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.body = scrap.scrap()
class TableDropper(object):
    """Endpoint that drops the notification table."""

    def on_post(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.body = migrate.drop_table()
class TableCreater(object):
    """Endpoint that (re)creates the notification table."""

    def on_post(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.body = migrate.create_table()
# falcon.API instances are callable WSGI apps
app = falcon.API()

# Resources are represented by long-lived class instances
notifications = NotificationResource()
scraproute = ScrapNow()
droptable = TableDropper()
createtable = TableCreater()

# Route table: read notices, trigger a scrape, and schema management.
app.add_route('/notify', notifications)
app.add_route('/scrap', scraproute)
app.add_route('/droptable', droptable)
app.add_route('/createtable', createtable) | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,242 | techpool/skfgi_notifier | refs/heads/master | /scrap.py | import requests
import sqlite3
import datetime
import json
import db.dbconnect as dbconnector
from bs4 import BeautifulSoup
def scrap():
    """Scrape the SKFGI notice board and insert new notices into SQLite.

    Returns a JSON string {"status", "new_items", "data"} listing only the
    notices that were newly inserted (duplicates are detected via the
    UNIQUE Last-Modified timestamp column).
    """
    # NOTE(review): new_notice is incremented but never used in the output
    # (len(new_notice_list) is reported instead).
    new_notice = 0
    new_notice_list = []
    # Creating the database connection
    conn = dbconnector.connect()
    # Creating the cursor object
    c = conn.cursor()
    # The root link for the notice page where we get the irritating marquee
    notice_root_link = 'http://www.skf.edu.in/notice/'
    # Final endpoint for the notice page
    notice_link_endpoint = notice_root_link + 'notice.aspx'
    # Response object after making a GET request to the endpoint
    response_object = requests.get(notice_link_endpoint)
    # Parsing the complete html out of the response object
    notice_html = response_object.text
    # Creating soup object for parsing the HTML later
    soup = BeautifulSoup(notice_html, 'html.parser')
    # Selector for notice anchor tags
    notice_list = soup.select('#divSkill a')
    # Sl no. is only for printing, they are not saved in the DB
    sl_no = 1
    for notice in notice_list:
        print(str(sl_no), end=': ')
        # Selecting the previous sibling which gives the text within the ugly
        # background coloured text, which is saved as heading
        notice_heading_object = notice.previous_sibling.previous_sibling
        notice_heading = notice_heading_object.b.span.text
        # escaping some html escape sequence (non-breaking space)
        notice_heading = notice_heading.replace(u'\xa0', u' ')
        print(notice_heading, end=': ')
        # Taking the notice subheading text
        notice_sub_heading = notice.b.text
        print(notice_sub_heading)
        # Taking the URL for each of the notices
        notice_link = notice_root_link + notice['href']
        print('URL: ' + notice_link)
        # Sending a HEAD request to the above got URL in order to get the
        # LAST MODIFIED header information which will act as a primary key
        # in the database
        notice_request = requests.head(notice_link)
        last_modified = notice_request.headers['Last-Modified']
        print('Last Modified: ' + last_modified)
        # Parsing the datetime in the format recognizable for the SQLite.
        # NOTE(review): %Z parses but discards the zone name, so the stored
        # timestamp is naive -- confirm all servers report GMT.
        time = datetime.datetime.strptime(last_modified, "%a, %d %b %Y %H:%M:%S %Z")
        # Try inserting the data in the table
        # TO_DO: If the insertion takes place then it will shout out the data to a
        # WebHook for messenger which will send off the data to all the subscribers
        # of the page
        try:
            values = (notice_heading, notice_sub_heading, notice_link, time)
            c.execute("INSERT INTO notification VALUES(?, ?, ?, ?)", values)
            new_notice = new_notice + 1
            notice_details = {
                "heading": values[0],
                "sub_heading": values[1],
                "url": values[2],
                "update_time": str(values[3])
            }
            new_notice_list.append(notice_details)
        except sqlite3.IntegrityError as e:
            # UNIQUE(updatetime) violation -> notice already stored.
            print('Record Already Exists')
        sl_no += 1
    # Committing the changes in the database so that the change is persistent
    # in the database
    conn.commit()
    json_data = {
        "status": "success",
        "new_items": len(new_notice_list),
        "data": new_notice_list
    }
    json_data = json.dumps(json_data)
    return json_data
if __name__ == '__main__':
scrap() | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,243 | techpool/skfgi_notifier | refs/heads/master | /query/querytojson.py | import json
import query
def json_wrapper(query_obj):
    """Convert DB result rows (tuples of heading, sub_heading, url,
    update_time in that column order) into a JSON array string."""
    data = []
    for query_tuble in query_obj:
        json_obj = {
            "heading": str(query_tuble[0]),
            "sub_heading": str(query_tuble[1]),
            "url": str(query_tuble[2]),
            "update_time": str(query_tuble[3])
        }
        data.append(json_obj)
    # Serialize in place; the JSON string is what gets returned.
    data = json.dumps(data)
return data | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,244 | techpool/skfgi_notifier | refs/heads/master | /db/dbconnect.py | import sqlite3
# SQLite database file used by the notifier.
DB_NAME = 'skfgi.db'
# Single module-level connection shared by every caller of connect().
# NOTE(review): it is created at import time and sqlite3 connections are
# restricted to their creating thread by default (check_same_thread) --
# confirm all DB access happens on one thread.
connection = sqlite3.connect(DB_NAME)

def connect():
return connection | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,245 | techpool/skfgi_notifier | refs/heads/master | /query/query.py | import sqlite3
import db.dbconnect as dbconnector
import json
def get_all_notice():
    """Return a cursor over every row of the notification table."""
    conn = dbconnector.connect()
    cur = conn.cursor()
    return cur.execute('SELECT * FROM notification')
def get_recent_notice():
    """Return a cursor over the single most recently updated notice.

    Delegates to get_n_recent_notice(1) -- the two functions previously
    duplicated the same query, which could drift apart.
    """
    return get_n_recent_notice(1)
def get_n_recent_notice(limit=1):
    """Return a cursor over the *limit* most recently updated notices."""
    connection = dbconnector.connect()
    sql = '''SELECT * FROM notification ORDER BY updatetime DESC LIMIT ?'''
    return connection.cursor().execute(sql, (limit,))
| {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,246 | techpool/skfgi_notifier | refs/heads/master | /clock.py | import time
import requests
import threading
def scrap_call():
    """Poll the /scrap endpoint forever, sleeping 3 minutes between calls.

    Runs on a daemon thread; failures are printed and the loop continues.
    """
    while True:
        try:
            requests.post('http://skfginotifier.herokuapp.com/scrap', timeout=40)
        except requests.exceptions.RequestException as e:
            # BUG FIX: only ReadTimeout was caught before, so any other
            # network failure (ConnectionError, etc.) killed the polling
            # thread silently. RequestException is the common base class
            # and still covers ReadTimeout.
            print(e)
        finally:
            time.sleep(180)
# Launch the poller on a background thread; daemon=True lets the
# interpreter exit without waiting for the infinite loop.
t = threading.Thread(target=scrap_call)
t.daemon = True
t.start() | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,247 | techpool/skfgi_notifier | refs/heads/master | /migrate.py | import sqlite3
import json
import db.dbconnect as dbconnector
def drop_table():
    """Drop the notification table; return a JSON status string."""
    conn = dbconnector.connect()
    status = {}
    try:
        conn.cursor().execute('''DROP TABLE notification''')
        # NOTE(review): "sucess" typo kept byte-for-byte -- clients may
        # already match this exact string.
        status['status'] = "sucess"
    except sqlite3.OperationalError as err:
        status['status'] = "error"
        status['error'] = str(err)
    finally:
        conn.commit()
    return json.dumps(status)
def create_table():
    """Create the notification table and build a JSON status report."""
    conn = dbconnector.connect()
    cursor = conn.cursor()
    json_data = {}
    try:
        # updatetime is UNIQUE so re-scraped notices de-duplicate on insert.
        cursor.execute('''CREATE TABLE notification
        (heading text, subheading text, url text, updatetime timestamp UNIQUE)''')
        # NOTE(review): "sucess" is a typo, but clients may already match it --
        # fix the API and its consumers together.
        json_data['status'] = "sucess"
    except sqlite3.OperationalError as e:
        # e.g. table already exists.
        json_data['status'] = "error"
        json_data['error'] = str(e)
    finally:
        conn.commit()
    json_data = json.dumps(json_data)
return json_data | {"/server.py": ["/scrap.py", "/migrate.py", "/clock.py"], "/scrap.py": ["/db/dbconnect.py"], "/query/query.py": ["/db/dbconnect.py"], "/migrate.py": ["/db/dbconnect.py"]} |
52,248 | zengdz/crimemap | refs/heads/master | /mockdbhelper.py |
class MockDBHelper:
    """In-memory stand-in for DBHelper, used when dbconfig.test is set.

    Write operations are no-ops; reads return canned data so the app can
    run without a MySQL server.
    """

    def connect(self, database="crimemap"):
        """No-op: there is no real connection to open."""
        pass

    def get_all_inputs(self):
        """Return an empty result set."""
        return []

    def add_input(self, data):
        """Discard the submitted data."""
        pass

    def clear_all(self):
        """Nothing is stored, so there is nothing to clear."""
        pass

    def add_record(self, category, date, latitude, longitude, description):
        """Discard the submitted crime record."""
        pass

    def get_all_records(self):
        """Return a single canned crime record."""
        mock_record = {
            'latitude': 23.1157,
            'longitude': 113.3009,
            'date': "2000-01-01",
            'category': "mugging",
            'description': "mock description",
        }
        return [mock_record]
| {"/crimemap.py": ["/mockdbhelper.py", "/dbhelper.py"]} |
52,249 | zengdz/crimemap | refs/heads/master | /dbhelper.py | #coding: utf-8
import pymysql
import dbconfig
import datetime
# All data operations are wrapped in try-finally so the database
# connection is always closed cleanly in the end.
class DBHelper:
    """MySQL-backed data-access helper for the crimemap app."""

    # Open a database connection -- required before every table operation;
    # the charset is specified explicitly at the end.
    def connect(self, database="crimemap"):
        return pymysql.connect(host='localhost',
                               user=dbconfig.db_user,
                               passwd=dbconfig.db_password,
                               db=database,
                               charset = 'utf8')

    # (Earlier experiment) fetch the description column from crimes.
    def get_all_inputs(self):
        connection = self.connect()  # connect to the DB before every operation
        try:
            query = "SELECT description FROM crimes;"  # select description rows; note the trailing semicolon required by SQL syntax
            with connection.cursor() as cursor:  # use with-as for the cursor
                cursor.execute(query)  # after executing, the cursor points at the result set
                return cursor.fetchall()  # fetchall() turns the pointed-at rows into a Python list
        finally:
            connection.close()

    # (Earlier experiment) insert a description row into crimes.
    def add_input(self, data):
        connection = self.connect()
        try:
            # small fix to SQL injection
            query = "INSERT INTO crimes (description) VALUES (%s);"  # for testing, only a single description value is inserted
            with connection.cursor() as cursor:
                cursor.execute(query, data)  # run the insert
                connection.commit()  # unlike reads, inserts modify the DB, so commit for the change to take effect
        finally:
            connection.close()

    # Delete every row from the crimes table.
    def clear_all(self):
        connection = self.connect()
        try:
            query = "DELETE FROM crimes;"  # delete all contents of the crimes table
            with connection.cursor() as cursor:
                cursor.execute(query)  # run the delete
                connection.commit()  # deletes modify the DB, so commit for the change to take effect
        finally:
            connection.close()

    def add_record(self, category, date, latitude, longitude, description):
        """Insert one full crime record (parameterized against injection)."""
        connection = self.connect()
        try:
            query = "INSERT INTO crimes (category, date, latitude, longitude, description) VALUES (%s, %s, %s, %s, %s);"
            with connection.cursor() as cursor:
                cursor.execute(query, (category, date, latitude, longitude, description))
                connection.commit()
        except Exception as e:
            # NOTE(review): printing and swallowing hides failed inserts from
            # the caller -- confirm this best-effort behaviour is intended.
            print(e)
        finally:
            connection.close()

    def get_all_records(self):
        """Return every crime as a list of dicts ready for JSON/JavaScript."""
        connection = self.connect()
        try:
            query = "SELECT latitude, longitude, date, category, description FROM crimes;"
            with connection.cursor() as cursor:
                cursor.execute(query)
                named_crimes = []
                # Convert each tuple row from the cursor into a dict so the
                # front-end JavaScript can consume it easily.
                for crime in cursor:
                    named_crime = {
                        'latitude': crime[0],
                        'longitude': crime[1],
                        'date': datetime.datetime.strftime(crime[2], '%Y-%m-%d'),
                        'category': crime[3],
                        'description': crime[4]
                    }
                    named_crimes.append(named_crime)
                return named_crimes
        finally:
            connection.close()
| {"/crimemap.py": ["/mockdbhelper.py", "/dbhelper.py"]} |
52,250 | zengdz/crimemap | refs/heads/master | /crimemap.py | #coding:utf-8
# The server runs Python 2, so source files containing Chinese text must declare their encoding.
from flask import Flask
from flask import render_template
from flask import request
import json
# Note: dbconfig is not under version control. The local dbconfig only contains `test = True`;
# the server-side dbconfig has `test = False` plus the database credentials.
import dbconfig
if dbconfig.test:
from mockdbhelper import MockDBHelper as DBHelper
else:
from dbhelper import DBHelper
app = Flask(__name__)
DB = DBHelper()
@app.route("/")
def home():
records = DB.get_all_records()
records = json.dumps(records)
return render_template("home_google.html", records=records)
#前端显示地图可以选择谷歌地图或者百度地图home_google或者home_baidu
@app.route("/submitrecord", methods=['POST'])
def submitrecord():
category = request.form.get("category")
date = request.form.get("date")
latitude = float(request.form.get("latitude"))
longitude = float(request.form.get("longitude"))
description = request.form.get("description")
DB.add_record(category, date, latitude, longitude, description)
return home()
@app.route("/clear")
def clear():
try:
DB.clear_all()
except Exception as e:
print(e)
return home()
if __name__ == '__main__':
app.run(port=5000, debug=True)
| {"/crimemap.py": ["/mockdbhelper.py", "/dbhelper.py"]} |
52,255 | kicon/armstrong.dev | refs/heads/master | /armstrong/dev/tests/utils/__init__.py | from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from armstrong.dev.tests.utils.base import ArmstrongTestCase, override_settings | {"/armstrong/dev/tests/utils/__init__.py": ["/armstrong/dev/tests/utils/base.py"], "/armstrong/dev/virtualdjango/test_runner.py": ["/armstrong/dev/virtualdjango/base.py"], "/armstrong/dev/tests/utils/users.py": ["/armstrong/dev/tests/utils/base.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.