index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
27,432
|
285027200/myproject1
|
refs/heads/master
|
/myproject1/__init__.py
|
# 设置pymsql的链接信息,注意是django2.2版本之前的方法
import pymysql
pymysql.install_as_MySQLdb()
|
{"/apps/admin/views.py": ["/utils/json_fun.py", "/myproject1/__init__.py"], "/apps/users/views.py": ["/utils/json_fun.py", "/apps/users/forms.py", "/apps/users/models.py"], "/apps/verifications/views.py": ["/utils/json_fun.py"], "/apps/users/forms.py": ["/apps/users/models.py"], "/apps/news/views.py": ["/myproject1/__init__.py", "/utils/json_fun.py"]}
|
27,433
|
285027200/myproject1
|
refs/heads/master
|
/apps/users/models.py
|
# 1.导入模块
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager as _UserManager
# Create your models here.
# 7.继承UserManager和重写类
class NewUserManager(_UserManager):
# 默认参数email为空
def create_superuser(self, username, password, email=None, **extra_fields):
# 重写
super(NewUserManager, self).create_superuser(
username=username,
password=password,
email=email,
**extra_fields
)
# 2.继承AbstractUser类,保留AbstractUser里面重要的方法
class Users(AbstractUser):
# 8.实例化,把原来的object替换成我们重写的
object = NewUserManager()
# 9.用于创建超级用户是需要输入的字段,例如账号密码那样
REQUIRED_FIELDS = ['mobile']
# 3.添加新的字段moblie和emaile_active
mobile = models.CharField(max_length=11, # 限定最大字数
unique=True, # 设置唯一
verbose_name='手机号', # 显示名称
help_text='手机号', # 提示信息
error_messages={
'unique': '此手机号已注册~' # unique报错显示的内容
})
# 4.用于是否保留邮箱的操作
emaile_active = models.BooleanField(default=False, verbose_name='邮箱验证状态')
# 5.嵌套类,用于指定部分内容例如数据库的表名
class Meta:
db_table = 'tb_users' # 指定数据库表名,不定义的话一般是app名加类的小写
verbose_name = '用户' # 中文显示名
verbose_name_plural = verbose_name # 复数名称
# 6.打印时对象显示的内容
def __str__(self):
return self.username
# 10.写完代码和配置号settings之后要在manage命令行中执行数据迁移:makemigrations和migrate
def get_groups_name(self):
group_name_list = [i.name for i in self.groups.all()]
return '|'.join(group_name_list)
|
{"/apps/admin/views.py": ["/utils/json_fun.py", "/myproject1/__init__.py"], "/apps/users/views.py": ["/utils/json_fun.py", "/apps/users/forms.py", "/apps/users/models.py"], "/apps/verifications/views.py": ["/utils/json_fun.py"], "/apps/users/forms.py": ["/apps/users/models.py"], "/apps/news/views.py": ["/myproject1/__init__.py", "/utils/json_fun.py"]}
|
27,434
|
285027200/myproject1
|
refs/heads/master
|
/apps/users/urls.py
|
# 导入模块
from django.urls import path
from users import views
# 定义app名字
app_name = 'users'
urlpatterns = [
# 定义注册页面的路由
path('register/', views.RegisterView.as_view(), name='register'),
path('login/', views.LoginView.as_view(), name='login'),
path('logout/', views.LogoutView.as_view(), name='logout'),
]
|
{"/apps/admin/views.py": ["/utils/json_fun.py", "/myproject1/__init__.py"], "/apps/users/views.py": ["/utils/json_fun.py", "/apps/users/forms.py", "/apps/users/models.py"], "/apps/verifications/views.py": ["/utils/json_fun.py"], "/apps/users/forms.py": ["/apps/users/models.py"], "/apps/news/views.py": ["/myproject1/__init__.py", "/utils/json_fun.py"]}
|
27,435
|
285027200/myproject1
|
refs/heads/master
|
/apps/news/views.py
|
# django自带的模块
# 导入日志器
import logging
import json
# 导入渲染模块
from django.shortcuts import render, HttpResponse
# 导入类视图模块
from django.views import View
# 导入分页模块
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# 导入404模块
from django.http import Http404
# app下的模块
# 导入数据库模板
from . import models
# 导入常量
from . import constants
# 导入settings
from myproject1 import settings
# 其他的模块
from utils.json_fun import to_json_data
from utils.res_code import Code, error_map
from haystack.views import SearchView as _SearchView
# 调用名为django的日志器
logger = logging.getLogger('django')
# 第一次测试显示主页面
def index(request):
return render(request, 'news/index.html')
# 新闻主页
class IndexView(View):
''''''
def get(self,request):
# 查数据库里的标签的数据
tags = models.Tag.objects.only('id', 'name').filter(is_delete=False) # 只查到没有被逻辑删除的数据的id和name
# 查询数据库里的热门新闻的数据(通过链接的方式查询news下的title、imgage_url、id且排除掉被逻辑删除掉了的和根据priority排序或者-news__clicks排序,最后用切片的方式只取前3)
hot_news = models.HotNews.objects.select_related('news').only('news__title', 'news__image_url', 'news__id').filter(is_delete=False).order_by('priority', '-news__clicks')[0:constants.SHOW_HOTNEWS_COUNT]
# # 上下文管理器
# context = {
# 'tags': tags
# }
# # 返回渲染页面和数据
# return render(request, 'news/index.html', context=context)
# 更好的返回方法
return render(request, 'news/index.html', locals()) # locals能把当前的变量都传过去,不用用context
# 新闻列表
class NewsListView(View):
'''
/news/
# ajax局部请求刷新
# 传参:tag的id 和page
# 后台返回前端:拿到7个字段(文章名、标签、简介、作者名、时间、图片、文章id)
# 请求方式:GET
# 传参方式:查询字符串 ?tag_id1%page=2
'''
def get(self, request):
# 获取前端参数(因为是用查询字符串的方法获取参数的,所以用下面的方法)
# 校验参数
# 用try方法是因为就算错误了还能继续运行到下一步,友好,也方便我们处理异常
try: # 判断用户输入的参数是否正常,假如用户输的参数是字母,那就会报错
tag_id = int(request.GET.get('tag_id', 0)) # get获取的参数是str格式的,需要int转换
except Exception as e: # 如果用户报错,那我们就友好的给他正确的参数
logger.error('标签的错误:\n{}'.format(e))
tag_id = 0
try:
page = int(request.GET.get('page', 1))
except Exception as e:
logger.error('页码错误:\n{}'.format(e))
page = 1
# 从数据库获取数据
'''
# 在news中查title、digest、image_url、update_time
# 用select_related方法去关联其他的表
'''
# 用select_related方法去关联tag和author表,only方法只拿需要用的内容,返回的是查询集的格式
news_queryset = models.News.objects.select_related('tag', 'author').only('title', 'digest', 'image_url', 'update_time', 'tag__name', 'author__username') # id字段是默认会查的,不用主动写入
# 如果传的tag_id参数存在的话就直接赋值给news,或者tag_id不存在的话就赋值第二个给news
news = news_queryset.filter(is_delete=False, tag_id=tag_id) or news_queryset.filter(is_delete=False)
# 分页内容(把部分需要的内容给前端,不要一次性给全部)
paginator = Paginator(news, constants.PER_PAGE_NEWS_COUNT) # 第一个参数是拿到数据,第二个参数是每页要显示的数据条数
try: # 判断用户传的页数如否正确
news_info = paginator.page(page)
except EmptyPage: # 如果用户传的页数不对
logger.error('用户访问的页数大于总页数')
news_info = paginator.page(paginator.num_pages) # 我们就给他一个最后一页num_pages是最后一页的意思
# 序列化输出(因为我们返回给前端的是json格式的,所以要把格式给先安排一下)
news_info_list = []
for n in news_info:
news_info_list.append({
'id': n.id,
'title': n.title,
'digest': n.digest,
'image_url': n.image_url,
# 时间的格式化
'update_time': n.update_time.strftime('%Y年%m月%d日 %H:%M'),
'tag_name': n.tag.name,
'author': n.author.username,
})
data = {
'news': news_info_list,
'total_pages': paginator.num_pages,
}
# 返回前端
return to_json_data(data=data)
# 轮播图
class NewsBanner(View):
'''
# 用ajax来传递参数
# 要拿到轮播图表的image_url、news_id和新闻表的title
'''
def get(self, request):
# 从数据库中拿去数据(直接查询Banner中的image_url和news_id,用关联news查询news__title,且排除掉被逻辑删除掉了的和根据priority排序,最后用切片的方式只取前6)
banners = models.Banner.objects.select_related('news').only('image_url', 'news_id', 'news__title').filter(is_delete=False).order_by('priority')[0:constants.SHOW_BANNER_COUNT]
# 序列化输出
banners_info_list = []
for b in banners:
banners_info_list.append(
{
'image_url':b.image_url,
'news_id':b.news_id,
'news_title':b.news.title
}
)
data = {
'banners':banners_info_list
}
return to_json_data(data=data)
# 文章详情
class NewsDetailView(View):
'''
/news/<int:news_id>
通过模板渲染的方式来实现
传参:文章id
返回:5个(标题、作者、时间、标签、内容)
'''
def get(self, request, news_id):
# 从数据库拿去数据(直接查询news里的title、content、update_time通过关联拿到tag__name、author__username且排除被逻辑删除了的和要求符合id=news_id,拿到第一个数据)
news = models.News.objects.select_related('tag', 'author').only('title', 'content', 'update_time', 'tag__name', 'author__username').filter(is_delete=False, id=news_id).first()
# 如果从数据库中拿到了数据就返回
if news:
# 评论功能写在这里
# 需要从数据库拿到content, update_time, parent.username, parent.content, parent.update_time
comments = models.Comments.objects.select_related('author', 'parent').only('content', 'update_time', 'author__username', 'parent__content', 'parent__author__username', 'parent__update_time').filter(is_delete=False, news_id=news_id)
# 序列化输出(写在了models中)
comments_info_list = []
for comm in comments:
comments_info_list.append(comm.to_dict_data())
return render(request, 'news/news_detail.html', locals())
# 否则报错
else:
raise Http404('新闻{}不存在'.format(news_id))
# 回复评论
class NewsCommentView(View):
'''
/news/<int:news_id>/comments/
'''
def post(self,request,news_id):
# 判断用户是否有登录
if not request.user.is_authenticated:
return to_json_data(errno=Code.SESSIONERR, errmsg=error_map[Code.SESSIONERR])
# 获取数据库的参数且判断是否存在
if not models.News.objects.only('id').filter(is_delete=False, id=news_id):
return to_json_data(errno=Code.PARAMERR, errmsg='新闻不存在')
# 获取前端输入的参数且判断是否存在
json_data = request.body
if not json_data:
return to_json_data(errno=Code.PARAMERR, errmsg=error_map[Code.PARAMERR])
dict_data = json.loads(json_data.decode('utf8'))
# 校验参数
# 拿到前端输入的内容且判断是否为空
content = dict_data.get('content')
if not content:
return to_json_data(errno=Code.PARAMERR, errmsg='评论不能为空!')
# 拿到前端输入的父评论且判断是否存在
'''父评论的验证-1有没有父评论2parent_id必须为数字3数据库里是否存在4父评论的新闻id是否跟当前的news_id一致'''
parent_id = dict_data.get('parent_id')
# 因为涉及到判断是否为数字,所以要用try语句
try:
if parent_id:
parent_id = int(parent_id)
if not models.Comments.objects.only('id').filter(is_delete=False, id=parent_id, news_id=news_id).exists():
return to_json_data(errno=Code.PARAMERR, errmsg=error_map[Code.PARAMERR])
except Exception as e:
logger.info('前端传的parent_id异常{}'.format(e))
return to_json_data(errno=Code.PARAMERR, errmsg='未知异常')
# 存入参数
new_comment = models.Comments() # 实例化一个对象
# 把对象的属性赋值
new_comment.content = content
new_comment.news_id = news_id
new_comment.author = request.user
new_comment.parent_id = parent_id if parent_id else None # 加多一层验证,防止爬虫
new_comment.save()
# 返回前端,使用models.py中那个序列化
return to_json_data(data=new_comment.to_dict_data())
# 搜索功能
class SearchView(_SearchView):
# 定义模版文件
template = 'news/search.html'
# 重写响应方式,如果请求参数q为空,返回模型News的热门新闻数据,否则根据参数q搜索相关数据
def create_response(self):
kw = self.request.GET.get('q', '') # 获取前端的url中有木有q
# 判断有没有拿到搜索的关键字,如果没有就展示所有的
if not kw:
show_all = True # 展示所有数据(只是个标志而已)
# 从HotNews表中关联的news表中拿到news__title,news__image_url,news__id且要求是没被逻辑删除的,和按优先级排序或点击量排序
hot_news = models.HotNews.objects.select_related('news').only('news__title', 'news__image_url', 'news__id').filter(is_delete=False).order_by('priority', '-news__clicks')
# 分页
paginator = Paginator(hot_news, settings.HAYSTACK_SEARCH_RESULTS_PER_PAGE)
try:
page = paginator.page(int(self.request.GET.get('page', 1)))
except PageNotAnInteger:
# 如果参数page的数据类型不是整型,则返回第一页数据
page = paginator.page(1)
except EmptyPage:
# 用户访问的页数大于实际页数,则返回最后一页的数据
page = paginator.page(paginator.num_pages)
return render(self.request, self.template, locals())
# 否则的意思就是拿到了关键字,那就不展示所有的数据
else:
show_all = False # 展示有的数据(只是个标志而已)
# 继承和使用正宗的SearchView类的方法,上面的是被重写了的
qs = super(SearchView, self).create_response()
return qs
|
{"/apps/admin/views.py": ["/utils/json_fun.py", "/myproject1/__init__.py"], "/apps/users/views.py": ["/utils/json_fun.py", "/apps/users/forms.py", "/apps/users/models.py"], "/apps/verifications/views.py": ["/utils/json_fun.py"], "/apps/users/forms.py": ["/apps/users/models.py"], "/apps/news/views.py": ["/myproject1/__init__.py", "/utils/json_fun.py"]}
|
27,436
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/settings.py
|
# Settings for this app.
settings = dict(
# the main settings dict contains nothing right now.
# we use this to get secret things from settings_local
)
try:
# pull in settings_local if it exists
from settings_local import settings as s
settings.update(s)
except ImportError:
pass
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,437
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/gradient_descent_tests.py
|
'''
gradient_descent_tests.py - Basic unittests for gradient_descent.py.
Author: Bradley Mickunas
Date: December 12, 2012
'''
import unittest
import gradient_descent
dim_full_set = ['danceability','energy','key','loudness',
'tempo', 'speechiness', 'liveness', 'mode',
'time_signature']
equal_pair = ({'key': 2, 'title': 'equal_0', 'energy': 0.4, 'liveness': 0.1,
'tempo': 91.9,
'speechiness': 0.1, 'artist_name': 'Rihanna', 'vect': {'time_signature': 0.5,
'energy': 0.4, 'liveness': 0.1, 'tempo': 0.3, 'speechiness': 0.1,
'danceability': 0.5, 'key': 0.2, 'loudness': 0.8, 'mode': 0.0}, 'mode': 0,
'time_signature': 4, 'duration': 225.2273, 'loudness': -6.7379999999999995,
'danceability': 0.547264495122005},
{'key': 2, 'title': 'equal_1', 'energy': 0.4, 'liveness': 0.1, 'tempo': 91.9,
'speechiness': 0.1, 'artist_name': 'Rihanna', 'vect': {'time_signature': 0.5,
'energy': 0.4, 'liveness': 0.1, 'tempo': 0.3, 'speechiness': 0.1,
'danceability': 0.5, 'key': 0.2, 'loudness': 0.8, 'mode': 0.0}, 'mode': 0,
'time_signature': 4, 'duration': 225.2273, 'loudness': -6.7379999999999995,
'danceability': 0.547264495122005})
pair_error_of_half = ({'key': 2, 'title': 'halferror_0', 'energy': 0.4, 'liveness': 0.1,
'tempo': 91.9,
'speechiness': 0.1, 'artist_name': 'Rihanna', 'vect': {'time_signature': 0.5,
'energy': 0.4, 'liveness': 0.1, 'tempo': 0.3, 'speechiness': 0.1,
'danceability': 0.5, 'key': 0.2, 'loudness': 0.8, 'mode': 0.0}, 'mode': 0,
'time_signature': 4, 'duration': 225.2273, 'loudness': -6.7379999999999995,
'danceability': 0.547264495122005},
{'key': 2, 'title': 'halferror_1', 'energy': 0.4, 'liveness': 0.1, 'tempo': 91.9,
'speechiness': 0.1, 'artist_name': 'Rihanna', 'vect': {'time_signature': 0.5,
'energy': 0.4, 'liveness': 0.1, 'tempo': 0.3, 'speechiness': 0.1,
'danceability': 0.1, 'key': 0.3, 'loudness': 0.6, 'mode': 0.0}, 'mode': 0,
'time_signature': 4, 'duration': 225.2273, 'loudness': -6.7379999999999995,
'danceability': 0.547264495122005})
training_set = [equal_pair,pair_error_of_half]
training_weights = {'time_signature': 1.25, 'energy': 1.2, 'liveness': 1.05,
'tempo': 1.15, 'speechiness': 1.05, 'danceability': 1.25, 'key': 1.1,
'loudness': 1.4, 'mode': 1.0}
zero_weights = {'time_signature': 0.0, 'energy': 0.0, 'liveness': 0.0,
'tempo': 0.0, 'speechiness': 0.0, 'danceability': 0.0, 'key': 0.0,
'loudness': 0.0, 'mode': 0.0}
delta_weights_one_iter = {'time_signature': 0.25, 'energy': 0.2,
'liveness': 0.05, 'tempo': 0.15, 'speechiness': 0.05,
'danceability': 0.25, 'key': 0.1, 'loudness': 0.4, 'mode': 0.0}
class TestGradientDescent(unittest.TestCase):
def setUp(self):
self.learner = gradient_descent.GradientDescent()
def test_ZeroError(self):
# Test _error() on the same song
self.assertAlmostEqual(0.0, self.learner._error(equal_pair[0],equal_pair[1]))
def test_ZeroGradientVector(self):
# Test if having a loss of zero brings out no adjustments to the model
delta_weights = self.learner._calc_gradient(self.learner.learning_rate,0,equal_pair[0])
for dim in dim_full_set:
self.assertEqual(zero_weights[dim], delta_weights[dim])
def test_Error(self):
# Test _error() on different songs
self.assertAlmostEqual(0.5, abs(self.learner._error(pair_error_of_half[0],pair_error_of_half[1])))
def test_GradientVector(self):
# Test if the gradient vector is calculated correctly with one pair
delta_weights = self.learner._calc_gradient(1.0, 0.5,pair_error_of_half[0])
for dim in dim_full_set:
self.assertAlmostEqual(delta_weights_one_iter[dim],delta_weights[dim])
def test_Learn(self):
# Test if the Learn() function calculates correctly with two iterations of song pairs
self.learner.learn(training_set)
for dim in dim_full_set:
self.assertAlmostEqual(training_weights[dim], self.learner.model[dim])
if __name__ == '__main__':
unittest.main()
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,438
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/beatgoeson.py
|
'''
beatgoeson.py - Script and supporting classes, functions and data structures
for BeatGoesOn, our continuous song playlist generator.
Authors: Sam Hatfield and Bradley Mickunas
Date: December 12, 2012
'''
import math
import utils
import ujson
import json
dim_full_set = ['danceability','energy','speechiness', 'liveness',
'tempo', 'loudness',
'mode','key','time_signature'
]
dim_small_set = ['danceability','energy','tempo']
# the max and min values for normalization into a 0.0-1.0 space
norm_ref = {
'key': {'max': 11.0, 'min': 0.0},
'loudness': {'max': 0.5, 'min': -52.0},
'tempo': {'max': 265.0, 'min': 0.0},
'mode': {'max': 1.0, 'min': 0.0},
'time_signature': {'max': 7.0, 'min': 0.0}
}
'''
This nested list structure allows us to remap the keys as reported by Echo Nest
to the Circle of Fifths defined in music theory, giving more accurate key
similarity.
'''
key_remap = [
#array 0: minor mode
#0 1 2 3 4 5 6 7 8 9 10 11 (mapping index)
#C Cs D Eb E F Fs G Gs A Bb B (original order)
#a e b fs cs gs eb bb f c g d (new mapping order)
[9, 4, 11, 6, 1, 8, 3, 10, 5, 0, 7, 2],
#array 1: major mode
#0 1 2 3 4 5 6 7 8 9 10 11 (mapping index)
#C Cs D Eb E F Fs G Ab A Bb B (original order)
#C G D A E B Fs Cs Ab Eb Bb F (new mapping order)
[0, 7, 2, 9, 4, 11, 6, 1, 8, 3, 10, 5]
]
'''
These weights were determined by our machine learning algorithm, which is based
on gradient descent. Relatively larger values increase the importance of a
given feature, while smaller values decrease a feature's importance in
similarity calculation.
'''
weights = {
'danceability': 1.7025,
'energy': 1.7199,
'speechiness': 1.0598,
'liveness': 1.8126,
'tempo': 1.5437,
'loudness': 2.1480,
'mode': 1.6917,
'key': 1.4571,
'time_signature': 1.7686
}
class BeatGoesOn(object):
"""
A searchommender (search/reccommender) for a continuous & smooth playlist
of songs.
"""
def __init__(self):
self.song_space = [] # vector space of all songs to choose from
def vectorize(self, songs):
'''
Reads in data and creates normalized 'vector' data structures
for each song.
Parameters:
songs - list of songs read in from a cleanly-formatted json file.
'''
# songs is the list of nice data structures with the info we need
for song in songs:
#remap the key ordering to circle of fifths
song['key'] = key_remap[song['mode']][song['key']]
# get the normalized vector of for the song characteristics
song['vect'] = self.normed_vect(song)
# store song in song_space
self.song_space.append(song)
def normed_vect(self, song):
'''
Creates vector of normalized data for a song.
Parameters:
song - a song data dict.
'''
# Create the vector that has the dimensions with scores
#vect = {dim:song[dim] for dim in dim_full_set}
vect = {}
# Make all scores between 0 and 1 by using the max and min values
# for that specific field from the EchoNest API
for dim in dim_full_set:
vect[dim] = song[dim]
if dim in norm_ref:
normed_score = ((float(vect[dim]) - norm_ref[dim]['min'])
/ (norm_ref[dim]['max'] - norm_ref[dim]['min']))
vect[dim] = normed_score
vect[dim] = float(vect[dim]) * weights[dim]
return vect
def searchommend(self, seed, playlist):
'''
Calculates similarity value between song and all songs in song_space,
then picks the most similar song for the next entry in playlist.
Parameters:
seed - Song we are comparing against (previous entry in playlist)
playlist - current list of songs, used to make sure we don't pick
the same song twice
'''
most_similar = []
for song in self.song_space:
# first, we check if the same song is already in the playlist
already_found = False
for item in playlist:
# if the title of the song under review shows up in
#another title in the playlist and the artist is the same,
# throw out the song under review
if ((item['title'].lower() in song['title'].lower()
or song['title'].lower() in item['title'].lower())
and item['artist_name'] == song['artist_name']):
already_found = True
if already_found:
continue
# Calculate the Euclidian Distance between the seed and all songs
total = 0
for dim in dim_full_set:
diff = seed['vect'][dim] - song['vect'][dim]
# for key, we use a circular distance measure
# (i.e. a key of 11 is next to 0)
# if the diff is more than 6, there is a shorter route on the
# other side of the circle
if dim == 'key' and abs(diff) > (6.0/11.0):
diff = (12.0/11.0) - abs(diff)
total += diff**2
eucl_dist = math.sqrt(total)
# if a minimum hasn't been set yet, use this song
if (len(most_similar) == 0):
most_similar.append(song)
most_similar.append(eucl_dist)
#print '\tFirst result:', song['title'], ',', eucl_dist
#for dim in dim_full_set:
#print '\t\t', dim, song[dim]
else:
# if the distance is less than the minimum, use this song
if ((eucl_dist < most_similar[1]) and (playlist.count(song)==0)):
most_similar[0] = song
most_similar[1] = eucl_dist
#print '\tNew max:', song['title'], ',', eucl_dist
#for dim in dim_full_set:
#print '\t\t', dim, song[dim]
return most_similar[0]
def generate_playlist(self, play_count, initial_song):
''''
Generates a playlist by calling searchommend repeatedly for the desired
number of songs.
Parameters:
play_count - the number of songs for the playlist
initial_song - the song dict selected by the user
'''
playlist = []
playlist.append(initial_song)
result = self.searchommend(initial_song, playlist)
playlist.append(result)
for i in range(play_count-2):
result = self.searchommend(result, playlist)
playlist.append(result)
return playlist
if __name__ == '__main__':
'''
When run from the command line, this script loads the data in
'clean_full_data.json' (a hardcoded filename) and runs the BeatGoesOn user
program.
Input:
clean_full_data.json - a list of cleanly-formatted song dicts
user input - decisions on input songs, number of songs, and output
Output:
text displayed on command line
'xxxxxx.json' - user-defined output file for playlists
'''
beatbox = BeatGoesOn()
print "Reading Data..."
data_file = open("clean_full_data.json", 'r')
# uncomment the line below and comment the line above to run from the data
# stored on github
#data_file = open("top_1000_clean_songs.json", 'r')
data = ujson.load(data_file)
data_file.close()
beatbox.vectorize(data)
print "Initializing Data..."
while(1):
print "Enter the title of your first song:"
title = raw_input('--> ')
seed = {}
results = []
for song in beatbox.song_space:
if title.lower() in song['title'].lower():
results.append(song)
if len(results) == 0:
print "Error: Song not found in our database. Please try again."
continue
elif len(results) > 1:
print "Found multiple results:"
i = 1
for result in results:
print i,".) ", result['title'], " by ", result['artist_name']
i +=1
print "Enter number of the correct song (or -1 if not found):"
selection = raw_input('--> ')
if int(selection) < 0 or int(selection) > len(results):
print "Error: Number invalid. Please try again."
continue
seed = results[int(selection) - 1]
else:
seed = results[0]
print "Enter how many songs you want on the playlist:"
song_num = raw_input('--> ')
groovy_playlist = beatbox.generate_playlist(int(song_num),seed)
i = 1
print "Results:"
for song in groovy_playlist:
print i,".) ", song['title'], " by ", song['artist_name']
# print song features
#for dim in dim_full_set:
#print '\t', dim, song[dim]
i = i + 1
print "Save this playlist to json? y/n"
response = raw_input('--> ')
if response[0] == 'y':
print "Please enter 'filename.json':"
filename = raw_input('--> ')
output_file = open(filename, 'w')
json.dump(groovy_playlist, output_file, indent=4)
output_file.close()
print "Retry? y/n"
response = raw_input('--> ')
if response[0] == 'n':
break
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,439
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/gradient_descent.py
|
'''
gradient_descent.py - Gradient descent algorithm for machine learning
weights for features in BeatGoesOn.
Author: Bradley Mickunas
Date: December 12, 2012
'''
import ujson as json
# Full set of song features that are used as dimensions for song vectors
dim_full_set = ['danceability','energy','key','loudness',
'tempo', 'speechiness', 'liveness', 'mode',
'time_signature']
class GradientDescent(object):
def __init__(self):
self.model = {} # dictionary of dimension weights
for dim in dim_full_set:
self.model[dim] = 1.0 # Initialize the weights in the model for each dimension to 1.0
self.learning_rate = 1.0 # Initialize the learning rate for machine learning to 1.0
self.N = 1.0 # Number of song pairs that have gone through the learning algorithm.
def _error(self, input, target_song):
# Calculate the difference between the summation of the target song and the output from using the model
actual_output = 0.0
target_output = 0.0
for dim in dim_full_set:
actual_output = actual_output + input['vect'][dim]*self.model[dim]
for dim in target_song['vect']:
target_output = target_output + target_song['vect'][dim]
return abs(target_output - actual_output) # Take the absolute value of the error between the two song vectors
def _calc_gradient(self, learning_rate, loss, input):
delta_weight = {} # The gradient vector that holds the calculated change for each dimension in the model
for dim in dim_full_set:
delta_weight[dim] = learning_rate*loss*input['vect'][dim]
return delta_weight
def learn(self, song_pairs):
# The song_pairs parameter is a list of "Good" song pairs that would succeed each other in the playlist well
for pair in song_pairs:
loss = self._error(pair[0], pair[1])
# Compute the adjustment to the weights, delta_w = alpha*(dL/dw),
del_w = self._calc_gradient(self.learning_rate, loss, pair[0])
# Update the weights vector, w_2 = w_1 + delta_w
for key in self.model.iterkeys():
self.model[key] = self.model[key] + del_w[key]
# Adjust the learning rate according to the number of song pairs that have passed through the algorithm
self.learning_rate = self.learning_rate/(self.N**(0.5))
self.N = self.N + 1.0 # Increment that count for the number of pairs that have trained the model
if __name__ == '__main__':
learner = GradientDescent()
print "Reading Training Set..."
data_file = open("training_set.json",'r')
loaded_training_set = json.load(data_file)
print "Learning..."
learner.learn(loaded_training_set)
print learner.model
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,440
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/utils.py
|
'''
utils.py - Utility functions for BeatGoesOn.
Authors: Sam Hatfield and Bradley Mickunas
Date: December 12, 2012
'''
import ujson as json
import json
import fileinput
song_ignore = ['search_rank','tracks','audio_md5']
analysis_ignore = ['bars','segments','track','beats','meta',
'sections','tatums','audio_md5']
audio_summary_ignore = ['audio_md5']
def read_songs():
for line in fileinput.input():
yield json.loads(line)
seen = set()
def reorg_songs(songs):
clean_songs = []
for song in songs:
clean_song = {}
clean_song['title'] = song['title']
clean_song['artist_name'] = song['artist_name']
clean_song['energy'] = song['audio_summary']['energy']
clean_song['tempo'] = song['audio_summary']['tempo']
clean_song['speechiness'] = song['audio_summary']['speechiness']
clean_song['key'] = song['audio_summary']['key']
clean_song['duration'] = song['audio_summary']['duration']
clean_song['liveness'] = song['audio_summary']['liveness']
clean_song['mode'] = song['audio_summary']['mode']
clean_song['time_signature'] = song['audio_summary']['time_signature']
clean_song['loudness'] = song['audio_summary']['loudness']
clean_song['danceability'] = song['audio_summary']['danceability']
clean_songs.append(clean_song)
return clean_songs
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,441
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/beatgoeson_tests.py
|
'''
beatgoeson_tests.py - Basic unittests for beatgoeson.py.
Author: Sam Hatfield
Date: December 12, 2012
'''
import unittest
import beatgoeson
expected_keys_small = ['danceability','energy','liveness']
expected_keys_large = ['danceability','energy','key','loudness',
'tempo', 'speechiness', 'liveness', 'mode',
'time_signature']
weights = {
'danceability': 1.7025,
'energy': 1.7199,
'speechiness': 1.0598,
'liveness': 1.8126,
'tempo': 1.5437,
'loudness': 2.1480,
'mode': 1.6917,
'key': 1.4571,
'time_signature': 1.7686
}
expected_norms = {
'energy': 0.5*weights['energy'],
'tempo': (250.0/265.0)*weights['tempo'],
'speechiness': 0.5*weights['speechiness'],
# NOTE: when we run normed_vect directly, keys aren't remapped
'key': (11.0/11.0)*weights['key'],
'liveness': 0.5*weights['liveness'],
'mode': 1.0*weights['mode'],
'time_signature': (3.0/7.0)*weights['time_signature'],
'loudness': ((0.0+52.0)/52.5)*weights['loudness'],
'danceability': 0.5*weights['danceability']
}
mock_data = [
{
'title': 'Lower',
'artist_name': 'Downer',
'energy': 0.3,
'tempo': 150.0,
'speechiness': 0.3,
'key': 9, #3 in new mapping
'liveness': 0.3,
'mode': 1,
'time_signature': 3.0,
'loudness': -40.0,
'danceability': 0.3
},
{
'title': 'Low',
'artist_name': 'Down',
'energy': 0.4,
'tempo': 200,
'speechiness': 0.4,
'key': 4, #4 in new mapping
'liveness': 0.4,
'mode': 1,
'time_signature': 3,
'loudness': -20,
'danceability': 0.4
},
{
'title': 'Middle',
'artist_name': 'Man',
'energy': 0.5,
'tempo': 250.0,
'speechiness': 0.5,
'key': 11, #5 in new mapping
'liveness': 0.5,
'mode': 1,
'time_signature': 3,
'loudness': 0,
'danceability': 0.5
},
{
'title': 'Higher',
'artist_name': 'Upper',
'energy': 0.7,
'tempo': 350,
'speechiness': 0.7,
'key': 1, #7 in new mapping
'liveness': 0.7,
'mode': 1,
'time_signature': 3,
'loudness': 40,
'danceability': 0.7
},
{
'title': 'Highest',
'artist_name': 'Top',
'energy': 1.0,
'tempo': 500,
'speechiness': 1.0,
'key': 10, #10 in new mapping
'liveness': 1.0,
'mode': 1,
'time_signature': 3,
'loudness': 100,
'danceability': 1.0
}
]
class TestVectorize(unittest.TestCase):
# perform necessary actions for other tests
def setUp(self):
self.beatbox = beatgoeson.BeatGoesOn()
self.beatbox.vectorize(mock_data)
# vectorize was already run in setUp(), so let's check it
def test_vectorize(self):
# check that the length of song space matches the length of mock_data
self.assertEqual(len(self.beatbox.song_space), len(mock_data))
# make sure that the song space has the expected features
self.assertEqual(set(self.beatbox.song_space[0]['vect'].keys()),
set(expected_keys_large))
def test_normed_vect(self):
vector = self.beatbox.normed_vect(mock_data[2])
# Test the normalization of the song vectors
for dim in vector:
print dim, ':', vector[dim]
self.assertAlmostEqual(vector[dim], expected_norms[dim])
def test_searchommend(self):
playlist = []
result = self.beatbox.searchommend(mock_data[2], playlist)
# if we call search w/ an empty list, the result should be the input
self.assertEqual(result, mock_data[2])
# now add the seed to the playlist as we expect
playlist.append(mock_data[2])
result2 = self.beatbox.searchommend(mock_data[2], playlist)
# this time we shouldn't return the input song
self.assertNotEqual(result2, mock_data[2])
# the closest song should be "Low" in mock_data
self.assertEqual(result2, mock_data[1])
def test_generate_playlist(self):
playlist = self.beatbox.generate_playlist(5, mock_data[2])
# make sure the returned playlist has the requested length
self.assertEqual(len(playlist), 5)
# Make sure there are no duplicates
for i in range(5):
for j in range(5):
if i == j:
continue
else:
self.assertNotEqual(playlist[i], playlist[j])
# now check if the playlist has the correct order
self.assertEqual(playlist[0], mock_data[2])
self.assertEqual(playlist[1], mock_data[1])
self.assertEqual(playlist[2], mock_data[0])
self.assertEqual(playlist[3], mock_data[3])
self.assertEqual(playlist[4], mock_data[4])
if __name__ == '__main__':
unittest.main()
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,442
|
bmickunas/BeatGoesOn
|
refs/heads/master
|
/data_download.py
|
'''
data_download.py - Script and functions for downloading Echo Nest API data
for use in BeatGoesOn.
Author: Sam Hatfield
Date: December 12, 2012
'''
#!/usr/bin/env python
import requests
import ujson as json
import time
import json
import utils
from settings import settings
prev_headers = {}
def rate_limit_wait(headers, thresh = 30, wait_time = 2):
'''
A function to make our program wait so that we don't hit the rate limit.
Parameters:
header - the dict headers object returned as part of requests.get
thresh - the threshold at which we start waiting
wait_time - the amount of time to wait, in seconds
NOTE: Occasionally, despite this function, we hit the rate limit.
We feel that this is due to the fact that the RateLimit-Remaining
is stated to be an estimate by Echo Nest.
'''
if int(headers['X-RateLimit-Remaining']) <= 6:
print 'Hard limit reached, wait for 10 seconds'
time.sleep(10)
if int(headers['X-RateLimit-Remaining']) <= thresh:
print 'Near rate limit, waiting for', wait_time, 'seconds.'
time.sleep(wait_time)
def get_top_artists(hundreds=10):
'''
Retrieves up to 1000 of the "hotttest" artists as defined by Echo Nest."
Parameters:
hundreds - specify how many results you want in hundreds
'''
s_params = {}
s_params['api_key'] = settings['api_key']
s_params['bucket'] = ['id:spotify-WW']
s_params['limit'] = 'true'
s_params['results'] = '100'
top_hottt_url = 'http://developer.echonest.com/api/v4/artist/top_hottt'
my_results = []
global prev_headers
# this is a reference so we can easily see what artists we downloaded
log_file = open("get_top_artists_log.txt", 'w')
# initialize prev_headers for rate_limit_wait()
if prev_headers == {}:
print 'Initializing rate limit...'
prev_headers = {'X-RateLimit-Remaining': '120'}
for i in range(0, hundreds):
#print 'Downloading', i, 'th hundred artists...'
s_params['start'] = i*100
rate_limit_wait(prev_headers)
raw_results = requests.get(top_hottt_url, params=s_params)
results = raw_results.json
prev_headers = raw_results.headers
# check to make sure our search worked correctly
if results['response']['status']['code'] != 0:
print 'Search error!'
print results['response']['status']
log_file.write('Search error!\n')
log_file.write("\t" + results['response']['status'] + '\n')
break
results_count = len(results['response']['artists'])
for j in range(results_count):
artist = results['response']['artists'][j]
json.dump(artist['name'], log_file, indent=4)
log_file.write('\n')
my_results.append(artist)
# break if Echo Nest stops returning results for this query
if results_count < 100:
break
log_file.close()
return my_results
def get_song_results(s_params, hundreds=10):
    '''
    Retrieves up to 1000 results for a particular song search query.

    Parameters:
        s_params - search parameters for the query. A few of these are set
                   internally, but the search-focused parameters are left
                   to be specified by the caller.
        hundreds - specify how many hundred results you want from the query

    Returns:
        my_results - a list of songs with our added data

    NOTE(review): on a persistent API error the inner `while True` loop
    retries forever; there is no retry cap — confirm that is acceptable.
    '''
    if 'api_key' not in s_params:
        s_params['api_key'] = settings['api_key']
    s_params['bucket'] = ['audio_summary', 'id:spotify-WW', 'tracks']
    s_params['limit'] = 'true'
    s_params['results'] = '100'
    search_url = 'http://developer.echonest.com/api/v4/song/search'
    my_results = []
    global prev_headers
    # initialized for the use of rate_limit_wait()
    if prev_headers == {}:
        print 'Initializing rate limit...'
        prev_headers = {'X-RateLimit-Remaining': '120'}
    for i in range(0, hundreds):
        #print 'Downloading', i, 'th hundred songs...'
        # enclose this in a loop so we can retry our query if needed
        while True:
            s_params['start'] = i*100
            rate_limit_wait(prev_headers)
            raw_results = requests.get(search_url, params=s_params)
            # NOTE(review): `.json` is used as a property, which only works
            # on old requests versions — newer ones require `.json()`
            results = raw_results.json
            prev_headers = raw_results.headers
            # check to make sure our search worked correctly
            if results['response']['status']['code'] != 0:
                print 'Search error!'
                print results['response']['status']
                print "Trying again..."
                continue
            results_count = len(results['response']['songs'])
            #print "number of results:", results_count
            for j in range(results_count):
                #print j
                song = results['response']['songs'][j]
                # The code below downloads a detailed analysis of the song.
                # We didn't end up using this data.
                '''
                detail_url = song['audio_summary']['analysis_url']
                #print detail_url
                analysis = requests.get(detail_url)
                #print analysis.status_code
                song['analysis'] = analysis.json
                song['search_rank'] = i*100 + j
                '''
                my_results.append(song)
            break
        # break if Echo Nest stops returning results for this query
        if results_count < 100:
            break
    return my_results
if __name__ == "__main__":
    '''
    When run from the command line, this script downloads the top 100 songs
    from the top 1000 artists (as ranked by Echo Nest).

    Output:
        get_top_artists_log.txt - A list of all artists downloaded.
        full_data_index.json - A list of all songs downloaded, sorted by artist.
        clean_full_data.json - Downloaded song data, reorganized into an
                               easy-to-use list of dicts.
    '''
    start_time = time.time()
    print "Starting getting artists..."
    artists = get_top_artists()
    end_time = time.time()
    print 'Got artists after %.3f seconds\n'%(end_time - start_time)
    # hotttest songs first for each artist
    params = {"sort": "song_hotttnesss-desc"}
    clean_list = {}
    full_data = []
    start_time = time.time()
    print "Starting getting songs..."
    for artist in artists:
        clean_list[artist['name']] = []
        params['artist_id'] = artist['id']
        # query for top 100 songs for this artist
        songs = get_song_results(params, 1)
        for song in songs:
            # add songs to full_data_index
            clean_list[artist['name']].append(song['title'])
        full_data.extend(songs)
        #print 'Got', len(songs), 'songs for', artist['name']
    end_time = time.time()
    print 'Got songs after %.3f seconds\n'%(end_time - start_time)
    # a utility function to arrange the data more cleanly
    start_time = time.time()
    print "Starting to reorganize data..."
    nice_data = utils.reorg_songs(full_data)
    end_time = time.time()
    print 'Reorganized data after %.3f seconds\n'%(end_time - start_time)
    start_time = time.time()
    print "Starting data write..."
    data_file = open("clean_full_data.json", 'w')
    json.dump(nice_data, data_file, indent=4)
    data_file.close()
    ref_file = open("full_data_index.json", 'w')
    json.dump(clean_list, ref_file, indent=4)
    ref_file.close()
    end_time = time.time()
    print 'Wrote data after %.3f seconds\n'%(end_time - start_time)
|
{"/gradient_descent_tests.py": ["/gradient_descent.py"]}
|
27,469
|
SofiaAkbar/week3_weekend_hwk_rock_paper_scissors
|
refs/heads/main
|
/models/tests.py
|
############# TESTING LOGIC HERE ###############
# class Player():
# def __init__(self, name, choice):
# self.name = name
# self.choice = choice
# # =======================
# class Game():
# def __init__(self, player1, player2):
# self.player1 = player1
# self.player2 = player2
# def winning_choice(self, choice1, choice2):
# if choice1 == choice2:
# return None
# elif choice1 == "rock" and choice2 == "paper":
# return choice1
# elif choice1 == "rock" and choice2 == "scissors":
# return choice1
# elif choice1 == "scissors" and choice2 == "rock":
# return choice2
# elif choice1 == "scissors" and choice2 == "paper":
# return choice1
# elif choice1 == "paper" and choice2 == "rock":
# return choice2
# elif choice1 == "paper" and choice2 == "scissors":
# return choice2
# else:
# return None
# def find_winner(self):
# winning_choice = self.winning_choice(self.player1.choice, self.player2.choice)
# if self.player1.choice == winning_choice:
# return self.player1
# elif self.player2.choice == winning_choice:
# return self.player2
# else:
# return None
# # =======================
# game = Game(
# Player("personA", "rock"),
# Player("personB", "paper")
# )
# print(game.find_winner().name)
|
{"/controllers/controller.py": ["/models/game.py"]}
|
27,470
|
SofiaAkbar/week3_weekend_hwk_rock_paper_scissors
|
refs/heads/main
|
/models/game.py
|
from models.player import *
class Game():
    """One round of rock-paper-scissors between two Player objects."""

    # winning choice for every decisive (choice1, choice2) pairing;
    # ties and unrecognised inputs fall through to None via dict.get
    _OUTCOMES = {
        ("rock", "paper"): "paper",
        ("rock", "scissors"): "rock",
        ("scissors", "rock"): "rock",
        ("scissors", "paper"): "scissors",
        ("paper", "rock"): "paper",
        ("paper", "scissors"): "scissors",
    }

    def __init__(self, player1, player2):
        self.player1 = player1
        self.player2 = player2

    def winning_choice(self, choice1, choice2):
        """Return the winning choice, or None on a draw or invalid input."""
        return self._OUTCOMES.get((choice1, choice2))

    def find_winner(self):
        """Return the winning Player, or None when nobody wins."""
        best = self.winning_choice(self.player1.choice, self.player2.choice)
        if self.player1.choice == best:
            return self.player1
        if self.player2.choice == best:
            return self.player2
        return None
|
{"/controllers/controller.py": ["/models/game.py"]}
|
27,471
|
SofiaAkbar/week3_weekend_hwk_rock_paper_scissors
|
refs/heads/main
|
/controllers/controller.py
|
from flask import render_template
from app import app
from models.game import *
from models.player import *
@app.route('/<string:choice1>/<string:choice2>')
def new_game(choice1, choice2):
    """Play one round between the two URL-supplied choices and render the result."""
    contenders = (Player("Player 1", choice1), Player("Player 2", choice2))
    this_round = Game(*contenders)
    return render_template('base.html', winner=this_round.find_winner())
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
|
{"/controllers/controller.py": ["/models/game.py"]}
|
27,474
|
etups/PixHack2016
|
refs/heads/master
|
/jieba/Jieba.py
|
#!/usr/bin/env python
import jieba
import jieba.analyse
import glob
import re
import codecs
import sys
import os
from multiprocessing import Process
from lib.pixnetdb import PixnetDB
# Python 2: re-expose sys.setdefaultencoding so implicit str<->unicode
# conversions default to UTF-8 for the Chinese corpus
reload(sys)
sys.setdefaultencoding('utf-8')
currentPath = os.path.dirname(os.path.realpath(__file__))
# Set word_cut dictionary and stopword dictionary
stopwordFile = 'ch_stopwords.txt'
stopwordFilePath = os.path.join(currentPath, 'dict', stopwordFile)
dictFile = 'dict.txt.big.txt'
dictFilePath = os.path.join(currentPath, 'dict', dictFile)
# use the big (traditional-Chinese) dictionary for segmentation
jieba.set_dictionary(dictFilePath)
def get_stopWords(stopwordFile):
    """Read *stopwordFile* (UTF-8) and return the set of its stripped lines."""
    with open(stopwordFile, 'rb') as handle:
        text = handle.read().decode('utf-8')
    return set(line.strip() for line in text.splitlines())
# Load articles from pixnetdb
def load_data():
    """Fetch all article bodies from PixnetDB as a flat list of strings."""
    db = PixnetDB()
    # each row from get_articles() is a one-element tuple (content,)
    return [row[0] for row in db.get_articles()]
def filter_words(content):
    """Clean *content* line by line: strip digits/dots, URL-like tokens,
    *.com tokens, and collapse 'xd...' runs; returns the stripped lines."""
    cleaned = []
    # patterns reproduced exactly from the original implementation
    substitutions = (('[0-9.]', ''),
                     ('www[\w./-_]+', ''),
                     ('http[:\w./-_]+', ''),
                     ('[\w./-_@]+com', ''),
                     ('xd+', 'xd'))
    for raw_line in content.splitlines():
        line = raw_line
        for pattern, repl in substitutions:
            line = re.sub(pattern, repl, line)
        cleaned.append(line.strip())
    return cleaned
def write_file(content, fileName):
    """Append *content* to *fileName*, creating the file if needed."""
    out = open(fileName, 'a+')
    try:
        out.write(content)
    finally:
        out.close()
def start_jieba(data, fileName):
    """Segment every article in *data* with jieba and append the space-joined
    tokens (stop words removed) to *fileName*, one article per line."""
    f = open(os.path.join(currentPath, fileName), 'a+')
    stop_sc = get_stopWords(stopwordFilePath)
    for content in data:
        # pre-clean the lowered text before segmentation
        tmp = ''.join(filter_words(content.lower()))
        seg_words = jieba.cut(tmp, cut_all=False, HMM=True)
        # words: save content after jieba and remove stop words, words separate by blank
        words = []
        for word in seg_words:
            if word.strip() not in stop_sc:
                words.append(word.strip().encode('utf-8'))
            else:
                pass
        # collapse whitespace so each article occupies one clean line
        f.write(re.sub('\s+', ' ', ' '.join(words))+'\n')
    f.close()
def partition_data(data, number):
    """Split *data* into *number* slices; the last slice absorbs any remainder.

    If there are fewer items than partitions, a single partition holding
    all of the data is returned.
    """
    if len(data) < number:
        return [data]
    # fix: use floor division so the slice bounds stay integers
    # (true division yields floats under Python 3 and breaks slicing;
    # `//` behaves identically to `/` on ints in Python 2)
    n = len(data) // number
    part_data = []
    for i in range(number-1):
        part_data.append(data[n*i: n*(i+1)])
    # final partition takes everything that is left
    part_data.append(data[n*(number-1):])
    return part_data
def start_jieba_process(data, num):
    """Segment *data* in parallel with *num* worker processes.

    Each worker writes its share of articles to its own test<i>.txt file.
    """
    pdata = partition_data(data, num)
    procs = []
    for i in range(num):
        print "Process %d" % (i)
        fileName = 'test%d.txt' % (i)
        p = Process(target=start_jieba, args=(pdata[i], fileName))
        p.start()
        procs.append(p)
    # wait for every worker to finish
    for p in procs:
        p.join()
def main():
    """Entry point: load all articles from the DB and segment them with jieba."""
    articles = load_data()
    # We need thread to process massive articles
    # Update: python thread not use full cpu, so change to process
    if len(articles) > 100:
        start_jieba_process(articles, 4)
    else:
        start_jieba(articles, 'test.txt')
# run the segmentation pipeline when invoked as a script
if __name__ == '__main__':
    main()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,475
|
etups/PixHack2016
|
refs/heads/master
|
/crawler/pixnet/pipelines.py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import sqlite3
import sys
import os
import logging
import time
from lib.pixnetdb import PixnetDB
from lib.lib import reverse_url
class SqlitePipeline(object):
    """Scrapy item pipeline that persists articles and authors to sqlite
    via PixnetDB."""

    db = None  # PixnetDB handle, opened in open_spider

    def _tags_to_string(self, tags):
        """Join the tag list into a single comma-separated string."""
        # idiom fix: str.join replaces the manual += / trailing-comma trim
        return ','.join(tags)

    def _store_aritcle(self, item):
        """Insert one article row (method name keeps its historical typo)."""
        article_data = (
            item['title'],
            reverse_url(item['link']),
            item['content'],
            self._tags_to_string(item['tags']),
            item['pixnet_category'],
            item['personal_category'],
            0,
            item['article_id'],
            item['author_id'],
            item['date']
        )
        self.db.store_article_data(article_data)
        return

    def _store_author(self, item):
        """Insert one author row with creation/update timestamps."""
        author_data = (
            item['author_id'],
            item['author_name'],
            item['site_name'],
            0,
            time.time(),
            time.time(),
            reverse_url(item['link'])
        )
        self.db.store_author_data(author_data)

    # --- Scrapy pipeline hooks ---

    def process_item(self, item, spider):
        """Persist the scraped item and pass it on."""
        self._store_aritcle(item)
        self._store_author(item)
        # fix: a pipeline must return the item so later pipeline stages
        # (if any) still receive it
        return item

    def open_spider(self, spider):
        try:
            self.db = PixnetDB()
        except Exception:
            # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; DB failure is logged and the pipeline limps on
            logging.error(str(sys.exc_info()))
        return

    def close_spider(self, spider):
        self.db.close()
        return
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,476
|
etups/PixHack2016
|
refs/heads/master
|
/word2vec/word2vec_tf.py
|
import tensorflow as tf
from tensorflow.models.embedding.word2vec_optimized import Word2Vec, Options
from six.moves import xrange
import os
import time
class MyWord2Vec(Word2Vec):
    """Word2Vec subclass that skips the analogy-eval data load and writes
    the vocabulary file as UTF-8."""

    # override: same as Word2Vec.__init__ but without reading eval data
    def __init__(self, options, session):
        self._options = options
        self._session = session
        self._word2id = {}
        self._id2word = []
        self.build_graph()
        self.build_eval_graph()
        self.save_vocab()

    # override save_vocab so non-ASCII vocabulary does not crash the write
    def save_vocab(self):
        """Save the vocabulary to a file so the model can be reloaded."""
        opts = self._options
        with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
            for i in xrange(opts.vocab_size):
                # (removed an unused duplicate `vocab_word` computation)
                f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]).encode('utf-8'),
                                     opts.vocab_counts[i]))
def main():
    """Train word2vec on test.txt with the tuned hyper-parameters and
    checkpoint the result under tf/."""
    # set word2vec options
    options = Options()
    options.save_path = "tf/"
    options.train_data = "test.txt"
    # fix: this was misspelled 'batch_szie', which silently created a new
    # attribute and left the real batch_size at its default
    options.batch_size = 5000
    options.window_size = 4
    options.subsample = 0
    options.epochs_to_train = 5
    options.concurrent_steps = 4
    with tf.Graph().as_default():
        with tf.Session() as session:
            with tf.device("/cpu:0"):
                model = MyWord2Vec(options, session)
                for _ in xrange(options.epochs_to_train):
                    model.train()  # Process one epoch
                    #model.eval()  # Eval analogies.
                # checkpoint the trained embeddings
                model.saver.save(session,
                                 os.path.join(options.save_path, "model.ckpt"),
                                 global_step=model.global_step)
# time the full training run when invoked as a script
if __name__ == '__main__':
    start = time.time()
    main()
    spend = time.time() - start
    print ""
    print "Spend time : %d s" % (spend)
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,477
|
etups/PixHack2016
|
refs/heads/master
|
/crawler/pixnet/spiders/blog.py
|
# -*- coding: utf-8 -*-
import scrapy
import sys
import time
import datetime
from BeautifulSoup import BeautifulSoup
from BeautifulSoup import Tag
from pixnet.items import PixnetItem
from lib.pixnetdb import PixnetDB
# blog categories we crawl (domestic travel, foreign travel, fashion,
# beauty/makeup, food reviews)
target_category = [
    u"國內旅遊",
    u"國外旅遊",
    u"時尚流行",
    u"美容彩妝",
    u"美味食記"
]
class BlogSpider(scrapy.Spider):
name = "blog"
allowed_domains = ["pixnet.net"]
pixnet_url_prefix = "https://www.pixnet.net"
start_urls = (
pixnet_url_prefix + '/blog',
)
db = PixnetDB()
def parse(self, response):
for path in response.xpath('//ul[@id="navigation"]/li/ul/li'):
cat_url = path.xpath('a/@href').extract().pop()
cat_name = path.xpath('a/text()').extract().pop()
if cat_name in target_category:
yield scrapy.Request(self.pixnet_url_prefix + cat_url, callback = self.parse_category_top)
def parse_category_top(self, response):
soup = BeautifulSoup(response.body)
# get rank 1
featured = soup.find('div', {"class": "featured"})
if featured:
feature_link = featured.a.get('href')
yield scrapy.Request(feature_link, callback = self.parse_blog_content)
# get rank 2 ~
article_list = soup.find('ol', {"class": "article-list"})
if article_list:
for li in article_list.findAll('li'):
link = li.a.get('href')
yield scrapy.Request(link, callback = self.parse_blog_content)
# get othre rank page
for i in range(2, 44):
other_link = response.url + ('/hot/%d' % i)
yield scrapy.Request(other_link, callback = self.parse_category_other)
def parse_category_other(self, response):
soup = BeautifulSoup(response.body)
# get rank 2 ~
article_list = soup.find('ol', {"class": "article-list"})
if article_list:
for li in article_list.findAll('li'):
link = li.a.get('href')
yield scrapy.Request(link, callback = self.parse_blog_content)
def parse_blog_content(self, response):
soup = BeautifulSoup(response.body)
item = self._get_blog_item(response, soup)
if item is not None:
yield item
link = self._get_next_link(soup)
if link is not "":
print "[NEXT]" + link
yield scrapy.Request(link, callback = self.parse_blog_content)
link = self._get_prev_link(soup)
if link is not "":
print "[PREV]" + link
yield scrapy.Request(link, callback = self.parse_blog_content)
def _is_secret_aritcle(self, soup):
if soup.find('ul', {"class" : "secret-code-notify"}):
return True
else:
return False
def _get_blog_item(self, response, soup):
if self._is_secret_aritcle(soup):
return None
item = PixnetItem()
item['date'] = self._extract_publish_timestamp(response)
item['title'] = self._extract_title(response)
item['article_id'] = self._extract_article_id(response)
item['link'] = response.url
item['tags'] = self._extract_tags(response)
item['pixnet_category'] = self._extract_pixnet_category(response)
item['personal_category'] = self._extract_personal_category(response)
item['author_id'] = self._extract_author_id(response)
item['author_name'] = self._extract_author_name(response)
item['site_name'] = self._extract_site_name(response)
item['content'] = self._extract_content(response)
return item
def _get_next_link(self, soup):
find = soup.find('a', {"class": "quick-nav--next"})
if find:
link = find.get("href")
else:
link = ""
if not self.db.exist_article_link(link):
return link
else:
return ""
def _get_prev_link(self, soup):
find = soup.find('a', {"class": "quick-nav--pre"})
if find:
link = find.get("href")
else:
link = ""
if not self.db.exist_article_link(link):
return link
else:
return ""
def _extract_publish_timestamp(self, response):
pub_month = response.xpath('//span[@class="month"]/text()').extract_first()
pub_date = response.xpath('//span[@class="date"]/text()').extract_first()
pub_day = response.xpath('//span[@class="day"]/text()').extract_first()
pub_year = response.xpath('//span[@class="year"]/text()').extract_first()
pub_time = response.xpath('//span[@class="time"]/text()').extract_first()
date_string = "%s %s %s %s" % (pub_year, pub_month, pub_date, pub_time)
timestamp = time.mktime(datetime.datetime.strptime(date_string, \
"%Y %b %d %H:%M").timetuple())
return timestamp
def _extract_title(self, response):
extract = response.xpath('//title/text()').extract_first()
title = unicode(extract.split("@")[0])
return title
def _extract_article_id(self, response):
extract = response.xpath('//body/@data-article-id').extract_first()
article_id = long(extract)
return article_id
def _extract_tags(self, response):
extract = response.xpath('//a[@rel="tag"]/text()').extract()
tags = []
for tag in extract:
tags.append(unicode(tag))
return tags
def _extract_pixnet_category(self, response):
extract = response.xpath('//ul[@class="refer"]/li/a/text()').extract_first()
category = unicode(extract)
return category
def _extract_personal_category(self, response):
extract = response.xpath('//ul[@class="refer"]/li/a/text()').extract()[1]
category = unicode(extract)
return category
def _extract_author_id(self, response):
extract = response.xpath('//meta[@name="author"]/@content').extract_first()
author_id = extract.split('(')[0][0:-1]
return author_id
def _extract_author_name(self, response):
extract = response.xpath('//meta[@name="author"]/@content').extract_first()
author_name = unicode(extract.split('(')[1][0:-1])
return author_name
def _extract_site_name(self, response):
extract = response.xpath('//meta[@property="og:site_name"]/@content').extract_first()
site_name = unicode(extract)
return site_name
def _extract_content(self, response):
article = ""
for line in response.xpath('//p').extract():
soup = BeautifulSoup(line)
if self._need_skip_line(soup.p):
continue
elif 'script' in line:
continue
raw_text = unicode(soup.text) + '\n'
if raw_text.startswith("Skip to article") \
or raw_text.startswith("Global blog category"):
continue
elif raw_text.startswith("Posted by"):
break
elif raw_text.strip() == '':
continue
else:
article += raw_text
return article
def _need_skip_line(self, soup):
try:
if type(soup.contents[0]) is Tag:
if soup.contents[0].name == 'a' :
return True
elif soup.contents[0].name == 'img':
return True
else:
return False
elif soup.find('script') is not None:
return True
else:
return False
except Exception as e:
return False
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,478
|
etups/PixHack2016
|
refs/heads/master
|
/neuralnetwork/softmax.py
|
import numpy as np
def softmax(x):
    """Return the softmax of x: row-wise for 2-D input, over all entries
    for 1-D input.

    Fixes over the previous version: the input is copied to a float array
    first, so the caller's array is no longer mutated in place and
    integer input no longer breaks the in-place float ops.  Subtracting
    the max is the usual numerically-stable shift.
    """
    x = np.array(x, dtype=float)  # copy + float cast
    if len(x.shape) > 1:
        tmp = np.max(x, axis = 1)
        x -= tmp.reshape((x.shape[0], 1))
        x = np.exp(x)
        tmp = np.sum(x, axis = 1)
        x /= tmp.reshape((x.shape[0], 1))
    else:
        tmp = np.max(x)
        x -= tmp
        x = np.exp(x)
        tmp = np.sum(x)
        x /= tmp
    return x
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,479
|
etups/PixHack2016
|
refs/heads/master
|
/lib/lib.py
|
def reverse_url(url):
    """Return *url* with its '/'-separated segments in reverse order."""
    segments = url.split('/')
    return '/'.join(reversed(segments))
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,480
|
etups/PixHack2016
|
refs/heads/master
|
/jieba/detect_lang.py
|
import os
import langid
import glob
filter_lang = ['ja', 'ko']
def main():
    """Scan every .txt file in the current directory and count the words
    that langid classifies as Japanese or Korean.

    Writes filter.txt with one "word count" pair per line.
    """
    filter_dict = dict()
    for filename in glob.glob('./*.txt'):
        # context manager so each input file is closed promptly
        with open(filename, 'r') as f:
            for line in f:
                for word in line.split():
                    lang, conf = langid.classify(word)
                    if lang in filter_lang:
                        # dict.get replaces the explicit membership branch
                        filter_dict[word] = filter_dict.get(word, 0) + 1
    # fix: the output file was previously never closed
    with open("filter.txt", "w+") as out:
        for k, v in filter_dict.items():
            out.write("%s %d\n" % (k, v))
# run the language filter when invoked as a script
if __name__ == '__main__':
    main()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,481
|
etups/PixHack2016
|
refs/heads/master
|
/word2vec/Word2Vec.py
|
#encoding=utf-8
#!/usr/bin/python
import gensim, logging
import os
import sys
# Python 2: default to UTF-8 for implicit string conversions
reload(sys)
sys.setdefaultencoding('utf-8')
# Set features
dirnamecsvFile='test.txt'
model_name='Pixnet2016'
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class MySentences(object):
    """Memory-friendly sentence iterator over every file in *dirname*.

    Each line of each file is yielded as a whitespace-split token list.
    *dirname* must end with a path separator because it is concatenated
    directly with the file names.
    """
    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for fname in os.listdir(self.dirname):
            with open(self.dirname + fname, 'rb') as handle:
                raw = handle.read()
            for line in raw.splitlines():
                yield line.split()
def trainModelByfile():
    """Train a Word2Vec model from the files under scrapy_wiki/."""
    sentences = MySentences('scrapy_wiki/')  # a memory-friendly iterator
    return gensim.models.Word2Vec(sentences)
def saveModel(trainModel,modelName):
    """Persist *trainModel* to disk under *modelName*."""
    trainModel.save(modelName)
def loadModel(modelName):
    """Load a previously saved Word2Vec model from *modelName*."""
    return gensim.models.Word2Vec.load(modelName)
def usingModel(model):
    """Prompt for a word, print its vector and its top-5 nearest neighbours."""
    similarity = raw_input("Input word:")
    print model[similarity]
    print('most_similar:' +similarity)
    for w in model.most_similar(similarity, topn=5):
        print w[0], w[1]
def fromArticleFile():
    """Train a CBOW Word2Vec model from scrapy_wiki/ and save it as Pixnet2016."""
    sentences = MySentences('scrapy_wiki/')  # a memory-friendly iterator
    # fix: 'sored_vocab' was a typo for gensim's keyword 'sorted_vocab'
    # (an unexpected keyword raises TypeError in gensim's constructor)
    model = gensim.models.word2vec.Word2Vec(sentences, size=200, window=4, min_count=1, sg=0,\
                                            hs=1, negative=0, cbow_mean=1, workers=3, iter=5,\
                                            sorted_vocab=1, batch_words=5000)
    saveModel(model, model_name)
def fromSavedModel():
    """Reload the saved Pixnet2016 model and start the interactive query loop."""
    new_model = gensim.models.Word2Vec.load(model_name)
    usingModel(new_model)
# train-and-save on import/run; switch to fromSavedModel() to query instead
fromArticleFile()
# fromSavedModel()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,482
|
etups/PixHack2016
|
refs/heads/master
|
/crawler/pixnet/test/test_pixnetdb.py
|
#!/usr/bin/python
import os
import sys
import unittest
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from lib.pixnetdb import PixnetDB
from lib.lib import reverse_url
class PixnetDBTest(unittest.TestCase):
    """Round-trip integration tests for PixnetDB (runs against a real
    sqlite database file)."""

    def setUp(self):
        # fresh DB connection per test
        self.db = PixnetDB()

    def test_modify_article_data(self):
        """Insert, read back, look up, and delete one article row."""
        article_data = (
            'test_title',
            reverse_url('http://test.link.com/'),
            'This is content of test article.',
            'tag1,tag2',
            'test_category',
            'personal_category',
            0,
            1234,
            'raix',
            12345678
        )
        # note: the stored link is reversed again for the existence lookup
        article_link = reverse_url(article_data[1])
        article_content = article_data[2]
        article_id = article_data[7]
        # Test insert article data
        self.db.store_article_data(article_data)
        self.assertTrue(self.db.exist_article_id(article_id))
        # Test get article data
        select = self.db.get_article_data(article_id)
        for i in range(len(article_data)):
            self.assertEqual(article_data[i], select[i])
        # Test exist_author
        self.assertTrue(self.db.exist_article_link(article_link))
        # Test get article content by article id
        select = self.db.get_articles_by("article_id", article_id)
        self.assertEqual(article_content, select[0][0])
        # Test delete article data
        self.db.delete_article_data(article_id)
        self.assertFalse(self.db.exist_article_id(article_id))

    def test_modify_author_data(self):
        """Insert, read back, update, and delete one author row."""
        author_data = (
            'raix',
            'raix lai',
            'Raix blog',
            0,
            12345678,
            12345678,
            reverse_url('http://test.link.com/')
        )
        author_id = author_data[0]
        # Test insert author data
        self.db.store_author_data(author_data)
        self.assertTrue(self.db.exist_author(author_id))
        # Test get author data
        select = self.db.get_author_data(author_id)
        for i in range(len(author_data)):
            self.assertEqual(author_data[i], select[i])
        # Test exist_author
        self.assertTrue(self.db.exist_author(author_id))
        # Test modify author data
        modify_data = (
            'raix',
            'raix lai',
            'Raix blog',
            0,
            12345678,
            12345700,
            'http://test.link.com/modify'
        )
        self.db.update_author_data(modify_data)
        select = self.db.get_author_data(author_id)
        for i in range(len(modify_data)):
            self.assertEqual(modify_data[i], select[i])
        # Test delete author data
        self.db.delete_author_data(author_id)
        self.assertFalse(self.db.exist_author(author_id))

    def tearDown(self):
        # release the sqlite connection
        self.db.close()
# run the DB round-trip tests when invoked directly
if __name__ == '__main__':
    unittest.main()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,483
|
etups/PixHack2016
|
refs/heads/master
|
/crawler/pixnet/items.py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PixnetItem(scrapy.Item):
    """Container for one scraped Pixnet blog article plus author metadata."""
    date = scrapy.Field()               # publish time as a unix timestamp
    title = scrapy.Field()
    link = scrapy.Field()               # article URL
    content = scrapy.Field()            # plain-text article body
    tags = scrapy.Field()               # list of tag strings
    pixnet_category = scrapy.Field()    # site-wide category
    personal_category = scrapy.Field()  # author's own category
    article_count = scrapy.Field()
    article_id = scrapy.Field()
    author_id = scrapy.Field()
    author_name = scrapy.Field()
    site_name = scrapy.Field()          # blog display name
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,484
|
etups/PixHack2016
|
refs/heads/master
|
/neuralnetwork/rnn.py
|
import tensorflow as tf
from tensorflow.models.rnn import rnn, rnn_cell
import numpy as np
from softmax import softmax
# Simulate data: a toy 7-word vocabulary with one-hot labels and
# hand-picked 5-dimensional "word2vec" embeddings
one_hot = {'a': [1,0,0,0,0,0,0],
           'b': [0,1,0,0,0,0,0],
           'c': [0,0,1,0,0,0,0],
           'd': [0,0,0,1,0,0,0],
           'e': [0,0,0,0,1,0,0],
           'f': [0,0,0,0,0,1,0],
           'g': [0,0,0,0,0,0,1]}
w2v = {'a': [0,0,0,0,0],
       'b': [0,0,0.1,0,0.1],
       'c': [0,0,0,0.1,0.1],
       'd': [0.1,0.1,0,0,0],
       'e': [-0.1,0,0.1,-0.1,0],
       'f': [0,-0.1,0,0,0.1],
       'g': [0,0,-0.2,0,-0.1]}
text = 'a b c a b d e g f c a b e d f f f g e b c d a b e g f d a a b c e g f c a b c d'.split(' ')
train = np.array([w2v[w] for w in text])
# label[i] is the one-hot of the *next* word; the final position gets all-zeros
label = [one_hot[text[i+1]] for i in range(len(text) - 1)]
label.append([0,0,0,0,0,0,0])
label = np.array(label)
test = 'a b c d e f g a b c'.split(' ')
test = np.array([w2v[w] for w in test])
'''
input vecter: x.dot(L)
input weight: W
hidden
'''
# Parameters
training_iter = 10000
batch_size = 3
display_step = 10
dim_input = 5     # embedding dimension
dim_hidden = 2    # LSTM hidden size
num_steps = 10    # unrolled sequence length
num_words = 7     # vocabulary size
# Placeholders for a batch of step sequences and their next-word labels
x = tf.placeholder('float', [None, num_steps, dim_input])
y = tf.placeholder('float', [None, num_words])
# Output projection weights
weights = {
    'out': tf.Variable(tf.random_normal([dim_hidden, num_words]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_words]))
}
def RNN(x, weight, biases):
    """Run a single-layer LSTM over *x* and project the last output to
    vocabulary logits.

    x arrives as [batch_size, num_steps, dim_input] and is reshaped into
    the per-step list layout that rnn.rnn() expects.
    """
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, dim_input])
    x = tf.split(0, num_steps, x)
    lstm_cell = rnn_cell.BasicLSTMCell(dim_hidden, forget_bias=1.0)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # fix: the bias was previously added to the weight matrix *inside*
    # matmul; the affine projection is matmul(h, W) + b
    return tf.matmul(outputs[-1], weight['out']) + biases['out']
pred = RNN(x, weights, biases)
# softmax cross-entropy against the one-hot next-word labels
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# initializing
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    step = 1
    i = 0
    while step*batch_size < training_iter:
        # NOTE(review): batch_x consumes batch_size*num_steps rows but the
        # wrap-around check below compares against batch_size only — confirm
        # the intended batching
        batch_x = train[i*batch_size*num_steps:((i+1)*batch_size*num_steps)]
        batch_x = batch_x.reshape((batch_size, num_steps, dim_input))
        batch_y = label[i*batch_size:((i+1)*batch_size)]
        if (i+1)*batch_size >= len(train):
            i = 0
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        if step % display_step == 0:
            acc = sess.run(accuracy, feed_dict={x: batch_x, y: batch_y})
            loss = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    # predict on the held-out toy sequence and show the distribution
    test = test.reshape((1, num_steps, dim_input))
    test_pred = sess.run(pred, feed_dict={x: test})
    print softmax(test_pred)
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,485
|
etups/PixHack2016
|
refs/heads/master
|
/count.py
|
from lib.pixnetdb import PixnetDB
def main():
    """Print how many articles and authors the local PixnetDB holds."""
    db = PixnetDB()
    count = db.get_article_count()
    print "PixnetDB contains %d articles." % count
    count = db.get_author_count()
    print "PixnetDB contains %d authors." % count
# print the counts when invoked as a script
if __name__ == '__main__':
    main()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,486
|
etups/PixHack2016
|
refs/heads/master
|
/neuralnetwork/sigmoid.py
|
import numpy as np
def sigmoid(x):
    """Elementwise logistic function 1 / (1 + e^-x)."""
    return 1. / (1 + np.exp(-x))
def grad_sigmoid(f):
    """Gradient of the sigmoid expressed through its output f: f * (1 - f)."""
    return f * (1 - f)
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,487
|
etups/PixHack2016
|
refs/heads/master
|
/lib/pixnetdb.py
|
import sqlite3
import os
import re
import logging
import time
from lib import reverse_url
class PixnetDB(object):
sql_files = {
"pixnet_aritcles": "create_articles_table.sql",
"pixnet_authors": "create_authors_table.sql"
}
db_name = 'pixnet.db'
def __init__(self):
pixhak_path = os.environ.get('PIXHACK_PATH')
if pixhak_path:
db_path = os.path.join(pixhak_path, self.db_name)
else:
db_path = db_name
self.sql_conn = sqlite3.connect(db_path)
self._create_tables(self.sql_conn.cursor())
def _create_tables(self, cursor):
for name, filename in self.sql_files.items():
self._create_table(name, filename, cursor)
    def _create_table(self, tablename, filename, cursor):
        """Execute the CREATE TABLE statement stored in *filename*."""
        logging.info("Create table %s.", tablename)
        with open(self._get_sql_file(filename), 'r') as f:
            sqlContext = f.read()
            cursor.execute(sqlContext)
        self.sql_conn.commit()
def _get_sql_file(self, name):
sql_dir = os.environ.get('SQL_PATH')
if sql_dir:
return os.path.join(sql_dir, name)
else:
dirname = os.path.dirname(os.path.realpath(__file__))
return os.path.join(dirname, 'sql', name)
def exist_article_link(self, link):
rlink = reverse_url(link)
c = self.sql_conn.cursor()
c.execute("SELECT link FROM pixnet_aritcles WHERE link = ?", (rlink, ))
data = c.fetchone()
if data is None:
return False
else:
return True
def exist_article_id(self, article_id):
c = self.sql_conn.cursor()
c.execute("SELECT link FROM pixnet_aritcles WHERE article_id = ?", (article_id, ))
data = c.fetchone()
if data is None:
return False
else:
return True
def store_article_data(self, data):
c = self.sql_conn.cursor()
link = data[1]
if self.exist_article_link(link):
pass
else:
c.execute("INSERT INTO pixnet_aritcles VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", data)
self.sql_conn.commit()
def get_article_count(self):
c = self.sql_conn.cursor()
c.execute("SELECT count(*) FROM pixnet_aritcles")
return c.fetchone()
def get_all_aritcle_data(self):
c = self.sql_conn.cursor()
c.execute("SELECT * FROM pixnet_aritcles")
return c.fetchall()
def get_article_data(self, article_id):
c = self.sql_conn.cursor()
sql = "SELECT * FROM pixnet_aritcles WHERE article_id = ?"
c.execute(sql, (article_id, ))
return c.fetchone()
def get_articles(self):
c = self.sql_conn.cursor()
sql = "SELECT content FROM pixnet_aritcles"
c.execute(sql)
return c.fetchall()
def get_articles_by(self, column, condition):
c = self.sql_conn.cursor()
if re.match('[\w_]+', column):
sql = "SELECT content FROM pixnet_aritcles WHERE %s = ?" % (column)
c.execute(sql, (condition, ))
return c.fetchall()
else:
logging.error("Column name contains invalid characters.")
return None
def delete_article_data(self, article_id):
c = self.sql_conn.cursor()
sql = "DELETE FROM pixnet_aritcles WHERE article_id = ?"
c.execute(sql, (article_id, ))
self.sql_conn.commit()
def exist_author(self, author_id):
c = self.sql_conn.cursor()
c.execute("SELECT * FROM pixnet_authors WHERE author_id = ?", (author_id, ))
data = c.fetchone()
if data is None:
return False
else:
return True
def store_author_data(self, data):
c = self.sql_conn.cursor()
author_id = data[0]
if self.exist_author(author_id):
self.update_author_data(data)
else:
c.execute("INSERT INTO pixnet_authors VALUES (?, ?, ?, ?, ?, ?, ?)", data)
self.sql_conn.commit()
def get_all_author_data(self):
c = self.sql_conn.cursor()
c.execute("SELECT * FROM pixnet_authors")
return c.fetchall()
def get_author_data(self, author_id):
c = self.sql_conn.cursor()
sql = "SELECT * FROM pixnet_authors WHERE author_id = ?"
c.execute(sql, (author_id, ))
return c.fetchone()
def get_author_count(self):
c = self.sql_conn.cursor()
c.execute("SELECT count(*) FROM pixnet_authors")
return c.fetchone()
def update_author_data(self, data):
sql = "UPDATE pixnet_authors SET last_update_date = ?, last_article_link = ? WHERE author_id = ?"
c = self.sql_conn.cursor()
author_id = data[0]
last_update = data[5]
last_article_link = data[6]
c.execute(sql, (last_update, last_article_link, author_id))
self.sql_conn.commit()
def delete_author_data(self, author_id):
c = self.sql_conn.cursor()
sql = "DELETE FROM pixnet_authors WHERE author_id = ?"
c.execute(sql, (author_id, ))
self.sql_conn.commit()
def close(self):
self.sql_conn.close()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,488
|
etups/PixHack2016
|
refs/heads/master
|
/crawler/pixnet/test/test_blog.py
|
#!/usr/bin/python
import os
import sys
import unittest
from BeautifulSoup import BeautifulSoup
sys.path.insert(1, os.path.join(sys.path[0], '../../'))
from pixnet.spiders.blog import BlogSpider
class BlogTest(unittest.TestCase):
    """Regression tests for BlogSpider's article-page parsing helpers,
    driven by two saved HTML fixtures under testcase/."""

    def setUp(self):
        self.spider = BlogSpider()
        base = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(base, 'testcase/329049123.html'), 'r') as f:
            self.soup1 = BeautifulSoup(f.read())
        with open(os.path.join(base, 'testcase/43919692.html'), 'r') as f:
            self.soup2 = BeautifulSoup(f.read())

    def test_is_secret_aritcle(self):
        # fixture 1 is a password-protected article, fixture 2 is public
        self.assertTrue(self.spider._is_secret_aritcle(self.soup1))
        self.assertFalse(self.spider._is_secret_aritcle(self.soup2))

    def test_get_next_link(self):
        # neither fixture has a "next article" link
        self.assertEqual("", self.spider._get_next_link(self.soup1))
        self.assertEqual("", self.spider._get_next_link(self.soup2))

    def test_get_prev_link(self):
        self.assertEqual("", self.spider._get_prev_link(self.soup1))
        self.assertEqual("http://bajenny.pixnet.net/blog/post/43823356-2016%e5%ae%9c%e8%98%ad%e5%9c%8b%e9%9a%9b%e7%ab%a5%e7%8e%a9%e7%af%80%7e%e7%ab%a5%e7%8e%a9%e7%af%80%e5%ae%9c%e8%98%ad%e6%b0%91%e5%ae%bf-%e5%ae%9c%e8%98%ad%e9%a3%af%e5%ba%97", \
            self.spider._get_prev_link(self.soup2))

    def tearDown(self):
        pass


if __name__ == '__main__':
    unittest.main()
|
{"/crawler/pixnet/pipelines.py": ["/lib/pixnetdb.py", "/lib/lib.py"], "/crawler/pixnet/test/test_pixnetdb.py": ["/lib/pixnetdb.py", "/lib/lib.py"]}
|
27,493
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/urls.py
|
from django.conf.urls import url
from dal import autocomplete
from django.urls import path, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
from . import views
from .models import Cause
# Route table: public pages, auth, agency/profile management, causes,
# donations/volunteering, search, and the embedded shiny app.
# NOTE: pattern order matters -- several prefixes overlap (e.g. the
# agencyRequestedDonations routes with and without a username).
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^agencies$', views.agencies, name='agencies'),
    url(r'^trending$', views.trending, name='trending'),
    url(r'^about$', views.about, name='about'),
    url('login/', auth_views.LoginView.as_view(), name='login'),
    url(r'^postSignIn/', views.postSignIn, name='postsignin'),
    url('logout/', views.logout_view, name='logout'),
    url(r'^signUp$', views.signUp, name='signUp'),
    url(r'^agencySignUp/', views.agencySignUp, name='agencySignUp'),
    url(r'^profile/(?P<username>.+)/', views.profile, name='profile'),
    url(r'^agencyRequestedDonations/(?P<username>.+)/', views.agencyRequestedDonations, name='agencyRequestedDonations'),
    url(r'^agencyRequestedDonations/', views.agencyRequestedDonations, name='agencyRequestedDonations'),
    url(r'^agencyRequestedVolunteers/(?P<username>.+)/', views.agencyRequestedVolunteers, name='agencyRequestedVolunteers'),
    url(r'^agencyRequestedVolunteers/', views.agencyRequestedVolunteers, name='agencyRequestedVolunteers'),
    url(r'^addRequests/(?P<username>.+)/', views.addRequests, name='addRequests'),
    url(r'^addVolunteerRequest/(?P<username>.+)/', views.addVolunteerRequest, name='addVolunteerRequest'),
    url(r'^createProfile/', views.createProfile, name='createProfile'),
    url(r'^agencyProfile/(?P<uname>.+)', views.agencyProfile, name='agencyProfile'),
    url(r'^createCause/', views.createCause, name='createCause'),
    url(r'^addAgency/(?P<username>.+)', views.addAgency, name='addAgency'),
    url(r'^pledgeSupport/(?P<username>.+)/', views.pledgeSupport, name='pledgeSupport'),
    url(r'^activeCauses/', views.activeCauses, name='activeCauses'),
    url(r'^cause/(?P<uname>.+)', views.causePage, name='causePage'),
    url(r'^activeDonations/', views.activeDonations, name='activeDonations'),
    url(r'^donationPredictor/', views.donationPredictor, name='donationPredictor'),
    url(r'^activeVolunteerRequests/', views.activeVolunteerRequests, name='activeVolunteerRequests'),
    url(r'^search', views.search, name='search'),
    path('serve_shiny/', include('serve_shiny.urls')),
    url(r'^finalSubmitDonation/(?P<id>.+)/', views.finalSubmitDonation, name='finalSubmitDonation'),
    url(r'^PledgeToVolunteer/(?P<id>.+)/', views.PledgeToVolunteer, name='PledgeToVolunteer'),
    url('test-autocomplete/$', autocomplete.Select2QuerySetView.as_view(model=Cause), name='select2_fk',),
    # serve user-uploaded media files (development setup)
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,494
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/migrations/0001_initial.py
|
# Generated by Django 2.2.5 on 2020-10-05 20:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phone_field.models
class Migration(migrations.Migration):
    """Initial schema: Agencies, Cause, News_Articles, the request models,
    and Profile.  Auto-generated by Django 2.2.5 -- do not edit operations;
    add a new migration instead."""

    initial = True

    dependencies = [
        # depends on whichever user model the project has swapped in
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Agencies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=50)),
                ('address', models.CharField(max_length=100)),
                ('url', models.URLField()),
                ('phone', phone_field.models.PhoneField(max_length=31)),
                ('username', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('picture', models.ImageField(blank=True, default='defaultProfilePic.jpg', null=True, upload_to='media/')),
            ],
        ),
        migrations.CreateModel(
            name='Cause',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('location', models.CharField(max_length=100)),
                ('username', models.CharField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='News_Articles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.URLField(blank=True, max_length=100, null=True)),
                ('url', models.URLField(max_length=100)),
                ('title', models.CharField(max_length=100, null=True)),
                ('description', models.CharField(max_length=1000, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Request_In_Progress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.CharField(max_length=250, null=True)),
                ('amount_total', models.DecimalField(decimal_places=2, max_digits=10)),
                ('amount_fulfilled', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
                ('is_complete', models.BooleanField(default=False)),
                ('date_requested', models.DateField(auto_now_add=True)),
                # requests outlive a deleted agency (SET_NULL)
                ('agency', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='myapp.Agencies')),
            ],
        ),
        migrations.CreateModel(
            name='Request_Fulfilled',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fulfilled_amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('promised_amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('promised_arrival', models.DateField()),
                ('request_in_progress', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='myapp.Request_In_Progress')),
                ('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('picture', models.ImageField(blank=True, default='defaultProfilePic.jpg', null=True, upload_to='media/')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='agencies',
            name='admin_users',
            field=models.ManyToManyField(blank=True, to='myapp.Profile'),
        ),
        migrations.AddField(
            model_name='agencies',
            name='causes',
            field=models.ManyToManyField(blank=True, to='myapp.Cause'),
        ),
    ]
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,495
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/scrapper.py
|
# import libraries
import os
import django
from django.core.files import File
os.environ["DJANGO_SETTINGS_MODULE"] = 'mysite.settings'
django.setup()
from myapp import models
#import cssutils
import urllib.request
from urllib.request import urlopen
import time
from bs4 import BeautifulSoup
i = 1
def getIntro(url):
    """Fetch `url` and return the hero-article deck text, or None if absent."""
    with urllib.request.urlopen(url) as page:
        markup = page.read()
        parsed = BeautifulSoup(markup, "html.parser")
        deck = parsed.find("h2", attrs={"class":"Heading__HeadingStyled-sc-1w5xk2o-0-h2 eFMthj Heading-sc-1w5xk2o-1 HeroArticle__ArticleDeck-sc-1gllxr3-0 juNLjK"})
        if deck:
            return deck.get_text()
def getPicture(url):
    """Fetch `url` and return its first <figure> as prettified HTML,
    or None when the page carries no figure element."""
    with urllib.request.urlopen(url) as page:
        markup = page.read()
        parsed = BeautifulSoup(markup, "html.parser")
        figure = parsed.find("figure")
        if figure:
            figure = figure.prettify()
        return figure
# Main scrape loop: walk Reuters' tsunami archive page by page forever,
# storing every article not already in the database, then sleep 5s.
# FIX: removed `results = open('record.txt', 'w')` -- it was reopened in
# write mode every iteration, never written to, and never closed (leaking a
# handle and truncating the file each pass).  Also removed the redundant
# first News_Articles(...) construction whose fields were immediately
# overwritten, and the unused `betterSoup` / `images` locals.
while True:
    print("Starting Up")
    page_number = str(i)
    print(page_number)
    quote_page = 'https://www.reuters.com/news/archive/tsunami?view=page&page=' + page_number
    # query the website and return the html to the variable 'html'
    with urllib.request.urlopen(quote_page, timeout=10) as page:
        html = page.read()
    soup = BeautifulSoup(html, "html.parser")
    # the article list lives in the main column
    column = soup.find("div", attrs={"class": "column1 col col-10"})
    newsArticles = column.find_all("article", attrs={"class": "story"})
    for article in newsArticles:
        url = 'https://www.reuters.com' + article.find('a').get('href')
        # only store articles we have not seen before
        if models.News_Articles.objects.filter(url=url).count() == 0:
            title = article.find('h3', attrs={"class": "story-title"}).text.lstrip()
            description = article.find('p').text
            image = article.find('img').get('org-src')
            art = models.News_Articles(url=url, title=title,
                                       description=description, picture=image)
            art.save()
            print("[", art.id, "]", art.title)
    print("-------------------------------------------")
    print("Resting")
    i = i + 1
    time.sleep(5)
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,496
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/views.py
|
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import logout
from django.db.models import Q
from cities_light.models import Country, City
import re, string
import geoip2.database
import geopy.distance
from geopy.geocoders import Nominatim
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
from sklearn.metrics import r2_score
import seaborn as sns
from imblearn.over_sampling import SMOTE
from django.core import serializers
from . import forms
from myapp.forms import AgencyForm, ProfileForm, HideCompletedRequestsForm, AddVolunteerRequestForm
from myapp.models import Profile, Cause, News_Articles, Agencies, Request_Fulfilled, Request_In_Progress, Volunteering, Social_Media_Post, Agency_Social_Media_Post
from . import models
import json
# Helper functions
def checkAuth(request):
    """Return True when the request comes from a logged-in user."""
    # bool() keeps the return a plain bool even if is_authenticated is a
    # lazy/callable wrapper, so callers comparing `== False` keep working.
    return bool(request.user.is_authenticated)
# Create your views here.
def home(request):
    """Landing page: latest pictured articles, a handful of agencies, and a
    GeoIP distance lookup.

    NOTE(review): a hard-coded sample IP overrides the detected client IP,
    and the distance is only printed -- confirm whether this is debug code.
    """
    title = "Home "
    # newest pictured articles first, capped at four for the hero section
    articles = models.News_Articles.objects.all().order_by('-picture')[:4]
    Agenciess = models.Agencies.objects.all()[:6]
    # prefer the proxy-forwarded client address when present
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        ip = forwarded.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    reader = geoip2.database.Reader('../GeoLite2-City_20201013/GeoLite2-City.mmdb')
    ip = '24.94.15.83'  # NOTE(review): overrides the real client IP
    response = reader.city(ip)
    location1 = (response.location.latitude, response.location.longitude)
    geolocator = Nominatim(user_agent="my_user_agent")
    loc = geolocator.geocode("Cleveland, OH", exactly_one=False)[0]
    location2 = (loc.latitude, loc.longitude)
    print(City.objects.filter(latitude=46.97537))
    distance = geopy.distance.distance(location1, location2).miles
    print(distance)
    reader.close()
    context = {
        "user": request.user,
        "title": title,
        "articles": articles,
        "agencies": Agenciess,
        "ranger": range(0, 5),
        "is_user": checkAuth(request),
    }
    return render(request, 'main/index.html', context=context)
def agencies(request):
    """List all agencies, optionally filtered by a city chosen via POST."""
    title = "Agencies "
    Agenciess = models.Agencies.objects.all()
    agency_cities = Agencies.objects.values_list('city', flat=True)
    print("here are the cities: ")
    print(agency_cities)
    # only offer cities that actually have an agency
    cities = City.objects.all().filter(id__in=agency_cities)
    if request.method == 'POST':
        city_id = request.POST['city_id']
        # FIX: was `city_id is not ""` -- identity comparison with a string
        # literal is unreliable (and a SyntaxWarning on Python 3.8+).
        if city_id != "":
            selected_item = get_object_or_404(City, pk=request.POST.get('city_id'))
            Agenciess = Agencies.objects.filter(city=selected_item)
    context = {
        "title": title,
        "cities": cities,
        "agencies": Agenciess,
        "ranger": range(0, 3),
        "is_user": checkAuth(request),
    }
    return render(request, 'main/agencies.html', context=context)
def trending(request):
    """Trending-news page: every stored article, pictured ones first."""
    context = {
        "title": "Trending News ",
        "articles": models.News_Articles.objects.all().order_by('-picture'),
        "ranger": range(0, 5),
        "is_user": checkAuth(request),
    }
    return render(request, 'main/trending.html', context=context)
def about(request):
    """Static about-us page."""
    context = {
        "title": "About Us ",
        "is_user": checkAuth(request),
    }
    return render(request, 'main/about.html', context=context)
def signIn(request):
    """Render the sign-in form."""
    context = {
        "title": "Sign In ",
        "is_user": checkAuth(request),
    }
    return render(request, "main/signIn.html", context=context)
def postSignIn(request):
    """Authenticate the posted credentials; redirect home on success,
    re-render the sign-in page with an error message otherwise."""
    is_user = request.POST.get('is_user')
    passw = request.POST.get("pass")
    user = authenticate(request, is_user=is_user, password=passw)
    if user is None:
        context = {
            "title": "Invalid ",
            "msg": "invalid credentials",
            "is_user": checkAuth(request),
        }
        return render(request, "main/signIn.html", context=context)
    return HttpResponseRedirect("main/index.html")
def logout_view(request):
    """End the current session and send the visitor to the login page."""
    logout(request)
    return HttpResponseRedirect("/login/")
def signUp(request):
    """Show the registration form; create the account and go home on a
    valid POST, otherwise re-render with the bound (error-carrying) form."""
    if request.method == "POST":
        form_instance = forms.RegistrationForm(request.POST)
        if form_instance.is_valid():
            form_instance.save()
            return HttpResponseRedirect("/")
    else:
        form_instance = forms.RegistrationForm()
    context = {
        "form": form_instance,
        "title": "registration",
        "is_user": checkAuth(request),
    }
    return render(request, "registration/signUp.html", context=context)
def agencyProfile(request, uname=None):
    """Agency profile page: social posts, donation requests, volunteer
    requests, and the admin-only 'hide completed' toggle.

    ?delete=<id> removes a Request_In_Progress row before rendering.
    """
    delete = request.GET.get('delete', 0)
    if delete != 0:
        Request_In_Progress.objects.filter(id=delete).delete()
    title = "Agency Profile"
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    # FIX: `posts` now defaults to an empty list -- previously the
    # DoesNotExist branch below referenced it even though it was only
    # assigned inside a try whose lookup could fail first (NameError).
    # The first Agencies lookup is also moved inside the try so an unknown
    # uname flows into the not-an-agency rendering instead of a 500.
    posts = []
    has_posts = False
    try:
        agency = Agencies.objects.get(username=uname)
        posts = Agency_Social_Media_Post.objects.filter(author=agency).order_by('-date_posted')
        print(posts)
        if posts:
            has_posts = True
            print("here")
        else:
            has_posts = False
    except:
        has_posts = False
    try:
        agency = Agencies.objects.get(username=uname)
        # NOTE(review): the naming looks inverted -- "in_progress" counts
        # completed rows and vice versa; kept as-is to match the template.
        requests_in_progress = Request_In_Progress.objects.filter(is_complete=True, agency=agency).count()
        requests_completed = Request_In_Progress.objects.filter(is_complete=False, agency=agency).count()
        volunteering_request = Volunteering.objects.filter(agency=agency)
        if request.user not in agency.admin_users.all():
            is_admin = False
        else:
            is_admin = True
        # persist the "hide completed requests" preference on POST
        if request.method == "POST":
            profile = Profile.objects.get(user=request.user)
            completed_form = HideCompletedRequestsForm(request.POST, instance=profile)
            if completed_form.is_valid():
                completed_form.save()
        try:
            profile = Profile.objects.get(user=request.user)
            is_hidden = HideCompletedRequestsForm(instance=profile)
        except:
            is_hidden = HideCompletedRequestsForm()
        hidden_checked = is_hidden['requests_view_hide_completed'].value()
        if hidden_checked:
            requests = Request_In_Progress.objects.filter(is_complete=False, agency=agency)
        else:
            requests = Request_In_Progress.objects.filter(agency=agency)
        # kept for behavior parity: raises Profile.DoesNotExist if the
        # viewer has no profile (as the original did)
        instance = models.Profile.objects.get(user=request.user)
        causes = agency.causes
        if request.user in agency.admin_users.all():
            is_personal_agency = True
        else:
            is_personal_agency = False
        is_agency = True
        if agency.only_volunteer:
            volunteer_only = True
        else:
            volunteer_only = False
        context = {
            "title": title,
            "volunteer_only": volunteer_only,
            "is_user": checkAuth(request),
            "user": request.user,
            "username": uname,
            "has_posts": has_posts,
            "posts": posts,
            "requests": requests,
            "is_agency": is_agency,
            "is_admin": is_admin,
            "requests_in_progress": requests_in_progress,
            "requests_completed": requests_completed,
            "volunteering_request": volunteering_request,
            "is_hidden": is_hidden,
            "agency": agency,
            "causes": causes,
            "is_personal_agency": is_personal_agency
        }
        return render(request, 'main/agencyProfile.html', context=context)
    except models.Agencies.DoesNotExist:
        is_agency = False
        is_personal_agency = False
        context = {
            "title": title,
            "is_user": checkAuth(request),
            "user": request.user,
            "has_posts": has_posts,
            "posts": posts,
            "is_agency": is_agency,
            "is_personal_agency": is_personal_agency,
            "username": uname,
        }
        return render(request, 'main/agencyProfile.html', context=context)
def agencySignUp(request):
    """Register a new agency; its profile username is the capitalized
    agency name with all whitespace removed."""
    is_user = request.POST.get('is_user')
    passw = request.POST.get("pass")
    user = authenticate(request, is_user=is_user, password=passw)
    if request.method == "POST":
        form_instance = forms.AgencyForm(request.POST, request.FILES)
        if form_instance.is_valid():
            instance = form_instance.save(commit=False)
            instance.user = request.user
            uname = re.sub(r"\s+", "", string.capwords(instance.name))
            instance.username = uname
            instance.save()
            # the creator becomes the first admin
            instance.admin_users.add(request.user)
            return redirect('agencyProfile', uname=uname)
    else:
        form_instance = forms.AgencyForm()
    context = {
        "form": form_instance,
        "e": is_user,
        "signedIn": True,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/agencySignUp.html', context=context)
def profile(request, username=None):
    """User profile page: social posts, agencies the user administers, and
    volunteering events joined -- details only for the owner's own page."""
    title = "Profile"
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    has_posts = False
    posts = []
    has_agency = False
    user_agency = []
    has_event = False
    user_events = []
    try:
        user_info = models.User.objects.get(username=username)
        if user_info == request.user:
            is_personal_profile = True
            profile = Profile.objects.get(user=request.user)
            print(profile.number_of_donations)
            print(profile.number_of_volunteering_participations)
            # agencies this user can administer
            for agency in Agencies.objects.all():
                if request.user in agency.admin_users.all():
                    has_agency = True
                    user_agency.append(agency)
            # volunteering events this user joined
            for v in Volunteering.objects.all():
                if request.user in v.volunteers.all():
                    has_event = True
                    user_events.append(v)
            try:
                posts = Social_Media_Post.objects.filter(author=request.user).order_by('-date_posted')
                if posts:
                    has_posts = True
            except:
                has_posts = False
        else:
            is_personal_profile = False
        is_an_account = True
        context = {
            "title": title,
            "is_user": checkAuth(request),
            "user": request.user,
            "has_posts": has_posts,
            "posts": posts,
            "username": username,
            "has_event": has_event,
            "user_events": user_events,
            "user_agency": user_agency,
            "is_an_account": is_an_account,
            "user_info": user_info,
            "has_agency": has_agency,
            "is_personal_profile": is_personal_profile,
        }
        return render(request, 'main/profile.html', context=context)
    except models.User.DoesNotExist:
        return HttpResponseRedirect("/")
def createProfile(request):
    """Edit the signed-in user's profile; redirect to it on a valid POST."""
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    is_user = request.POST.get('is_user')
    passw = request.POST.get("pass")
    user = authenticate(request, is_user=is_user, password=passw)
    instance = get_object_or_404(Profile, user=request.user)
    if request.method == "POST":
        form_instance = forms.ProfileForm(request.POST, request.FILES, instance=instance)
        if form_instance.is_valid():
            instance = form_instance.save(commit=False)
            instance.user = request.user
            instance.save()
            return redirect('profile', username=instance.user.username)
    else:
        form_instance = forms.ProfileForm()
    context = {
        "form": form_instance,
        "title": "Create Profile",
        "is_user": checkAuth(request),
    }
    return render(request, "main/createProfile.html", context=context)
def createCause(request):
    """Create a Cause; its URL slug is the title with whitespace stripped."""
    is_user = request.POST.get('is_user')
    passw = request.POST.get("pass")
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    if request.method == "POST":
        form_instance = forms.CauseForm(request.POST)
        if form_instance.is_valid():
            instance = form_instance.save(commit=False)
            instance.username = re.sub(r"\s+", "", instance.title)
            print(instance.username)
            instance.save()
            return HttpResponseRedirect("/")
    else:
        form_instance = forms.CauseForm()
    context = {
        "form": form_instance,
        "e": is_user,
        "signedIn": True,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/createCause.html', context=context)
def pledgeSupport(request, username=None):
    """Let an agency admin pledge support for one or more causes, posting a
    social-media announcement for each pledge."""
    agency = Agencies.objects.get(username=username)
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    if request.user not in agency.admin_users.all():
        return HttpResponseRedirect("/")
    if request.method == "POST":
        form_instance = forms.PledgeSupportForm(request.POST, instance=agency)
        if form_instance.is_valid():
            # FIX: POST.get('causes') returned a single string, and the loop
            # walked it character by character -- breaking every multi-digit
            # cause id.  getlist yields each selected id intact.  Also
            # renamed the loop variable, which shadowed the builtin `id`.
            cause_ids = request.POST.getlist('causes')
            for cause_id in cause_ids:
                agency.causes.add(cause_id)
                cs = Cause.objects.filter(id=cause_id)[0]
                txt = agency.name + " pledged their support for " + cs.title + " on " + str(datetime.now())
                Agency_Social_Media_Post.objects.create(
                    author=agency,
                    text=txt,
                    agency_profile=agency.username,
                    agency_name=agency.name,
                    cause_profile=cs.username,
                    cause_name=cs.title,
                    type="agency pledge",
                )
        causes = agency.causes.all()
        context = {
            "username": username,
            "is_agency": True,
            "user": request.user,
            "agency": agency,
            "is_user": checkAuth(request),
            "causes": causes,
            "is_personal_agency": True
        }
        return render(request, 'main/agencyProfile.html', context=context)
    else:
        form_instance = forms.PledgeSupportForm()
    context = {
        "form": form_instance,
        "username": username,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/pledgeSupport.html', context=context)
def addAgency(request, username=None):
    """Grant additional users admin rights over an agency."""
    agency = Agencies.objects.get(username=username)
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    if request.method == "POST":
        form_instance = forms.AddAgencyForm(request.POST, instance=agency)
        if form_instance.is_valid():
            form_instance.save(commit=False)
            for user in form_instance.cleaned_data['admin_users']:
                agency.admin_users.add(user)
            agency.save()
            return redirect('/')
    else:
        form_instance = forms.AddAgencyForm()
    context = {
        "form": form_instance,
        "instance": agency,
        "username": username,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/addAgency.html', context=context)
def activeCauses(request):
    """List causes, optionally filtered by a city chosen via POST.

    NOTE(review): the GeoIP block below uses a hard-coded sample IP and only
    prints the computed distance -- confirm whether it is debug code.
    """
    reader = geoip2.database.Reader('../GeoLite2-City_20201013/GeoLite2-City.mmdb')
    ip = '24.94.15.83'
    response = reader.city(ip)
    print(response.city.geoname_id)
    location1 = (response.location.latitude, response.location.longitude)
    geolocator = Nominatim(user_agent="my_user_agent")
    loc = geolocator.geocode("Chico, CA", exactly_one=False)[0]
    location2 = (loc.latitude, loc.longitude)
    distance = geopy.distance.distance(location1, location2).miles
    print(distance)
    reader.close()
    cause = Cause.objects.all()
    cause_cities = Cause.objects.values_list('location', flat=True)
    print("here are the cities: ")
    print(cause_cities)
    cities = City.objects.all().filter(id__in=cause_cities)
    if request.method == 'POST':
        city_id = request.POST.get('city_id')
        # FIX: was `city_id is not ""` -- identity comparison with a string
        # literal is unreliable (and a SyntaxWarning on Python 3.8+).
        if city_id != "":
            selected_item = get_object_or_404(City, pk=request.POST.get('city_id'))
            cause = Cause.objects.filter(location=selected_item)
    context = {
        "Cause": cause,
        "cities": cities,
        "is_user": checkAuth(request)
    }
    return render(request, 'main/activeCauses.html', context=context)
def causePage(request, uname=None):
    """Cause detail page: its requests, mentioning articles, and pledged
    agencies; POST filters requests by a chosen agency."""
    title = "Cause"
    username = re.sub(r"\s+", "", uname)
    try:
        cause_info = Cause.objects.get(username=username)
        requests = Request_In_Progress.objects.filter(cause=cause_info.id)
        # articles mentioning the cause in either field
        article1 = News_Articles.objects.filter(description__contains=uname)
        article2 = News_Articles.objects.filter(title__contains=uname)
        agencies = Agencies.objects.filter(causes=cause_info)
        if request.method == 'POST':
            agency_id = request.POST.get('agency_id')
            # FIX: was `agency_id is not ""` -- identity comparison with a
            # string literal is unreliable (SyntaxWarning on Python 3.8+).
            if agency_id != "":
                selected_item = get_object_or_404(Agencies, pk=request.POST.get('agency_id'))
                requests = Request_In_Progress.objects.filter(agency=selected_item, cause=cause_info.id)
        articles = article1 | article2
        is_cause = True
        context = {
            "title": title,
            "is_user": checkAuth(request),
            "user": request.user,
            "uname": uname,
            "agencies": agencies,
            "is_cause": is_cause,
            "articles": articles,
            "requests": requests,
            "cause_info": cause_info,
        }
        return render(request, 'main/cause.html', context=context)
    except models.Cause.DoesNotExist:
        return redirect('activeCauses')
def agencyRequestedDonations(request, username=None):
    # Lists donation requests - for every agency when `username` is None,
    # otherwise for one agency - and handles inline deletion via the
    # `delete` GET parameter.
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    if(username is None):
        requests = Request_In_Progress.objects.all()
        is_admin = False
        agency = "All Agency"
    else:
        agency = Agencies.objects.filter(username=username)[0]
        # Only agency admins see edit/delete controls in the template.
        if request.user in agency.admin_users.all():
            is_admin = True
        else:
            is_admin = False
        requests = Request_In_Progress.objects.filter(agency=agency)
    # `delete` carries the id of a request to remove; 0 (default) means none.
    delete = request.GET.get('delete', 0)
    context = {
        "agency": agency,
        "username": username,
        "user": request.user,
        "is_admin": is_admin,
        "is_user": checkAuth(request),
        "requests": requests,
    }
    if delete != 0:
        # Deleting after building `context` still works because `requests`
        # is a lazy queryset, evaluated only when the template renders.
        Request_In_Progress.objects.filter(id=delete).delete()
    return render(request, 'main/agencyRequestedDonations.html', context=context)
def addRequests(request, username):
    # Lets an admin of `username`'s agency post a new donation request and
    # publishes a matching agency social-media post.
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    agency = Agencies.objects.filter(username=username)[0]
    # Only this agency's admins may create requests.
    if request.user not in agency.admin_users.all():
        return HttpResponseRedirect("/")
    if request.method == "POST":
        form_instance = forms.AddRequestForm(request.POST)
        if form_instance.is_valid():
            instance = form_instance.save(commit=False)
            cause = form_instance.cleaned_data['cause']
            # Requesting for a cause implicitly adds it to the agency profile.
            if cause not in agency.causes.all():
                agency.causes.add(cause)
            instance.agency = agency
            amt = str(form_instance.cleaned_data['amount_total'])
            txt = agency.name + " requested " + amt + " " + form_instance.cleaned_data['item'] + " on " + str(datetime.now()) + " for " + cause.title
            agency_url = agency.username
            agency_name = agency.name
            cause_url = cause.username
            cause_name = cause.title
            type="agency add request"
            Agency_Social_Media_Post.objects.create(author=agency, text=txt, agency_profile=agency_url, agency_name=agency_name, cause_profile=cause_url, cause_name=cause_name, type=type)
            instance.save()
            return redirect('activeDonations')
    else:
        form_instance = forms.AddRequestForm()
    context = {
        "form":form_instance,
        "username": username,
        "agency": agency,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/addRequests.html', context=context)
def agencyRequestedVolunteers(request, username=None):
    # Volunteer-request twin of agencyRequestedDonations: list all requests
    # or one agency's, with inline deletion via the `delete` GET parameter.
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    if(username is None):
        requests = Volunteering.objects.all()
        is_admin = False
        agency = "All Agency"
    else:
        agency = Agencies.objects.filter(username=username)[0]
        # Only agency admins see edit/delete controls in the template.
        if request.user in agency.admin_users.all():
            is_admin = True
        else:
            is_admin = False
        requests = Volunteering.objects.filter(agency=agency)
    # `delete` carries the id of a request to remove; 0 (default) means none.
    delete = request.GET.get('delete', 0)
    context = {
        "agency": agency,
        "username": username,
        "user": request.user,
        "is_admin": is_admin,
        "is_user": checkAuth(request),
        "requests": requests,
    }
    if delete != 0:
        # Lazy queryset: the deleted row is excluded at render time.
        Volunteering.objects.filter(id=delete).delete()
    return render(request, 'main/agencyRequestedVolunteers.html', context=context)
def addVolunteerRequest(request, username):
    """Let an agency admin post a volunteer request and announce it socially."""
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    agency = Agencies.objects.filter(username=username)[0]
    # Only this agency's admins may create volunteer requests.
    if request.user not in agency.admin_users.all():
        return HttpResponseRedirect("/")
    if request.method == "POST":
        form_instance = AddVolunteerRequestForm(request.POST)
        if form_instance.is_valid():
            instance = form_instance.save(commit=False)
            cause = form_instance.cleaned_data['cause']
            # Requesting for a cause implicitly adds it to the agency profile.
            if cause not in agency.causes.all():
                agency.causes.add(cause)
            instance.agency = agency
            amt = str(form_instance.cleaned_data['number_of_volunteers'])
            txt = agency.name + " requested " + amt + " volunteers on " + str(datetime.now()) + " for " + cause.title
            agency_url = agency.username
            agency_name = agency.name
            cause_url = cause.username
            cause_name = cause.title
            type="agency add volunteer request"
            Agency_Social_Media_Post.objects.create(author=agency, text=txt, agency_profile=agency_url, agency_name=agency_name, cause_profile=cause_url, cause_name=cause_name, type=type)
            instance.save()
            # FIX: dropped the `context` dict the original built here - it
            # was dead code, discarded immediately by the redirect below.
            return redirect('activeVolunteerRequests')
    else:
        form_instance = AddVolunteerRequestForm()
    context = {
        "form": form_instance,
        "username": username,
        "agency": agency,
        "is_user": checkAuth(request),
    }
    return render(request, 'main/addVolunteerRequest.html', context=context)
def activeDonations(request):
    """List incomplete donation requests, filterable by agency and/or cause."""
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    agencies = Agencies.objects.all()
    causes = Cause.objects.all()
    requests = Request_In_Progress.objects.filter(is_complete=False)
    if request.method == 'POST':
        agency_id = request.POST.get('agency_id')
        cause_id = request.POST.get('cause_id')
        # BUG FIX: the original compared with `is not ""` (identity, a
        # SyntaxWarning on Python 3.8+). Build the filter from whichever
        # ids were supplied - covers the same agency/cause combinations as
        # the old nested branches.
        filters = {"is_complete": False}
        if agency_id:
            filters["agency"] = get_object_or_404(Agencies, pk=agency_id)
        if cause_id:
            filters["cause"] = get_object_or_404(Cause, pk=cause_id)
        requests = Request_In_Progress.objects.filter(**filters)
    user = request.user
    context = {
        "user": user,
        "agencies": agencies,
        "causes": causes,
        "is_user": checkAuth(request),
        "requests": requests
    }
    return render(request, 'main/activeDonations.html', context=context)
def activeVolunteerRequests(request):
    """List volunteer requests, filterable by agency, cause and/or city."""
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    agencies = Agencies.objects.all()
    causes = Cause.objects.all()
    requests = Volunteering.objects.all()
    # Only offer cities that actually have a volunteer request attached.
    request_cities = Volunteering.objects.values_list('location', flat=True)
    cities = City.objects.all().filter(id__in=request_cities)
    if request.method == 'POST':
        agency_id = request.POST.get('agency_id')
        cause_id = request.POST.get('cause_id')
        city_id = request.POST.get('city_id')
        # BUG FIX: the original used `is not ""` identity comparisons
        # (SyntaxWarning on Python 3.8+) across seven nested branches.
        # Building the filter kwargs covers every combination identically.
        filters = {}
        if agency_id:
            filters["agency"] = get_object_or_404(Agencies, pk=agency_id)
        if cause_id:
            filters["cause"] = get_object_or_404(Cause, pk=cause_id)
        if city_id:
            filters["location"] = get_object_or_404(City, pk=city_id)
        if filters:
            requests = Volunteering.objects.filter(**filters)
    user = request.user
    context = {
        "user": user,
        "agencies": agencies,
        "causes": causes,
        "cities": cities,
        "is_user": checkAuth(request),
        "requests": requests
    }
    return render(request, 'main/activeVolunteerRequests.html', context=context)
def finalSubmitDonation(request, id):
    # Records a user's pledge against donation request `id`, updates the
    # request's progress, bumps the user's donation counter and publishes
    # social-media posts for the pledge (and for completion, if reached).
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    donation = Request_In_Progress.objects.filter(id=id)[0]
    if request.method == "POST":
        form_instance = forms.MakeDonation(request.POST)
        if form_instance.is_valid():
            # Track how many donations this user has made on their profile.
            prof = Profile.objects.filter(user=request.user)[0]
            prof.number_of_donations+=1
            prof.save()
            instance = form_instance.save(commit=False)
            instance.user = request.user
            instance.request_in_progress = donation
            instance.fulfilled_amount = 0
            instance.save()
            pledged = form_instance.cleaned_data['promised_amount']
            fulfilled = donation.amount_fulfilled
            total = donation.amount_total
            donation.amount_fulfilled = pledged+fulfilled
            # percent_complete is stored as 0-100, not a fraction.
            donation.percent_complete = ((pledged+fulfilled)/total)*100
            user = request.user
            # NOTE(review): this message lacks spaces around the name and
            # before the date ("...SmithPledged...", "on2020-...").
            txt = user.first_name + user.last_name + "pledged to donate "+ str(pledged) + " " + donation.item +" for " + donation.agency.name + "'s help with " + donation.cause.title + " on" + str(datetime.now())
            agency_url=donation.agency.username
            agency_name=donation.agency.name
            cause_url = donation.cause.username
            cause_name = donation.cause.title
            date_p = datetime.now()
            type="donation"
            Social_Media_Post.objects.create(author=request.user, text=txt, agency_profile=agency_url, agency_name=agency_name, date_posted=date_p, cause_profile=cause_url, cause_name=cause_name, type=type)
            # NOTE(review): exact-equality check - an over-pledge skips the
            # completion branch; confirm whether that is intended.
            if(donation.amount_fulfilled == donation.amount_total):
                donation.is_complete = True
                donation.percent_complete = 100
                txt = donation.agency.name + " successfully fulfilled their request for " + str(donation.amount_total) + " " + donation.item + " on " + str(datetime.now()) + " for " + donation.cause.title
                type="agency complete donation"
                Agency_Social_Media_Post.objects.create(author=donation.agency, text=txt, agency_profile=agency_url, agency_name=agency_name, cause_profile=cause_url, cause_name=cause_name, type=type)
            donation.save()
            context = {
                "user": request.user,
                "id": id,
                "is_user": checkAuth(request),
                "form": form_instance,
                "donation": donation
            }
            return render(request, 'main/activeDonations.html', context=context)
    else:
        form_instance = forms.MakeDonation()
    user = request.user
    context = {
        "user": user,
        "id": id,
        "is_user": checkAuth(request),
        "form": form_instance,
        "donation": donation
    }
    return render(request, 'main/finalSubmitDonation.html', context=context)
def PledgeToVolunteer(request, id):
    """Add the current user as a volunteer on request `id` and post about it."""
    if checkAuth(request) == False:
        return HttpResponseRedirect("/")
    VolunteerPledge = Volunteering.objects.filter(id=id)[0]
    user = request.user
    # Each user may pledge at most once per volunteering request.
    if request.user not in VolunteerPledge.volunteers.all():
        prof = Profile.objects.filter(user=request.user)[0]
        prof.number_of_volunteering_participations += 1
        prof.save()
        VolunteerPledge.volunteers.add(request.user)
        VolunteerPledge.amount_fulfilled += 1
        # BUG FIX: percent_complete is stored as 0-100 elsewhere (set to 100
        # on completion below; donation flow multiplies by 100), but the
        # original added only the raw fraction 1/n here. Scale it to percent.
        VolunteerPledge.percent_complete += (1 / VolunteerPledge.number_of_volunteers) * 100
        # FIX: added the missing space before "pledged" (the message used to
        # render as "JaneDoepledged to attend ...").
        txt = user.first_name + user.last_name + " pledged to attend " + VolunteerPledge.agency.name + "'s volunteering event for " + VolunteerPledge.cause.title + " on"
        agency_url = VolunteerPledge.agency.username
        agency_name = VolunteerPledge.agency.name
        cause_url = VolunteerPledge.cause.username
        cause_name = VolunteerPledge.cause.title
        date_p = VolunteerPledge.date_needed
        type = "volunteer"
        Social_Media_Post.objects.create(author=request.user, text=txt, agency_profile=agency_url, agency_name=agency_name, date_posted=date_p, cause_profile=cause_url, cause_name=cause_name, type=type)
        if(VolunteerPledge.amount_fulfilled == VolunteerPledge.number_of_volunteers):
            #donation.is_complete = True
            VolunteerPledge.percent_complete = 100
            txt = VolunteerPledge.agency.name + " successfully fulfilled their request for " + str(VolunteerPledge.number_of_volunteers) + " volunteers on " + str(datetime.now()) + " for " + VolunteerPledge.cause.title
            type = "agency complete volunteer"
            Agency_Social_Media_Post.objects.create(author=VolunteerPledge.agency, text=txt, agency_profile=agency_url, agency_name=agency_name, cause_profile=cause_url, cause_name=cause_name, type=type)
        VolunteerPledge.save()
    context = {
        "user": user,
        "id": id,
        "is_user": checkAuth(request),
        "volunteer": VolunteerPledge
    }
    return render(request, 'main/PledgeToVolunteer.html', context=context)
def search(request):
    """Free-text search across agencies, causes, users and news articles."""
    if request.method == 'GET' and 'q' in request.GET:
        keyword = request.GET.get("q")
        print(keyword)
    else:
        # BUG FIX: the original branch was the no-op expression
        # `keyword is None`, so `keyword` stayed unbound and the line below
        # raised NameError whenever `q` was absent. Bind it explicitly.
        keyword = None
    if keyword is not None and keyword != '':
        agencies = Agencies.objects.filter(Q(name__contains=keyword) | Q(username__contains=keyword))
        causes = Cause.objects.filter(Q(title__contains=keyword))
        users = User.objects.filter(Q(first_name__contains=keyword) | Q(last_name__contains=keyword) | Q(username__contains=keyword))
        news_articles = News_Articles.objects.filter(Q(title__contains=keyword) | Q(description__contains=keyword))
    else:
        agencies = None
        causes = None
        users = None
        news_articles = None
    context = {
        "agencies": agencies,
        "news_articles": news_articles,
        "users": users,
        "is_user": checkAuth(request),
        "causes": causes,
    }
    return render(request, 'main/search.html', context=context)
def donationPredictor(request):
    # Builds a pandas DataFrame (`df8`) of donation and volunteer requests
    # for a chosen cause type, joined with city data, for the predictor page.
    if request.method == 'POST':
        city_id = request.POST.get('city_id')
        type_of_cause = request.POST.get('cause_type_id')
        if city_id:
            # NOTE(review): despite the name, `city_id` holds a display name
            # here, and the looked-up population is only printed - confirm
            # whether city filtering was ever wired into the result.
            city = City.objects.filter(display_name=city_id).values_list('population', flat=True)
            print(city)
        if not type_of_cause:
            df8 = pd.DataFrame()
        if type_of_cause:
            # All causes of the selected type, plus their cities.
            df = pd.DataFrame(list(Cause.objects.all().filter(type_of_cause=type_of_cause).values()))
            #for i in df.iterrows():
            cause_ids = (df['id'])
            cause_location_ids = (df['location_id'])
            df1 = pd.DataFrame(list(City.objects.all().filter(id__in=cause_location_ids).values()))
            print("here's the new dataframe with city info to join on:")
            print(df1)
            # After this merge the cause id is `id_x` and the city id `id_y`.
            df3 = pd.merge(df, df1, left_on='location_id', right_on='id')
            print("here's the joined dataframe:")
            print(df3)
            requests = Request_In_Progress.objects.all().filter(cause__in=cause_ids).values()
            volunteers = Volunteering.objects.all().filter(cause__in=cause_ids).values()
            if requests:
                if volunteers:
                    # Both kinds: normalise the amount columns to `quantity`,
                    # label volunteer rows, then stack the two frames.
                    df4 = pd.DataFrame(list(requests))
                    df5 = pd.merge(df4, df3, left_on='cause_id', right_on='id_x')
                    df5.columns = ['quantity' if x=='amount_total' else x for x in df5.columns]
                    df6 = pd.DataFrame(list(volunteers))
                    df6.columns = ['quantity' if x=='number_of_volunteers' else x for x in df6.columns]
                    df6['item']='Volunteers'
                    df7 = pd.merge(df6, df3, left_on='cause_id', right_on='id_x')
                    df8 = pd.concat([df5, df7])
                else:
                    # Donation requests only.
                    df4 = pd.DataFrame(list(requests))
                    df8 = pd.merge(df4, df3, left_on='cause_id', right_on='id_x')
                    df8.columns = ['quantity' if x=='amount_total' else x for x in df8.columns]
            elif volunteers:
                # Volunteer requests only.
                df6 = pd.DataFrame(list(volunteers))
                df6.columns = ['quantity' if x=='number_of_volunteers' else x for x in df6.columns]
                df6['item']='Volunteers'
                df8 = pd.merge(df6, df3, left_on='cause_id', right_on='id_x')
            else:
                df8 = pd.DataFrame()
            if not df8.empty:
                print("not emtp")
        if not city_id:
            if not type_of_cause:
                df8 = pd.DataFrame()
    else:
        # GET: nothing selected yet, render an empty table.
        df8 = pd.DataFrame()
    cause_types = Cause.objects.values_list('type_of_cause', flat=True).distinct()
    cities = City.objects.values_list('display_name', flat=True)
    context = {
        "is_user": checkAuth(request),
        "cause_types": cause_types,
        "cities": cities,
        "df": df8,
    }
    return render(request, "main/donationPredictor.html", context=context)
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,497
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/admin.py
|
from django.contrib import admin
from . import models

# Register every app model with the default admin site so its rows can be
# browsed and edited under /admin.
admin.site.register(models.Profile)
admin.site.register(models.Cause)
admin.site.register(models.Agencies)
admin.site.register(models.News_Articles)
admin.site.register(models.Request_In_Progress)
admin.site.register(models.Request_Fulfilled)
admin.site.register(models.Volunteering)
admin.site.register(models.Social_Media_Post)
admin.site.register(models.Agency_Social_Media_Post)
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,498
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/migrations/0002_auto_20201005_1346.py
|
# Generated by Django 2.2.5 on 2020-10-05 20:46
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Agencies.admin_users optional
    # (blank=True). Applied migrations should never be hand-edited.

    dependencies = [
        ('myapp', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='agencies',
            name='admin_users',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,499
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/models.py
|
from django.db import models
from django.forms import ModelForm
from django.contrib.auth.models import User
from phone_field import PhoneField
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from cities_light.models import City
# (db value, human-readable label) choices for Cause.type_of_cause.
CAUSE_TYPES =(
    ("hurricane", "Hurricanes and Tropical Storms"),
    ("earthquake", "Earthquakes"),
    ("fire", "Fires"),
    ("flood", "Floods"),
    ("tornado", "Tornadoes"),
    ("tsunami", "Tsunamies"),
    ("winter_storm", "Winter and Ice Storms"),
    ("general", "General Organization Request")
)
class Cause(models.Model):
    """A disaster or relief cause that agencies can pledge to support."""
    title = models.CharField(max_length=100)
    # City the cause is located in; PROTECT blocks deleting a city in use.
    location = models.ForeignKey(City, on_delete=models.PROTECT, default=None) #models.CharField(max_length=100)
    # URL-safe identifier (title with whitespace removed) - set by the views.
    username = models.CharField(max_length=100, null=True)
    type_of_cause = models.CharField(max_length=100, choices=CAUSE_TYPES, default=None)
    # requests_in_progress = models.ManyToManyField(models.Request_In_Progress)
    def __str__(self):
        return self.title
class News_Articles(models.Model):
    """A scraped/collected news article shown on cause pages and search."""
    picture = models.URLField(max_length=100, null=True, blank=True)
    url = models.URLField(max_length=100, unique=True)
    title = models.CharField(max_length=100, null=True)
    description = models.CharField(max_length=1000, null=True)
    # cause = models.ManyToManyField(Cause, on_delete=models.SET_NULL, blank=True, null=True)
class Agencies(models.Model):
    """A relief agency that posts donation and volunteer requests."""
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=50)
    address = models.CharField(max_length=100)
    url = models.URLField(max_length=200)
    phone = PhoneField()
    city = models.ForeignKey(City, on_delete=models.PROTECT) #models.CharField(max_length=100)
    # URL-safe identifier (capitalized name with whitespace removed).
    username = models.CharField(max_length=100, null=True, blank=True, unique=True)
    picture = models.ImageField(upload_to='media/', default="defaultProfilePic.jpg", null=True, blank=True)
    # Causes this agency has pledged to support.
    causes = models.ManyToManyField(Cause, blank=True)
    # Site users allowed to manage this agency's requests.
    admin_users = models.ManyToManyField(User, blank=True, related_name="agency")
    only_volunteer = models.BooleanField(default=False)
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile; auto-created by the post_save signal below."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    city = models.ForeignKey(City, on_delete=models.PROTECT, null=True, blank=True)
    bio = models.TextField(max_length=500, blank=True)
    picture = models.ImageField(upload_to='media/', default="defaultProfilePic.jpg", null=True, blank=True)
    # Persisted UI preference for the requests list.
    requests_view_hide_completed = models.BooleanField(default=False)
    # NOTE(review): max_digits=1000000000000 is far beyond any backend's
    # limit; a normal value (e.g. 10) would need a migration - confirm.
    number_of_donations = models.DecimalField(max_digits=1000000000000, decimal_places=0, default=0)
    number_of_volunteering_participations = models.DecimalField(max_digits=1000000000000, decimal_places=0, default=0)
    # NOTE(review): separate symmetric self-M2Ms for followers/following
    # means the two sides are not kept in sync automatically.
    followers = models.ManyToManyField("self", blank=True)
    following = models.ManyToManyField("self", blank=True)
    agencies_following = models.ManyToManyField(Agencies, blank=True, related_name="user_followers")
    def __str__(self):
        return self.user.username
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Auto-create a Profile row whenever a new User is saved."""
    if created:
        Profile.objects.create(user=instance)
# FIX: removed the redundant `post_save.connect(create_user_profile, ...)`
# line - the @receiver decorator above already registers the handler, and
# Django dedupes identical connections, so this is pure cleanup.
# class Agencies_Page(models.Model):
# # causes = models.ManyToManyField(Cause)
# agency = models.ForeignKey(Agencies, on_delete=models.CASCADE, null=True, blank=True, unique=True)
class Volunteering(models.Model):
    """An agency's request for volunteers at a cause/location."""
    number_of_volunteers = models.DecimalField(max_digits=10, decimal_places=0)
    # auto_now_add: set once at creation time.
    date_needed = models.DateField(auto_now=False, auto_now_add=True)
    location = models.ForeignKey(City, on_delete=models.PROTECT, blank=True, null=True)
    agency = models.ForeignKey(Agencies, on_delete=models.SET_NULL, blank=True, null=True)
    cause = models.ForeignKey(Cause, on_delete=models.SET_NULL, blank=True, null=True, related_name='cs2')
    address = models.CharField(max_length=100, blank=True, null=True)
    # Progress counters maintained by the PledgeToVolunteer view.
    amount_fulfilled = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    percent_complete = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    # Users who pledged to attend.
    volunteers = models.ManyToManyField(User, blank=True, related_name="volunteer")
# (db value, label) clothing-size choices for Request_In_Progress.size.
SIZES =(
    ("xxxs", "XXXS"),
    ("xxs", "XXS"),
    ("xs", "XS"),
    ("s", "S"),
    ("m", "M"),
    ("l", "L"),
    ("xl", "XL"),
    ("xxl", "2X"),
    ("xxxl", "3X"),
    ("xxxxl", "4X"),
)
class Request_In_Progress(models.Model):
    """An agency's open donation request for a quantity of one item."""
    item = models.CharField(max_length=250, null=True)
    amount_total = models.DecimalField(max_digits=10, decimal_places=0)
    size = models.CharField(max_length=100, choices=SIZES, blank=True)
    # Progress fields maintained by the finalSubmitDonation view.
    amount_fulfilled = models.DecimalField(max_digits=10, decimal_places=2, default=0)
    is_complete = models.BooleanField(default=False)
    date_requested = models.DateField(auto_now=False, auto_now_add=True)
    agency = models.ForeignKey(Agencies, on_delete=models.SET_NULL, blank=True, null=True)
    cause = models.ForeignKey(Cause, on_delete=models.SET_NULL, blank=True, null=True, related_name='cs')
    # 0-100 percentage of amount_total fulfilled.
    percent_complete = models.DecimalField(max_digits=10, decimal_places=2, default=0)
class Request_Fulfilled(models.Model):
    """One user's pledge against a Request_In_Progress."""
    fulfilled_amount = models.DecimalField(max_digits=10, decimal_places=0)
    promised_amount = models.DecimalField(max_digits=10, decimal_places=0)
    promised_arrival = models.DateField(auto_now=False)
    user = models.ForeignKey(User, on_delete=models.SET_NULL, blank=True, null=True)
    request_in_progress = models.ForeignKey(Request_In_Progress, on_delete=models.CASCADE, blank=True, null=True)
    # auto_now: refreshed on every save.
    date = models.DateField(auto_now=True)
class Social_Media_Post(models.Model):
    """Feed entry generated when a user donates or pledges to volunteer."""
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    text = models.CharField(max_length=10000, null=True)
    # Denormalised agency/cause identifiers copied in at post time.
    agency_profile = models.CharField(max_length=10000, null=True)
    agency_name = models.CharField(max_length=10000, null=True)
    date_posted = models.DateField(auto_now=False)
    # Discriminator set by the views (e.g. "donation", "volunteer").
    type = models.CharField(max_length=20, default=None)
    cause_profile = models.CharField(max_length=10000, null=True)
    cause_name = models.CharField(max_length=10000, null=True)
class Agency_Social_Media_Post(models.Model):
    """Feed entry generated when an agency creates or completes a request."""
    author = models.ForeignKey(Agencies, on_delete=models.CASCADE)
    text = models.CharField(max_length=10000, null=True)
    date_posted = models.DateField(auto_now=True)
    link = models.CharField(max_length=200, null=True)
    # Denormalised cause/agency identifiers copied in at post time.
    cause_profile = models.CharField(max_length=10000, null=True)
    cause_name = models.CharField(max_length=10000, null=True)
    agency_profile = models.CharField(max_length=10000, null=True)
    agency_name = models.CharField(max_length=10000, null=True)
    # Discriminator set by the views (e.g. "agency add request").
    type = models.CharField(max_length=20, default=None)
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,500
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/forms.py
|
from django import forms
from django.core.validators import validate_slug
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms import ModelForm
from myapp.models import Agencies
from myapp.models import Profile, Cause
from django.contrib import messages
import re, string
from . import models
# Add your forms
class RegistrationForm(UserCreationForm):
    """Sign-up form: UserCreationForm plus a required, cross-account-unique email."""

    email = forms.EmailField(label="Email", required=True)

    class Meta:
        model = User
        fields = ("username", "email",
                  "password1", "password2",
                  "first_name", "last_name")

    def clean(self):
        data = super().clean()
        email_value = data.get('email')
        uname = data.get('username')
        if email_value:
            # Reject an email already used by a *different* account.
            other_accounts = User.objects.filter(email=email_value).exclude(username=uname)
            if other_accounts.exists():
                self.add_error('email', 'This email address is already associated with an account. Please use a different email.')
        return data

    def save(self, commit=True):
        new_user = super(RegistrationForm, self).save(commit=False)
        new_user.email = self.cleaned_data["email"]
        if commit:
            new_user.save()
        return new_user
class RegisterDonation(forms.Form):
    # NOTE(review): this inherits plain forms.Form, so the inner Meta is
    # ignored and the form declares no fields - presumably it was meant to
    # be a ModelForm; confirm against its callers before changing.
    class Meta:
        model = models.Request_In_Progress
        fields = ["item", "amount_total", "agency"]
    #
    # def save(self, commit=True):
    #     new_sugg = models.Request_In_Progress(
    #         item=self.cleaned_data["item"],
    #         amount_total=self.cleaned_data["amount_total"]
    #     )
    #     if commit:
    #         new_sugg.save()
    #     return new_sugg
class MakeDonation(ModelForm):
    """Pledge form for Request_Fulfilled; server-set fields are excluded."""
    # FIX: this class was defined twice, byte-for-byte identical - the second
    # definition silently shadowed the first. Kept a single definition.
    class Meta:
        model = models.Request_Fulfilled
        exclude = ['user', 'request_in_progress', 'fulfilled_amount']
class AddRequestForm(ModelForm):
    """Agency-facing form for creating a donation request."""
    class Meta:
        model = models.Request_In_Progress
        fields = ["item", "amount_total", "cause", "size"]
class AddVolunteerRequestForm(ModelForm):
    """Agency-facing form for creating a volunteer request; progress fields
    and the agency itself are set server-side."""
    class Meta:
        model = models.Volunteering
        exclude = ["agency", "amount_fulfilled", "percent_complete"]
class ProfileForm(ModelForm):
    """User profile editor; counters and preferences are managed elsewhere."""
    class Meta:
        model = Profile
        exclude = ['user', 'requests_view_hide_completed', 'number_of_donations', 'number_of_volunteering_participations']
        #fields="__all__" #, "picture")
class AddAgencyForm(ModelForm):
    """Form for adding admin users to an existing agency."""
    class Meta:
        model = Agencies
        fields = ['admin_users']
# class SelectCityForm(ModelForm):
# class Meta:
# model = City
# fields = ['city_id']
#form to use for updating data in agencies class
class AgencyForm(ModelForm):
    """Create/update form for Agencies (everything except its causes)."""
    class Meta:
        model = Agencies
        exclude = ['causes']

    def clean(self):
        cleaned_data = super().clean()
        email = cleaned_data.get('email')
        name = cleaned_data.get('name')
        # The agency's slug-like username is the capitalized name with all
        # whitespace removed.
        capitalized_name = string.capwords(name)
        uname = (re.sub(r"\s+", "", capitalized_name))
        # NOTE(review): this check also fires when updating an existing
        # agency under its own name - confirm whether that is intended.
        if Agencies.objects.filter(username=uname).exists():
            # FIX: removed the stray debug `print("true")` that used to sit here.
            self.add_error('name', forms.ValidationError(('This agency name already exists.')))
        if name and Agencies.objects.filter(name=name).count()>1:
            self.add_error('name', forms.ValidationError(('This agency name already exists. If this is your agency, add them to your profile in our profile editor'), code="name_error"))
        if email and Agencies.objects.filter(email=email).exclude(name=name).exists():
            self.add_error('email', forms.ValidationError('This agency email has already been used.', code="email_error"))
        return cleaned_data
class PledgeSupportForm(ModelForm):
    """Lets an agency pledge support by adding causes to its profile."""
    class Meta:
        model = Agencies
        fields = ['causes']

    def clean(self):
        cleaned_data = super().clean()
        causes = cleaned_data.get('causes')
        for cause in causes:
            # Reject any selected cause that some agency already lists.
            if Agencies.objects.filter(causes=cause).exists():
                self.add_error('causes', forms.ValidationError("One of the causes you selected is already included in your agency's profile"))
        # FIX: return the cleaned data - the original ended with a bare
        # `return` (None), which Django tolerates but the form docs advise
        # against. Also removed the dead commented-out __init__.
        return cleaned_data
class CauseForm(ModelForm):
    """Create/update form for Cause; the slug-like username is set by the views."""

    class Meta:
        model = Cause
        exclude = ['username']

    def clean(self):
        data = super().clean()
        cause_title = data.get('title')
        # More than one row with this title means the new one is a duplicate.
        duplicated = bool(cause_title) and models.Cause.objects.filter(title=cause_title).count() > 1
        if duplicated:
            self.add_error('title', forms.ValidationError(('This title already exists. Please be more descriptive'), code='title_error'))
        return data
class HideCompletedRequestsForm(ModelForm):
    """Single-checkbox form that auto-submits to toggle the user's
    'hide completed requests' preference."""
    requests_view_hide_completed = forms.BooleanField(required=False, label='Hide Completed Requests',widget=forms.CheckboxInput(attrs={'onclick': 'this.form.submit()'}))
    class Meta:
        model = Profile
        fields = ['requests_view_hide_completed']
        labels = {
            'requests_view_hide_completed': 'Hide Completed Requests'
        }
class FilterAgencyForm(ModelForm):
    """Minimal form used to filter listings by agency name."""
    class Meta:
        model = Agencies
        fields = ['name']
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,501
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/myapp/tests.py
|
from django.http import HttpRequest
from django.test import SimpleTestCase
from django.test import TestCase
from django.urls import reverse
from myapp.views import *
from myapp.models import *
from myapp.forms import RegistrationForm
import datetime
from django.test import Client
class home_view_test(TestCase):
    """Smoke tests for the home and about pages."""
    # FIX: replaced assertEquals with assertEqual throughout - assertEquals
    # is a deprecated alias that was removed in Python 3.12.

    def test_Sanity(self):
        self.assertEqual(True, True)

    def test_home_url(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)

    def test_view_url_by_name(self):
        response = self.client.get(reverse('about'))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse('home'))
        self.assertTemplateUsed(response, 'main/index.html')

    def test_home_page_contains(self):
        response = self.client.get('/')
        self.assertContains(response,'<h1 class="display-3 text-center ml3" style="color:white">Changing the World, one Donation at a Time</h1>')

    def test_not_on_home_page(self):
        response = self.client.get('/')
        self.assertNotContains(response, 'i should not exist')
class Agencies_view_test(TestCase):
def test_agencies_url(self):
response = self.client.get('/agencies')
self.assertEquals(response.status_code, 200)
def test_trending_url(self):
response = self.client.get('/trending')
self.assertEquals(response.status_code, 200)
def test_about_url(self):
response = self.client.get('/about')
self.assertEquals(response.status_code, 200)
def test_Signup_url(self):
response = self.client.post('/signUp')
self.assertEquals(response.status_code, 200)
def test_signin_url(self):
response= self.client.get(reverse('login'))
self.assertEquals(response.status_code, 200)
def test_postsignin_url(self):
response= self.client.get(reverse('postsignin'))
self.assertEquals(response.status_code, 200)
#def test_profile_url(self):
#User.objects.create(username = 'batman', password = 'thisisrobin')
#self.client.login(username='batman', password = 'thisisrobin')
#response= self.client.post('/profile/batman')
#self.assertEquals(response.status_code, 302)
def test_agencySignUp_url(self):
response= self.client.post(reverse('agencySignUp'))
self.assertEquals(response.status_code, 200)
#def test_agencyProfile(self):
#response= self.client.get(reverse('agencyProfile'))
#self.assertEquals(response.status_code, 200)
def test_createProfile_url(self):
user=User.objects.create(username = 'batman')
user.set_password('thisisrobin')
user.save()
#self.client.login(username='batman', password = 'thisisrobin')
c = Client()
logged_in = c.login(username='batman', password='thisisrobin')
response= self.client.post(reverse('createProfile'))
self.assertTrue(logged_in)
self.assertEquals(response.status_code, 302)
def test_createcause_url(self):
User.objects.create(username = 'batman', password = 'thisisrobin')
self.client.login(username='batman', password = 'thisisrobin')
response= self.client.post(reverse('createCause'))
self.assertEquals(response.status_code, 302)
def test_addAgency_url(self):
User.objects.create(username = 'batman', password = 'thisisrobin')
self.client.login(username='batman', password = 'thisisrobin')
response= self.client.post(reverse('addAgency'))
self.assertEquals(response.status_code, 302)
def test_pledgesupport_url(self):
user=User.objects.create(username = 'batman')
user.set_password('thisisrobin')
user.save()
c = Client()
logged_in = c.login(username='batman', password='thisisrobin')
#login=self.client.login(username='batman', password = 'thisisrobin')
response= self.client.post(reverse('pledgeSupport'))
self.assertEquals(response.status_code, 302)
self.assertTrue(logged_in)
def test_agencyProfile(self):
    # Per-agency profile page; expect a redirect on POST.
    self.user=User.objects.create(username = 'batman', password = 'thisisrobin')
    self.client.login(username='batman', password = 'thisisrobin')
    self.assertEquals(self.client.post('/agencyProfile/batman').status_code, 302)
def test_logoutview(self):
    # Logging out redirects (302) regardless of authentication state.
    self.assertEquals(self.client.post(reverse('logout')).status_code, 302)
#def test_causepage(self):
#self.user=User.objects.create(username = 'batman', password = 'thisisrobin')
#login=self.client.login(username='batman', password = 'thisisrobin')
#response= self.client.post('/causePage/batman')
#self.assertEquals(response.status_code, 302)
class model_test(TestCase):
    """Model-layer tests: seed one row per model, then verify field round-trips."""
    def setUp(self):
        # Creation order matters: Request_In_Progress below looks up
        # Agencies id=1, so the agency row must exist first.
        usher = User.objects.create(username = 'george')
        Agencies.objects.create(name = 'fred', email = 'test@gmail.com',address = '123 main st', url= 'google.com', phone = 5051234567 )
        News_Articles.objects.create(title='nothing',url = 'bing.com', description = 'Austin')
        Request_Fulfilled.objects.create(fulfilled_amount = '10', promised_amount = '11', promised_arrival = '2000-01-01')
        Request_In_Progress.objects.create(amount_total = '5',amount_fulfilled='2',is_complete=False,date_requested='2050-12-12',agency=Agencies.objects.get(id=1))
        #Account_Page.objects.create(requests_fulfilled=Request_Fulfilled.objects.get(id=1))
        #Agencies_Page.objects.create(causes='world domination', requests = Request_In_Progress.objects.get(id=1))
        Cause.objects.create(title='fancy feast',location='Hogwarts')
    def test_model_Agencies(self):
        # Round-trip every scalar field of the seeded agency.
        test = Agencies.objects.get(id=1)
        expected = f'{test.name}'
        self.assertEquals(expected,'fred')
        eml = test.email
        self.assertEquals(eml,'test@gmail.com')
        ddrss = test.address
        self.assertEquals(ddrss,'123 main st')
        rl = test.url
        self.assertEquals(rl,'google.com')
        self.assertEquals(True,True)
    def test_model_News_Articles(self):
        test = News_Articles.objects.get(id=1)
        self.assertEquals(test.url,'bing.com')
    def test_model_Request_Fulfilled(self):
        # The string values passed in setUp come back as ints/dates here.
        test = Request_Fulfilled.objects.get(id=1)
        famount=test.fulfilled_amount
        self.assertEquals(famount,10)
        pamount=test.promised_amount
        self.assertEquals(pamount,11)
        parrival=test.promised_arrival
        self.assertEquals(parrival, datetime.date(2000,1,1))
    def test_model_Request_In_Progress(self):
        test = Request_In_Progress.objects.get(id=1)
        amt_tot=test.amount_total
        self.assertEquals(amt_tot,5)
        amt_ful=test.amount_fulfilled
        self.assertEquals(amt_ful,2)
        done=test.is_complete
        self.assertNotEqual(done,True)
        date_req=test.date_requested
        self.assertEquals(date_req,datetime.date(2050,12,12))
        self.assertEquals(test.agency,Agencies.objects.get(id=1))
    def test_model_Account_Page(self):
        # Placeholder until the Account_Page creation in setUp is re-enabled.
        self.assertEquals(True,True)
    #def test_model_Agencies_Page(self):
        #test = Agencies_Page.objects.get(id=1)
        #self.assertEquals(test.causes,'world domination')
        #self.assertEquals(test.requests,Request_In_Progress.objects.get(id=1))
    def test_model_Causes(self):
        test = Cause.objects.get(id=1)
        self.assertEquals(test.title,'fancy feast')
        self.assertEquals(test.location, 'Hogwarts')
class form_test(TestCase):
    """Form-layer tests for the registration form."""
    def test_RegistrationForm(self):
        # A fully populated, matching-password submission.
        valid_data = {
            "username": "test@gmail.com",
            "email": "test@yahoo.com",
            "password1": "s3cr3tshh",
            "password2": "s3cr3tshh",
            "first_name": "john",
            "last_name": "doe"
        }
        form = RegistrationForm(data=valid_data)
        self.assertEquals(form.fields['password1'].label, 'Password' )
        # FIX: the original called is_valid() but discarded its return value;
        # assert it so an invalid form fails the test directly.
        self.assertTrue(form.is_valid())
        self.assertFalse(form.errors)
#def test_CauseForm(self):
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,502
|
ChicoState/DonateNeed
|
refs/heads/master
|
/mysite/mysite/settings.py
|
import os

# Project base directory (two levels up from this file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# it from the environment before deploying.
SECRET_KEY = '33el*v@@)zi57r_q_1nrjta^tq6n&8hw(v1w(=)aiw#oe1p9dz'

# SECURITY WARNING: don't run with DEBUG on in production.
DEBUG = True

# SECURITY WARNING: make sure you update this to your websites URL
ALLOWED_HOSTS = ['*']

# NOTE(review): 'ALLOW ALL' is not a value the clickjacking middleware
# recognises (Django accepts 'DENY' / 'SAMEORIGIN') — confirm intent.
X_FRAME_OPTIONS = 'ALLOW ALL'

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'cities_light',
    'dal',
    'dal_select2',
    'serve_shiny',
    'django_extensions',
    'phone_field',
    'myapp',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # FIX: removed a duplicate 'django.middleware.csrf.CsrfViewMiddleware'
    # entry that ran the CSRF check twice per request.
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

AUTH_PASSWORD_VALIDATORS = [
    { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },
    { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },
    { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' },
    { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },
]

# Internationalisation.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static and media files.
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "myapp/static"),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'myapp/static/media')
MEDIA_URL = '/media/'

# django-cities-light: restrict the imported dataset to US settlements.
CITIES_LIGHT_TRANSLATION_LANGUAGES = ['en']
CITIES_LIGHT_INCLUDE_COUNTRIES = ['US']
CITIES_LIGHT_INCLUDE_CITY_TYPES = ['PPL', 'PPLA', 'PPLA2', 'PPLA3', 'PPLA4', 'PPLC', 'PPLF', 'PPLG', 'PPLL', 'PPLR', 'PPLS', 'STLMT',]

SHINY_SERVER_DIRECTORY = [BASE_DIR]#[os.path.join(BASE_DIR, "Capstone"),] # This is the file path where Shiny-Server is configured to serve apps from
SHINY_TEMPLATE_FILE = [os.path.join(BASE_DIR, "Capstone/app.R"),] # This is either a string with the full path to a Shiny app template
# or a list of strings with just the file names of Shiny templates.
#SHINY_TEMPLATE_DIRECTORY = [] # This setting is required if your Shiny app is multiple files and SHINY_TEMPLATE_FILE was
#set as a list of file names.
#SHINY_CONTEXT = []# A dictionary to use as context for Shiny apps
SHINY_SERVER_URL = [BASE_DIR]#[os.path.join(BASE_DIR, "Capstone"),]# The root URL of the Shiny-Server
|
{"/mysite/myapp/urls.py": ["/mysite/myapp/models.py"]}
|
27,504
|
CristieNordic/Rubrik-ART
|
refs/heads/main
|
/example-config.py
|
## Base64 encoded admin:Strangep4assw0rd
#auth = {'Authorization': 'Basic YWRtaW46U3RyYW5nZXA0YXNzdzByZA=='}
# Sample values consumed by rubrik-art.py's get_config() when no CLI args or
# environment variables are supplied; copy to config.py and edit.
RUBRIK_AUTH = 'YWRtaW46U3RyYW5nZXA0YXNzdzByZA=='
RUBRIK_URL = 'rubrik.company.com'
|
{"/rubrik-art.py": ["/config.py"]}
|
27,505
|
CristieNordic/Rubrik-ART
|
refs/heads/main
|
/rubrik-art.py
|
#!/usr/bin/python3
# Automatic Restore Tester Rubrik
# By Cristie Nordic AB
import requests
import urllib3
import json
import random
import logging
import os
import argparse
from base64 import b64encode
logging.basicConfig(filename='restore_test.log', level=logging.DEBUG)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_config(url=None, api_ver='v1', auth=None, logname='restore_test.log'):
    """Resolve the Rubrik base API URL and Basic-auth header.

    Resolution order for both the URL and the auth token: explicit argument,
    then environment variable (RUBRIK_URL / RUBRIK_AUTH), then the local
    ``config`` module. Returns (rubrik_url, rubrik_auth_header_dict).
    """
    print('Get Configuration Information')
    if url:
        logging.info('URL has been used with the function call')
    else:
        url = os.getenv('RUBRIK_URL')
        if url:
            logging.info('System Variable RUBRIK_URL will be used')
        else:
            from config import RUBRIK_URL
            logging.info('Using URL from configuration File')
            url = RUBRIK_URL
    rubrik_url = 'https://{0}/api/{1}'.format(url, api_ver)
    if auth:
        logging.info('Auth has been set part of the function')
    else:
        auth = os.getenv('RUBRIK_AUTH')
        if auth:
            logging.info('Find the Rubrik Authentication as a system variable')
        else:
            from config import RUBRIK_AUTH
            logging.info('Using Authenication from configuration file')
            auth = RUBRIK_AUTH
    rubrik_auth = {'Authorization': 'Basic ' + auth}
    return (rubrik_url, rubrik_auth)
def encode_username_password(username, password):
    """Base64-encode "username:password" for an HTTP Basic auth header."""
    print('Encoding your username and password')
    raw = '{0}:{1}'.format(username, password).encode()
    return b64encode(raw).decode('ascii')
def get_random_vm(rubrik_url=None, rubrik_auth=None):
    """Return a random protected, powered-on VM dict from the Rubrik cluster.

    Fetches the VM inventory, filters to VMs whose effective SLA domain is not
    'Unprotected' and whose power status is 'poweredOn', then picks one of the
    survivors at random. Exits when no VM qualifies.
    """
    print('Get Random VM')
    vms = requests.get(rubrik_url+'/vmware/vm', headers=rubrik_auth, verify=False).json()['data']
    active = [vm for vm in vms
              if vm['effectiveSlaDomainName'] != 'Unprotected' and vm['powerStatus'] == 'poweredOn']
    if not active:
        # FIX: guard the empty case — random.randint(0, -1) would raise.
        logging.warning('No protected, powered-on VMs found')
        exit('No protected, powered-on VMs found')
    # FIX: the original indexed `vms` with an index bounded by len(active),
    # which could return an unprotected/powered-off VM or raise IndexError.
    return random.choice(active)
def get_random_snapshot(vmId=None, rubrik_url=None, rubrik_auth=None):
    """Return the id of a random snapshot of the given VM; exit if none exist."""
    snapshotlist = requests.get(rubrik_url+'/vmware/vm/'+vmId+'/snapshot', headers=rubrik_auth, verify=False).json()['data']
    if len(snapshotlist) == 1:
        # Single snapshot: no randomness needed.
        return snapshotlist[0]['id']
    elif len(snapshotlist) == 0:
        logging.warning('No Snapshots found')
        exit('No Snapshots found')
    # Pick a random entry (the unary '+' on the index is a no-op).
    random_snapshot = snapshotlist[+random.randint(0, len(snapshotlist)-1)]['id']
    logging.info('Random Snapshot being restored : %s',random_snapshot)
    return random_snapshot
def restore_random_vm(powerOn = 'false', disableNetwork ='true', rubrik_url=None, rubrik_auth=None):
    """example : restore_random_vm('false','false') will not power on vm and will not disable network

    Picks a random protected VM and a random snapshot of it, then live-mounts
    that snapshot as "RestoreTest-<vm name>". powerOn/disableNetwork are
    embedded verbatim into the JSON payload, so they must be the strings
    'true'/'false'. Returns the requests.Response of the mount POST.
    """
    restore_vm = get_random_vm(rubrik_url=rubrik_url, rubrik_auth=rubrik_auth)
    print('Restore VM:', restore_vm['name'])
    logging.info('Random VM name %s:', restore_vm['name'])
    restore_name='RestoreTest-'+restore_vm['name']
    logging.info('Restored VM name %s:', restore_name)
    restore_random_snapshot = get_random_snapshot(vmId=restore_vm['id'], rubrik_url=rubrik_url, rubrik_auth=rubrik_auth)
    restore_url = rubrik_url+'/vmware/vm/snapshot/'+restore_random_snapshot+'/mount'
    logging.info(restore_url)
    # Hand-built JSON; safe while the values are controlled, but json.dumps
    # would be more robust against special characters in the VM name.
    payload = '{"vmName" : "'+restore_name+'", "powerOn" : '+powerOn+', "disableNetwork" : '+disableNetwork+'}'
    logging.info(payload)
    restore_test = requests.post(restore_url, headers=rubrik_auth, data=payload, verify=False)
    print('Restore Test Finish...')
    return restore_test
def main():
    """Entry point: resolve credentials/URL from the parsed CLI args (global
    ``a``) and run one restore test, reporting success on HTTP 202."""
    # FIX: previously unbound when neither --username/--password nor --key
    # were supplied, raising NameError at the `if base64_authenication` test.
    base64_authenication = None
    if a.username and a.password:
        base64_authenication = encode_username_password(a.username, a.password)
    elif a.key:
        base64_authenication = a.key
    if a.url:
        if base64_authenication:
            rubrik_url, rubrik_auth = get_config(url=a.url, auth=base64_authenication)
        else:
            rubrik_url, rubrik_auth = get_config(url=a.url)
    else:
        # NOTE(review): credentials given without --url are ignored here —
        # get_config() falls back to env/config for both values. Confirm.
        rubrik_url, rubrik_auth = get_config()
    art = restore_random_vm(rubrik_url=rubrik_url, rubrik_auth=rubrik_auth)
    if art.status_code == 202:
        print('Successfully restore VM...')
    else:
        print(art)
if __name__ == "__main__":
    # CLI: --key is mutually exclusive with --username (and its --password).
    p = argparse.ArgumentParser(
        description='''Rubrik Automatic Restore Test''',
        epilog='''Contact support@cristie.se'''
    )
    g = p.add_mutually_exclusive_group(required = False)
    p.add_argument("--url", "-U", help = "IP or FQDN Address to your Rubrik Cluster")
    g.add_argument("--key", "-k", help = "Username and Password in Base64 encoded format")
    g.add_argument("--username", "-u", help = "Username name in clear text format")
    p.add_argument("--password", "-p", help = "Password in in clear text format")
    # `a` is read as a module-level global by main().
    a = p.parse_args()
    main()
|
{"/rubrik-art.py": ["/config.py"]}
|
27,506
|
CristieNordic/Rubrik-ART
|
refs/heads/main
|
/config.py
|
# Placeholder configuration read by rubrik-art.py's get_config() fallback —
# replace both values with real ones (see example-config.py for the format).
RUBRIK_AUTH = 'base64 encoded username:password string'
RUBRIK_URL = 'IP or FQDN Address'
|
{"/rubrik-art.py": ["/config.py"]}
|
27,510
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/printoption.py
|
from enum import Enum
class PrintOption(Enum):
    """Output verbosity levels; callers compare .value, so ordering matters."""
    SILENT = 0   # no output
    PRINT = 1    # one-line summaries
    VERBOSE = 2  # full multi-line detail
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,511
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/army_generator.py
|
import argparse
from armiesofsigmar import ArmyGenerator, load_restictions, PrintOption
# CLI entry point: enumerate all valid armies for a restriction config file.
parser = argparse.ArgumentParser(description='Generate valid armies for Warhammer Age Of Sigmar')
parser.add_argument('configfile', metavar='config', type=str, nargs=1, help='config file to use')
parser.add_argument('-v','--verbose', help='Print armies in verbose mode', action='store_true')
parser.add_argument('-f','--fail', help='Show all failed armies', action='store_true')
args = parser.parse_args()

# Map CLI flags onto PrintOption verbosity levels.
showfails = PrintOption.SILENT
if args.fail:
    showfails = PrintOption.PRINT
showarmies = PrintOption.PRINT
if args.verbose:
    showarmies = PrintOption.VERBOSE

# Load restrictions, prune the unit pool, then enumerate armies (the
# generator prints each valid army as it is found, per `showarmies`).
restrict_config = load_restictions(args.configfile[0])
gen = ArmyGenerator(restrict_config, printarmies=showarmies, showfails=showfails)
gen.restrict_units(showfails=showfails)
armies = gen.generate_army()

### Alternatively, print after generating:
# gen = ArmyGenerator(restrict_config, printarmies=showarmies, showfails=PrintOption.SILENT)
# armies = gen.generate_army()
# for army in armies:
#     if args.verbose:
#         print army.fullstr()
#     else:
#         print army
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,512
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/battalion.py
|
import copy
import re
from printoption import PrintOption
from unit import Unit
class Battalion(object):
    """A warscroll battalion: a named group of Units plus its own points cost.

    Mirrors much of the Unit/Units interface (points(), unitsize(),
    sum_roles(), is_valid(), ...) so Army can treat it uniformly.
    """
    def __init__(self, unit_config):
        # Config dict for the battalion; its "units" key lists member configs.
        self.unit_config = unit_config
        self.units = []
        for c in self.unit_config["units"]:
            self.units.append(Unit(c, "unit"))

    def __str__(self):
        # "Name(points):[unit, unit]" — empty string when no models are taken.
        if self.unitsize() == 0:
            return ""
        line = [("{}({}):[".format(self.name(), self.points()))]
        unitline = []
        for unit in self.units:
            unitstr = unit.str_battalion()
            if len(unitstr) > 0:
                unitline.append(unitstr)
        # Sort on letters only, so counts/punctuation don't affect ordering.
        line.append(", ".join(sorted(unitline, key=lambda x: re.sub('[^A-Za-z]+', '', x).lower())))
        line.append("]")
        return " ".join(line)

    def fullstr(self):
        # Multi-line, tabbed report of the battalion and its member units.
        if self.unitsize() == 0:
            return ""
        line = [("\t{} (Warscroll Battalion)".format(self.name()))]
        line.append("\t\tTotal Points: {}".format(self.points()))
        unitline = []
        for unit in self.units:
            unitstr = unit.fullstr(tabs=2)
            if len(unitstr) > 0:
                unitline.append(unitstr)
        line.append("\n".join(sorted(unitline, key=lambda x: re.sub('[^A-Za-z]+', '', x).lower())))
        line.append("")
        return "\n".join(line)

    def __repr__(self):
        return "{}:{}".format(self.name(),str(self.units))

    def __len__(self):
        return len(self.units)

    def __getitem__(self,index):
        if index < len(self.units):
            return self.units[index]
        raise IndexError("index out of range")

    def __setitem__(self,index,item):
        if index < len(self.units):
            self.units[index] = item
            return
        raise IndexError("index out of range")

    def is_type(self, unittype):
        # A battalion only answers to the "battalion" type tag.
        return "battalion" == unittype

    def unitsize(self):
        # Total model count across all member units.
        size = 0
        for unit in self.units:
            size = size + unit.unitsize()
        return size

    #Points of just the battalion (no units)
    def battalion_points(self):
        return self.unit_config.get("points", 0)

    def points(self):
        # Battalion cost plus member unit costs; 0 when nothing is taken.
        if self.unitsize() == 0:
            return 0
        points = self.battalion_points()
        for unit in self.units:
            points = points + unit.points()
        return points

    def name(self):
        return self.unit_config["name"]

    def is_unique(self):
        # Battalions are never treated as unique.
        return False
        # return self.unit_config.get("unique", False)

    def roles(self):
        return self.unit_config.get("roles", [])

    def keywords(self):
        return []
        # return self.unit_config.get("keywords", [])

    def move(self, wounds_suffered=0):
        # NOTE(review): mirrors Unit.move(); battalion configs may not carry
        # "move"/"wounds" keys, in which case this returns 0 — confirm usage.
        move = self.unit_config.get("move", 0)
        if type(move) is not dict:
            return move
        # Damage-table form: step down until an entry exists for this count.
        if wounds_suffered > self.wounds_per_unit():
            wounds_suffered = self.wounds_per_unit()
        while wounds_suffered > 0 and move.get(wounds_suffered, None) == None:
            wounds_suffered = wounds_suffered - 1
        return "{}*".format(move.get(wounds_suffered, 0))

    def wounds_per_unit(self):
        return self.unit_config.get("wounds", 0)

    # Total number of wounds across all units
    def total_wounds(self):
        return self.wounds_per_unit() * self.unitsize()

    def wounds_str(self):
        # "per-model(total)" form when more than one model is present.
        wounds = self.wounds_per_unit()
        if self.unitsize() == 1:
            return str(wounds)
        return "{}({})".format(wounds, wounds * self.unitsize())

    def save(self):
        # "-" (no save) is treated as 6 for averaging purposes.
        save = self.unit_config.get("save", 0)
        if type(save) is str and save == "-":
            return 6
        return save

    def save_str(self):
        save = self.unit_config.get("save", 0)
        if type(save) is str:
            return save
        return "{}+".format(save)

    def bravery(self):
        return self.unit_config.get("bravery", 0)

    def sum_roles(self, roles):
        # Accumulate member unit roles into the caller-supplied dict.
        for unit in self.units:
            if unit.count > 0:
                for r in unit.roles():
                    roles[r] = roles.get(r,0) + unit.count

    def is_valid(self, restrict_battalion, restrict_config, final=True, showfails=PrintOption.SILENT):
        #TODO: Currently only support 1 or 0 instances of a single battalion
        count = 0
        if self.unitsize() > 0:
            count = 1
        # Check unit meets min restriction (only enforced on a finished army)
        if final and count < restrict_battalion["min"]:
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL MIN restrict {} {} {} : {}".format(self.name(), restrict_battalion["min"], count, self)
            return False
        if self.unitsize() == 0:
            # An empty battalion passes all remaining checks trivially.
            return True
        # Check unit meets max restriction (-1 means unlimited)
        if restrict_battalion["max"] != -1 and count >restrict_battalion["max"]:
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL MAX restrict {} {} {} : {}".format(self.name(), restrict_battalion["max"], count, self)
            return False
        #Check units and count up roles
        for unit in self.units:
            #TODO: Restrict from both restrict config and unit_config !!!
            restrict_unit = unit.unit_config
            restrict_keywords = []
            if not unit.is_valid(restrict_unit, restrict_keywords, final, showfails):
                return False
        return True
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,513
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/army_printer.py
|
import argparse
import yaml
from armiesofsigmar import Army, load_units, load_rules
# CLI entry point: load an army description from YAML and pretty-print it.
parser = argparse.ArgumentParser(description='Print Warhammer Age Of Sigmar army')
parser.add_argument('configfile', metavar='config', type=str, nargs=1, help='config file to use')
parser.add_argument('-v','--verbose', help='Print army in verbose mode', action='store_true')
args = parser.parse_args()

#Open the configfile
configlist = []
with open(args.configfile[0], 'r') as f:
    configlist = yaml.load(f)

#Get list of all possible units
units_config = load_units(unitlists=configlist.get("config", []))

#Create an empty army
army = Army(units_config)

#Add all our units to the army
for u in configlist.get("units",[]):
    army.add(u, "unit")
for u in configlist.get("allies",[]):
    army.add(u, "ally")
for u in configlist.get("battalions",[]):
    army.add(u, "battalion")

#Load the rulebook
# NOTE(review): the bare except silently discards any failure here (missing
# "rulebook"/"size" keys or a load error) and prints without rules context.
try:
    rules_config = load_rules(configlist["rulebook"], configlist["size"])
except:
    rules_config = {}

if args.verbose:
    print army.fullstr(rules_config)
else:
    print army

# if rules_config:
#     if army.is_valid(rules_config, {}):
#         print "Army is valid"
#     else:
#         print "Army is NOT valid"
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,514
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/__init__.py
|
from generate import ArmyGenerator
from printoption import PrintOption
from configloader import load_restictions, load_units, load_rules
from army import Army
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,515
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/generate.py
|
from configloader import load_units, load_rules
from army import Army
import copy
from printoption import PrintOption
class ArmyGenerator(object):
def __init__(self, restrict_config, printarmies=PrintOption.SILENT, showfails=PrintOption.SILENT):
self.restrict_config = restrict_config
self.units_config = load_units(restrict_config["rulebook"], restrict_config["unitlists"])
self.rules_config = load_rules(restrict_config["rulebook"], restrict_config["size"])
self.printarmies = printarmies
self.showfails = showfails
# Use the restrict config to cut down the number of units in units_config
# Will speed up generation (due to less options to parse)
def restrict_units(self, showfails=PrintOption.SILENT):
#Check units
newunits = []
for unit in self.units_config["units"]:
if self._check_unit_restrict(unit, showfails):
newunits.append(unit)
self.units_config["units"] = newunits
# Check allies
max_allies = self.restrict_config.get("max_allies", self.rules_config.get("allies", 0))
if max_allies== 0:
self.units_config["allies"] = []
if showfails.value > PrintOption.SILENT.value:
print "ALLY RESTRICT allies max {}".format(max_allies)
else:
newallies = []
for unit in self.units_config["allies"]:
if self._check_ally_restrict(unit, showfails):
newallies.append(unit)
self.units_config["allies"] = newallies
# Check battalions
max_battalions = self.restrict_config.get("max_battalions", 0)
if max_battalions== 0:
self.units_config["battalions"] = []
if showfails.value > PrintOption.SILENT.value:
print "BATTALIONS RESTRICT allies max {}".format(max_battalions)
else:
newbattalions = []
for unit in self.units_config["battalions"]:
if self._check_battalion_restrict(unit, showfails):
newbattalions.append(unit)
self.units_config["battalions"] = newbattalions
def _check_unit_restrict(self, unit, showfails):
name = unit.get("name","")
roles = unit.get("roles",[])
keywords = unit.get("keywords",[])
restrict_unit = self.restrict_config["units"].get(name, self.restrict_config["units"]["__Others"])
# Check unit meets max restriction
if restrict_unit["max"] == 0:
if showfails.value > PrintOption.SILENT.value:
print "UNIT RESTRICT MAX restrict {} {}".format(name, restrict_unit["max"])
return False
# Check unit meets max role restriction
for role in roles:
if self.rules_config["units"][role]["max"] == 0:
if showfails.value > PrintOption.SILENT.value:
print "UNIT RESTRICT Role MAX {} {} {}".format(name, role, self.rules_config["units"][role]["max"])
return False
# Check keyword match. Empty list means allow anything
match = False
restrict_keywords = self.restrict_config.get("keywords", [])
if not restrict_keywords:
match = True
for restrict_keyword in restrict_keywords:
if restrict_keyword in keywords:
match = True
if not match:
if showfails.value > PrintOption.SILENT.value:
print "UNIT RESTRICT Keyword restrict: {} {} {}".format(name, keywords, restrict_keywords)
return False
return True
def _check_ally_restrict(self, unit, showfails):
name = unit.get("name","")
keywords = unit.get("keywords",[])
restrict_unit = self.restrict_config["allies"].get(name, self.restrict_config["allies"]["__Others"])
# Check unit meets max restriction
if restrict_unit["max"] == 0:
if showfails.value > PrintOption.SILENT.value:
print "ALLY RESTRICT MAX restrict {} {}".format(name, restrict_unit["max"])
return False
# Check keyword match. Empty list means allow anything
match = False
restrict_keywords = self.restrict_config.get("allies_keywords", [])
if not restrict_keywords:
match = True
for restrict_keyword in restrict_keywords:
if restrict_keyword in keywords:
match = True
if not match:
if showfails.value > PrintOption.SILENT.value:
print "ALLY RESTRICT Ally Keyword restrict: {} {} {}".format(name, keywords, restrict_keywords)
return False
return True
def _check_battalion_restrict(self, unit, showfails):
restrict_unit = self.restrict_config["battalions"].get(name, self.restrict_config["battalions"]["__Others"])
print restrict_unit
# Check unit meets max restriction
if restrict_unit["max"] == 0:
if showfails.value > PrintOption.SILENT.value:
print "BATTALION RESTRICT MAX restrict {} {}".format(name, restrict_unit["max"])
return False
return True
def generate_army(self):
self.finalarmies = []
self._generate(Army(self.units_config), 0, 0)
return self.finalarmies
def _generate(self, army, min_start_index, battalion_inner_index):
if not army.is_valid(self.rules_config, self.restrict_config, final=False, showfails=self.showfails):
return
if army.is_valid(self.rules_config, self.restrict_config, final=True, showfails=self.showfails):
if self.printarmies == PrintOption.PRINT:
print(army)
if self.printarmies == PrintOption.VERBOSE:
print(army.fullstr(self.rules_config))
self.finalarmies.append(copy.deepcopy(army))
return
for unitid in range(min_start_index, len(army)):
if army[unitid].is_type("battalion"):
for battalionid in range(battalion_inner_index, len(army[unitid])):
army[unitid][battalionid].inc(1)
self._generate(army, unitid, battalionid)
army[unitid][battalionid].inc(-1)
else:
army[unitid].inc(1)
self._generate(army, unitid, 0)
army[unitid].inc(-1)
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,516
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/allies.py
|
from units import Units
from unit import Unit
from printoption import PrintOption
import re
class Allies(Units):
    """Collection of allied units; specialises Units with ally-specific
    restriction lookups and its own verbose report section."""

    def __init__(self, units_config):
        self.units_config = units_config
        # One Unit per ally config entry, tagged with the "ally" type.
        self.units = [Unit(cfg, "ally") for cfg in units_config]

    def fullstr(self):
        # Verbose section; empty string when no allies are taken.
        entries = [s for s in (u.fullstr(tabs=2) for u in self.units) if len(s) > 0]
        if not entries:
            return ""
        letters_only = lambda text: re.sub('[^A-Za-z]+', '', text).lower()
        parts = ["\tAllies",
                 "\t\tTotal Points: {}".format(self.points()),
                 "\n".join(sorted(entries, key=letters_only))]
        return "\n".join(parts)

    def is_valid(self, restrict_config, final=True, showfails=PrintOption.SILENT):
        # Every ally must pass its own restriction and the ally keyword filter.
        restrict_keywords = restrict_config.get("allies_keywords", [])
        return all(
            unit.is_valid(self._restrict_unit(restrict_config, unit.name(), "allies"),
                          restrict_keywords, final, showfails)
            for unit in self.units)
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,517
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/army.py
|
import re
from units import Units
from allies import Allies
from battalions import Battalions
from printoption import PrintOption
import itertools
class Army(object):
    """A full army: main units, allies and battalions, validated as a whole.

    Wraps the three collections and forwards flat indexing across them so the
    generator can treat the army as one list of slots.
    """
    def __init__(self, units_config):
        self.units_config = units_config
        self.units = Units(units_config["units"])
        self.allies = Allies(units_config["allies"])
        self.battalions = Battalions(units_config["battalions"])
        # Order matters: __getitem__/__setitem__ index across this sequence.
        self.all = [self.units, self.allies, self.battalions]

    def __str__(self):
        # "points: units, allies, battalions" with empty sections omitted.
        line = []
        for u in self.all:
            s = str(u)
            if s:
                line.append(s)
        return "{}: {}".format(self.points(), ", ".join(line))

    def __repr__(self):
        return str(self.units)

    def __len__(self):
        return len(self.units) + len(self.allies) + len(self.battalions)

    def fullstr(self, rules_config={}):
        # Verbose report; shows remaining points when the rules are known.
        points = self.points()
        if rules_config:
            line = [("Points {} [{} to {}]".format(points, rules_config["points"] - points, rules_config["points"]))]
        else:
            line = [("Points {}".format(points))]
        line.append("\tWounds: {}, Models: {}, Bravery/Unit: {:.2f}, Save/Wound: {:.2f}+".format(
            self.wounds(),
            self.unitsize(),
            self.avg_bravery_per_unit(),
            self.avg_save_per_wound()))
        for u in self.all:
            s = u.fullstr()
            if s:
                line.append(s)
        line.append("Roles: {}".format(self.sum_roles_str(rules_config)))
        line.append("")
        return "\n".join(line)

    def __getitem__(self,index):
        # Flat index across units, then allies, then battalions.
        for u in self.all:
            if index < len(u):
                return u[index]
            index = index - len(u)
        raise IndexError("index out of range")

    def __setitem__(self,index,item):
        for u in self.all:
            if index < len(u):
                u[index] = item
                return
            index = index - len(u)
        raise IndexError("index out of range")

    def add(self, name, unittype):
        # Route an addition to the right collection by type tag.
        if unittype == "unit":
            self.units.add(name)
        elif unittype == "ally":
            self.allies.add(name)
        elif unittype == "battalion":
            self.battalions.add(name)
        else:
            raise KeyError('Invalid unit type')

    def points(self):
        x=0
        for u in self.all:
            x = x + u.points()
        return x

    def unitsize(self):
        # Total model count across all three collections.
        x=0
        for u in self.all:
            x = x + u.unitsize()
        return x

    def wounds(self):
        x=0
        for u in self.all:
            x = x + u.wounds()
        return x

    def _bravery_sum(self):
        x = 0
        for u in self.all:
            x = x + u._bravery_sum()
        return x

    def _save_mul_wounds_sum(self):
        x = 0
        for u in self.all:
            x = x + u._save_mul_wounds_sum()
        return x

    def avg_bravery_per_unit(self):
        # Mean bravery per model; 0 guards the empty army.
        count = self.unitsize()
        if count == 0:
            return 0
        return self._bravery_sum() / float(count)

    def avg_save_per_wound(self):
        count = self.wounds()
        if count == 0:
            return 0
        return self._save_mul_wounds_sum() / float(count)

    def sum_roles(self, rules_config={}):
        # Role name -> model count, pre-seeded with every role the rules list.
        r = {}
        for rulename, ruleactions in rules_config.get("units",{}).iteritems():
            r[rulename] = 0
        for u in self.all:
            u.sum_roles(r)
        return r

    def sum_roles_str(self, rules_config={}):
        # "count role [min->max]" per role; a max of -1 renders as "[min+]".
        roles = self.sum_roles(rules_config)
        line = []
        for role, count in roles.iteritems():
            rule=rules_config.get("units",{}).get(role,{})
            if rule:
                if rule["max"] == -1:
                    line.append("{} {} [{}+]".format(count, role, rule["min"]))
                else:
                    line.append("{} {} [{}->{}]".format(count, role, rule["min"], rule["max"]))
            else:
                line.append("{} {}".format(count, role))
        return ", ".join(line)

    def __check_min_max(self, constraint, current_value, default_min, default_max, restrict_config, final, showfails):
        # Generic range check; a max of -1 means unlimited, and minimums are
        # only enforced on a finished (final) army.
        con_min = restrict_config.get("min_"+constraint, default_min)
        con_max = restrict_config.get("max_"+constraint, default_max)
        if (current_value > con_max and con_max != -1) or ( final and current_value < con_min):
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL {}: {} {}->{} : {}".format(constraint, current_value, con_min, con_max, self)
            return False
        return True

    def is_valid(self, rules_config, restrict_config={}, final=True, showfails=PrintOption.SILENT):
        # Army-wide limits first (points/wounds/models/allies/battalions)...
        if not self.__check_min_max("points", self.points(), rules_config["points"], rules_config["points"], restrict_config, final, showfails):
            return False
        if not self.__check_min_max("wounds", self.wounds(), 0, -1, restrict_config, final, showfails):
            return False
        if not self.__check_min_max("models", self.unitsize(), 0, -1, restrict_config, final, showfails):
            return False
        if not self.__check_min_max("allies", self.allies.points(), 0, rules_config["allies"], restrict_config, final, showfails):
            return False
        #Default battalions to off until better support added
        if not self.__check_min_max("battalions", self.battalions.num(), 0, 0, restrict_config, final, showfails):
            return False
        # ...then per-collection checks...
        for u in self.all:
            if not u.is_valid(restrict_config, final, showfails):
                return False
        # Check roles
        rules_check = self.sum_roles(rules_config)
        for role, count in rules_check.iteritems():
            # Check role meets min requirements
            if final and count < rules_config["units"][role]["min"]:
                if showfails.value > PrintOption.SILENT.value:
                    print "FAIL Role MIN {} {} {} : {}".format(role, rules_config["units"][role]["min"], count, self)
                return False
            # Check role meets max requirements
            if rules_config["units"][role]["max"] != -1 and count >rules_config["units"][role]["max"]:
                if showfails.value > PrintOption.SILENT.value:
                    print "FAIL Role MAX {} {} {} : {}".format(role, rules_config["units"][role]["max"], count, self)
                return False
        return True
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,518
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/unit.py
|
import re
from printoption import PrintOption
# A unit consists of a number of instances of a single model.
class Unit(object):
    """A number of instances of a single model, bought and costed in
    multiples of the model's minimum unit size (Python 2 code)."""

    def __init__(self, unit_config, unittype):
        # Dictionary holding all the stats for a unit
        self.unit_config = unit_config
        # The number of multiples of a minimum sized unit.
        # For example, Drayds has a minimum unit size of 10.
        # Therefore, 20 Dryads would have a count of 2.
        # Technically, you could have 18 Dryads in a unit, but
        # the cost would still be the same as 20. Therefore this
        # system disallows that.
        self.count = 0
        # Type of the unit. Main unit, ally or battalion
        self.unittype = unittype

    def __str__(self):
        """Short display form; empty string when the unit is not taken."""
        if self.count == 0:
            return ""
        if self.unitsize() > 1:
            return "{} {} ({})".format(self.unitsize(),
                                       self.name(),
                                       self.points())
        return "{} ({})".format(self.name(),
                                self.points())

    def __repr__(self):
        # Unlike __str__, always shows the size even for empty/single units.
        return "{} {} ({})".format(self.unitsize(),
                                   self.name(),
                                   self.points())

    def fullstr(self, tabs=1, points=True):
        """Multi-line description of the unit, indented by `tabs` tab
        characters; empty string when the unit is not taken."""
        tabs_str = "\t" * tabs
        ret = []
        if self.count == 0:
            return ""
        if self.unitsize() > 1:
            ret.append("{}{} {} ({} units)".format(tabs_str, self.unitsize(), self.name(), self.count))
        else:
            ret.append("{}{} {}".format(tabs_str, self.unitsize(), self.name()))
        # Detail lines are indented one level deeper than the header.
        tabs_str = "\t" * (tabs+1)
        if points:
            ret.append("{}Points: {}".format(tabs_str, self.points()))
        if self.roles():
            ret.append("{}Roles: {}".format(tabs_str, ", ".join(self.roles())))
        # Move / Wounds / Save / Bravery summary line.
        ret.append("{}M/W/S/B: {}/{}/{}/{}".format(tabs_str,
                                                   self.move(),
                                                   self.wounds_str(),
                                                   self.save_str(),
                                                   self.bravery()))
        return "\n".join(ret)

    def str_battalion(self):
        """Display form used when this unit is listed inside a battalion
        (no points shown)."""
        if self.count == 0:
            return ""
        if self.count > 1:
            return "{} {}".format(self.unitsize(), self.name())
        return "{}".format(self.name())

    # Increase the multiples of minimum size in the unit
    def inc(self, num):
        # num may be negative; count is clamped at zero.
        self.count = self.count + num
        if self.count < 0:
            self.count = 0

    def is_type(self, unittype):
        """True when this unit's slot type ("unit"/"ally"/"battalion")
        matches unittype."""
        return self.unittype == unittype

    # The number of individual figures in the unit.
    # Always a multiple of unit minimum size.
    def unitsize(self):
        return self.unit_config["min"] * self.count

    def points(self):
        # Config points are per minimum unit
        return self.unit_config["points"] * self.count

    def name(self):
        """Name of the model, as given in the config."""
        return self.unit_config["name"]

    def is_unique(self):
        """True for named characters etc. that may be taken only once."""
        return self.unit_config.get("unique", False)

    def roles(self):
        """List of army roles (e.g. from the config); empty when none."""
        return self.unit_config.get("roles", [])

    def keywords(self):
        """List of keywords from the config; empty when none."""
        return self.unit_config.get("keywords", [])

    def move(self, wounds_suffered=0):
        """Movement stat.

        A dict-valued "move" config entry is a damage table mapping
        wounds-suffered thresholds to movement values: the nearest
        threshold at or below wounds_suffered is used and the result is
        suffixed with '*'.  A plain value is returned unchanged.
        """
        move = self.unit_config.get("move", 0)
        if type(move) is not dict:
            return move
        if wounds_suffered > self.wounds_per_unit():
            wounds_suffered = self.wounds_per_unit()
        # Walk down to the nearest threshold defined in the table.
        while wounds_suffered > 0 and move.get(wounds_suffered, None) == None:
            wounds_suffered = wounds_suffered - 1
        return "{}*".format(move.get(wounds_suffered, 0))

    def wounds_per_unit(self):
        """Wounds stat of a single model."""
        return self.unit_config.get("wounds", 0)

    # Total number of wounds across all units
    def total_wounds(self):
        return self.wounds_per_unit() * self.unitsize()

    def wounds_str(self):
        """Per-model wounds, with the unit total in parentheses when the
        unit has more than one model, e.g. "2(20)"."""
        wounds = self.wounds_per_unit()
        if self.unitsize() == 1:
            return str(wounds)
        return "{}({})".format(wounds, wounds * self.unitsize())

    def save(self):
        """Numeric save stat; a "-" (no save) is treated as 6."""
        save = self.unit_config.get("save", 0)
        if type(save) is str and save == "-":
            return 6
        return save

    def save_str(self):
        """Display form of the save: "4+" for numeric saves, the raw
        string (e.g. "-") otherwise."""
        save = self.unit_config.get("save", 0)
        if type(save) is str:
            return save
        return "{}+".format(save)

    def bravery(self):
        """Bravery stat from the config (0 when absent)."""
        return self.unit_config.get("bravery", 0)

    def is_valid(self, restrict_unit, restrict_keywords, final=True, showfails=PrintOption.SILENT):
        """Check this unit against min/max count restrictions, uniqueness
        and keyword filters.

        restrict_unit: dict with "min"/"max" counts; max of -1 means unlimited.
        restrict_keywords: keywords to match; an empty list allows anything.
        final: when False the min requirement is skipped (army still being built).
        showfails: PrintOption controlling diagnostic output on failure.
        Returns True when all checks pass.
        """
        # Check unit meets min restriction
        if final and self.count < restrict_unit["min"]:
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL MIN restrict {} {} {} : {}".format(self.name(), restrict_unit["min"], self.count, self)
            return False
        # An untaken unit passes all remaining checks trivially.
        if self.count == 0:
            return True
        # Check unit meets max restriction
        if restrict_unit["max"] != -1 and self.count >restrict_unit["max"]:
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL MAX restrict {} {} {} : {}".format(self.name(), restrict_unit["max"], self.count, self)
            return False
        # Only allow 1 of each unique model
        if self.is_unique() and self.count > 1 :
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL unique {} {} : {}".format(self.name(), self.count, self)
            return False
        # Check keyword match. Empty list means allow anything
        match = False
        if not restrict_keywords:
            match = True
        for restrict_keyword in restrict_keywords:
            if restrict_keyword in self.keywords():
                match = True
        if not match:
            if showfails.value > PrintOption.SILENT.value:
                print "FAIL Keyword restrict: {} {} {} : {}".format(self.name(), self.keywords(), restrict_keywords, self)
            return False
        return True
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,519
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/configloader.py
|
import os.path
import yaml
import sys
import itertools
SELF_DIR = os.path.dirname(sys.modules[__name__].__file__)
RULEBOOK_LATEST="ghb2017"
DEFAULT_ARMY_SIZE="vanguard"
def _mergeDictsOverwriteEmpty(d1, d2):
res = d1.copy()
for k,v in d2.items():
if k not in d1 or d1[k] == '':
res[k] = v
return res
def load_units(rulebook=RULEBOOK_LATEST, unitlists=["all"], recursive=False):
    """Load unit, ally and battalion configs for the given faction lists.

    For each faction this reads the rulebook points file
    ("<rulebook>_<faction>.yaml") and merges every unit/battalion entry
    with its stats from the matching warscrolls file
    ("warscrolls_<faction>.yaml").  A section whose value is a string is
    an index entry naming another faction file, loaded recursively.
    Missing faction files are silently skipped.

    On a non-recursive (top-level) call, ally faction names are resolved
    into full unit configs, as are each battalion's member units.

    NOTE(review): Python 2 code (dict.iteritems).
    """
    ret = {"units":[], "allies":[], "battalions": []}
    # NOTE(review): retdict is never used.
    retdict = {}
    for faction in unitlists:
        filename = os.path.join(SELF_DIR, "units", "{}_{}.yaml".format(rulebook, faction.replace(" ", "_")))
        try:
            with open(filename, 'r') as f:
                book = yaml.load(f)
                for sectionname, section in book.iteritems():
                    if type(section) is str:
                        # Index entry: recurse into the named faction file.
                        loadedsection = load_units(rulebook, [sectionname], recursive)
                        ret["units"] = ret["units"] + loadedsection["units"]
                    else:
                        # Merge each unit/battalion entry with its warscroll stats.
                        filenamew = os.path.join(SELF_DIR, "units", "warscrolls_{}.yaml".format(sectionname.replace(" ", "_")))
                        with open(filenamew, 'r') as fw:
                            fbook = yaml.load(fw)
                            for sectiontype in ["units", "battalions"]:
                                fsection = fbook[sectionname].get(sectiontype, [])
                                for unit in section.get(sectiontype, []):
                                    for funit in fsection:
                                        if funit["name"] == unit["name"]:
                                            # print funit["name"]
                                            unit.update(funit)
                                ret[sectiontype] = ret[sectiontype] + section.get(sectiontype,[])
                        if not recursive:
                            ret["allies"] = ret["allies"] + section["allies"]
        except IOError:
            # No faction file for this rulebook: skip it.
            pass
    if not recursive:
        # Resolve ally faction names into their actual unit configs.
        ret["allies"] = load_units(rulebook, ret["allies"], True)["units"]
        # Resolve each battalion's unit-name map into full unit configs.
        for battalion in ret["battalions"]:
            new_units = []
            for name, config in battalion["units"].iteritems():
                for u in itertools.chain(ret["units"], ret["allies"]):
                    if u["name"] == name:
                        new_units.append(_mergeDictsOverwriteEmpty(config, u))
                        # NOTE(review): 'continue' only continues the inner
                        # loop; 'break' may have been intended here.
                        continue
            battalion["units"]=new_units
    # print ret
    return ret
def load_restictions(filename):
    """Parse a YAML restrictions file and return its contents.

    NOTE(review): yaml.load without an explicit Loader is unsafe on
    untrusted input; consider yaml.safe_load if these files may come
    from outside the project.
    """
    with open(filename, 'r') as restriction_file:
        data = yaml.load(restriction_file)
    return data
def load_rules(rulebook=RULEBOOK_LATEST, size=DEFAULT_ARMY_SIZE):
    """Load the army composition rules for a rulebook/army-size pair
    from rules/<rulebook>_<size>.yaml next to this module."""
    rules_path = os.path.join(
        SELF_DIR, "rules", "{}_{}.yaml".format(rulebook, size))
    with open(rules_path, 'r') as rules_file:
        return yaml.load(rules_file)
def load_warscrolls(unitlists=["all"]):
    """Load warscroll entries for the given faction lists.

    Reads "warscrolls_<faction>.yaml" for each faction.  A section whose
    value is a string is an index entry naming another faction file and
    is loaded recursively; any other section is a list of warscrolls and
    is appended directly.

    NOTE: the mutable default argument is shared across calls; it is
    never mutated here, so it is safe — do not append to it.
    """
    ret = []
    for faction in unitlists:
        filename = os.path.join(SELF_DIR, "units", "warscrolls_{}.yaml".format(faction.replace(" ", "_")))
        with open(filename, 'r') as f:
            book = yaml.load(f)
            for sectionname, section in book.iteritems():
                if type(section) is str:
                    # BUG FIX: was 'load_warascolls(rulebook, [sectionname])'
                    # — a NameError on both the misspelled function name and
                    # the undefined 'rulebook' variable.
                    ret = ret + load_warscrolls([sectionname])
                else:
                    ret = ret + section
    return ret
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,520
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/battalions.py
|
from units import Units
from battalion import Battalion
from printoption import PrintOption
import re
class Battalions(Units):
    """Collection of battalions; each "unit" held here is a Battalion."""

    def __init__(self, units_config):
        """Build one Battalion per entry in the config list."""
        self.units_config = units_config
        self.units = [Battalion(config) for config in units_config]

    def sum_roles(self, roles):
        """Accumulate role counts of every non-empty battalion into `roles`."""
        taken = (b for b in self.units if b.unitsize() > 0)
        for battalion in taken:
            battalion.sum_roles(roles)

    def is_valid(self, restrict_config, final=True, showfails=PrintOption.SILENT):
        """Return False as soon as any battalion fails its restriction."""
        for battalion in self.units:
            limits = self._restrict_unit(restrict_config, battalion.name(), "battalions")
            if not battalion.is_valid(limits, restrict_config, final, showfails):
                return False
        return True

    # Number of enabled battalions
    def num(self):
        return sum(1 for b in self.units if b.unitsize() > 0)
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,521
|
a74nh/armiesofsigmar
|
refs/heads/master
|
/armiesofsigmar/units.py
|
import copy
import re
from printoption import PrintOption
from unit import Unit
class Units(object):
    """Collection of Unit objects, one per entry in the unit config list."""

    def __init__(self, units_config):
        self.units_config = units_config
        self.units = [Unit(config, "unit") for config in units_config]

    def __str__(self):
        # Join the non-empty unit strings, sorted alphabetically while
        # ignoring non-letter characters (so "10 Dryads" sorts as "Dryads").
        rendered = [text for text in (str(u) for u in self.units) if text]
        return ", ".join(sorted(rendered,
                                key=lambda s: re.sub('[^A-Za-z]+', '', s).lower()))

    def __repr__(self):
        return str(self.units)

    def __len__(self):
        return len(self.units)

    def fullstr(self):
        """Multi-line description of all taken units, sorted like __str__."""
        blocks = [text for text in (u.fullstr() for u in self.units) if text]
        return "\n".join(sorted(blocks,
                                key=lambda s: re.sub('[^A-Za-z]+', '', s).lower()))

    def __getitem__(self, index):
        if index >= len(self.units):
            raise IndexError("index out of range")
        return self.units[index]

    def __setitem__(self, index, item):
        if index >= len(self.units):
            raise IndexError("index out of range")
        self.units[index] = item

    def add(self, name):
        """Add one minimum-sized multiple of the named unit."""
        for unit in self.units:
            if unit.name() == name:
                unit.inc(1)
                return
        raise KeyError('Unknown unit: {}'.format(name))

    def unitsize(self):
        """Total number of individual models across all units."""
        return sum(unit.unitsize() for unit in self.units)

    def points(self):
        """Total points cost of all taken units; zero when nothing is taken."""
        if self.unitsize() == 0:
            return 0
        return sum(unit.points() for unit in self.units)

    def wounds(self):
        """Total wounds across all models of all units."""
        return sum(unit.total_wounds() for unit in self.units)

    def _bravery_sum(self):
        # Bravery weighted by model count.
        return sum(unit.unitsize() * unit.bravery() for unit in self.units)

    def _save_mul_wounds_sum(self):
        # Save weighted by total wounds.
        return sum(unit.total_wounds() * unit.save() for unit in self.units)

    def _restrict_unit(self, restrict_config, name, unittype):
        """Look up the min/max restriction entry for a unit name, falling
        back to the "__Others" entry, then to a permissive default."""
        fallback = { "min": 0, "max": -1 }
        #TODO: Battalions restrict by default until better support
        if unittype == "battalions":
            fallback = { "min": 0, "max": 0 }
        section = restrict_config[unittype]
        return section.get(name, section.get("__Others", fallback))

    def sum_roles(self, roles):
        """Accumulate the role counts of all taken units into `roles`."""
        for unit in self.units:
            if unit.count > 0:
                for role in unit.roles():
                    roles[role] = roles.get(role, 0) + unit.count

    def is_valid(self, restrict_config, final=True, showfails=PrintOption.SILENT):
        """Return False as soon as any unit violates its restriction."""
        restrict_keywords = restrict_config.get("keywords", [])
        for unit in self.units:
            limits = self._restrict_unit(restrict_config, unit.name(), "units")
            if not unit.is_valid(limits, restrict_keywords, final, showfails):
                return False
        return True
|
{"/army_generator.py": ["/armiesofsigmar/__init__.py"]}
|
27,548
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/__init__.py
|
'''
Scientific Luigi (SciLuigi for short) is a light-weight wrapper library around
Spotify's Luigi workflow system that aims to make writing scientific workflows
(consisting of numerous interdependent commandline applications) more fluent,
flexible and modular.
'''
from sciluigi import interface
from sciluigi.interface import run
from sciluigi.interface import run_local
from sciluigi.interface import LOGFMT_STREAM
from sciluigi.interface import LOGFMT_LUIGI
from sciluigi.interface import LOGFMT_SCILUIGI
from sciluigi.interface import DATEFMT
from sciluigi import audit
from sciluigi.audit import AuditTrailHelpers
from sciluigi import dependencies
from sciluigi.dependencies import TargetInfo
from sciluigi.dependencies import S3TargetInfo
from sciluigi.dependencies import DependencyHelpers
from sciluigi import parameter
from sciluigi.parameter import Parameter
from sciluigi import slurm
from sciluigi.slurm import SlurmInfo
from sciluigi.slurm import SlurmTask
from sciluigi.slurm import SlurmHelpers
from sciluigi.slurm import RUNMODE_LOCAL
from sciluigi.slurm import RUNMODE_HPC
from sciluigi.slurm import RUNMODE_MPI
from sciluigi import task
from sciluigi.task import new_task
from sciluigi.task import Task
from sciluigi.task import ExternalTask
from sciluigi.workflow import WorkflowTask
from sciluigi import util
from sciluigi.util import timestamp
from sciluigi.util import timepath
from sciluigi.util import recordfile_to_dict
from sciluigi.util import dict_to_recordfile
__all__ = ['interface', 'run', 'run_local', 'LOGFMT_STREAM', 'LOGFMT_LUIGI',
'LOGFMT_SCILUIGI', 'DATEFMT', 'audit', 'AuditTrailHelpers',
'dependencies', 'TargetInfo', 'S3TargetInfo', 'DependencyHelpers',
'parameter', 'Parameter', 'slurm', 'SlurmInfo', 'SlurmTask',
'SlurmHelpers', 'RUNMODE_LOCAL', 'RUNMODE_HPC', 'RUNMODE_MPI', 'task',
'new_task', 'Task', 'ExternalTask', 'WorkflowTask', 'util',
'timestamp', 'timepath', 'recordfile_to_dict', 'dict_to_recordfile']
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,549
|
pharmbio/sciluigi
|
refs/heads/master
|
/setup.py
|
import os
import sys

try:
    from setuptools import setup
except ImportError:
    # BUG FIX: was a bare 'except:', which would also swallow
    # SystemExit and KeyboardInterrupt; only a missing setuptools
    # should trigger the distutils fallback.
    from distutils.core import setup

readme_note = '''\
.. note::
   For the latest source, issues and discussion, etc, please visit the
   `GitHub repository <https://github.com/pharmbio/sciluigi>`_\n\n
'''

# Prepend the GitHub pointer to the README for the PyPI long description.
with open('README.rst') as fobj:
    long_description = readme_note + fobj.read()

setup(
    name='sciluigi',
    version='0.10.1',
    description='Helper library for writing dynamic, flexible workflows in luigi',
    long_description=long_description,
    author='Samuel Lampa',
    author_email='samuel.lampa@rilnet.com',
    url='https://github.com/pharmbio/sciluigi',
    license='MIT',
    keywords='workflows workflow pipeline luigi',
    packages=[
        'sciluigi',
    ],
    install_requires=[
        'luigi',
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
    ],
)
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,550
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/util.py
|
'''
This module contains utility methods that are used in various places across the
sciluigi library
'''
import csv
import os
import time
def timestamp(datefmt='%Y-%m-%d, %H:%M:%S'):
    '''
    Return the current local time rendered with the given strftime format.
    '''
    now = time.localtime()
    return time.strftime(datefmt, now)
def timepath(sep='_'):
    '''
    Create a timestamp formatted for use in file names
    (YYYYMMDD<sep>HHMMSS).
    '''
    fmt = '%Y%m%d{sep}%H%M%S'.format(sep=sep)
    return time.strftime(fmt, time.localtime())
def timelog():
    '''
    Create a bracketed time stamp for use in log files.
    '''
    return time.strftime('[%Y-%m-%d %H:%M:%S]', time.localtime())
def ensuredir(dirpath):
    '''
    Ensure that the directory dirpath exists, creating it (and any
    missing parents) if necessary.

    Uses os.makedirs(..., exist_ok=True) instead of the previous
    exists()-then-makedirs() pair, which raced when two processes
    created the same directory concurrently (TOCTOU).
    '''
    os.makedirs(dirpath, exist_ok=True)
# Separator between key and value on each record-file line.
RECORDFILE_DELIMITER = ':'

def recordfile_to_dict(filehandle):
    '''
    Read a record file (colon-separated key/value lines) into a dict.
    Whitespace directly after the delimiter is stripped from values.
    '''
    reader = csv.reader(filehandle,
                        delimiter=RECORDFILE_DELIMITER,
                        skipinitialspace=True)
    return {row[0]: row[1] for row in reader}
def dict_to_recordfile(filehandle, records):
    '''
    Write a dict to a record file as colon-separated key/value lines.
    '''
    writer = csv.writer(filehandle,
                        delimiter=RECORDFILE_DELIMITER,
                        skipinitialspace=True)
    writer.writerows([key, val] for key, val in records.items())
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,551
|
pharmbio/sciluigi
|
refs/heads/master
|
/examples/example3_workflow.py
|
import logging
import luigi
import sciluigi as sl
from example3_components import T1, Merge
log = logging.getLogger("sciluigi-interface")
class TestWF(sl.WorkflowTask):
    """Example workflow wiring two T1 producers into two Merge consumers."""

    # Name of the local variable in workflow() to return as the workflow
    # endpoint (e.g. "mrg2"); resolved via locals() below, so it must
    # match one of the variable names used there.
    task = luigi.Parameter()

    def workflow(self):
        t1a = self.new_task("t1a", T1, text="hej_hopp")
        t1b = self.new_task("t1b", T1, text="hopp_hej")
        mrg1 = self.new_task("mrg1", Merge)
        mrg2 = self.new_task("mrg2", Merge)
        # Workflow definition
        mrg1.in_data1 = t1a.out_data1
        mrg1.in_data2 = t1b.out_data1
        mrg2.in_data1 = t1b.out_data1
        mrg2.in_data2 = t1a.out_data1
        # Log task ids and hashes of every task instance defined above.
        for name, instance in locals().items():
            if issubclass(type(instance), sl.Task):
                log.info(
                    f"{name}, task id: {instance.task_id}\n{name}, hash: {instance.__hash__()}"
                )
        # NOTE: looks up the endpoint by local variable name — renaming
        # the locals above would break the --task parameter values.
        return locals()[self.task]
# Run the workflow as a script, selecting mrg2 as the endpoint task.
if __name__ == "__main__":
    sl.run_local(main_task_cls=TestWF, cmdline_args=["--task=mrg2"])
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,552
|
pharmbio/sciluigi
|
refs/heads/master
|
/test/test_dependencies.py
|
import logging
import luigi
import sciluigi as sl
import os
import six.moves as s
import time
import unittest
TESTFILE_PATH = '/tmp/test.out'
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class MultiInOutWf(sl.WorkflowTask):
    """Minimal workflow chaining MultiOutTask into MultiInTask."""
    def workflow(self):
        mo = self.new_task('mout', MultiOutTask, an_id='x')
        mi = self.new_task('min', MultiInTask)
        # Connect the list-valued output to the list-valued input.
        mi.in_multi = mo.out_multi
        return mi
class MultiOutTask(sl.Task):
    """Task producing ten text targets, parameterised by an id string."""
    an_id = luigi.Parameter()
    def out_multi(self):
        # One TargetInfo per output file /tmp/out_<an_id>_<i>.txt
        paths = ['/tmp/out_%s_%d.txt' % (self.an_id, i) for i in range(10)]
        targets = [sl.TargetInfo(self, path) for path in paths]
        return targets
    def run(self):
        # Write a marker string into every output target.
        for otgt in self.out_multi():
            with otgt.open('w') as ofile:
                ofile.write('hej')
class MultiInTask(sl.Task):
    """Task consuming a list of input targets and producing one output
    per input (same path with '.daa.txt' appended)."""
    # Upstream output method (or nested structure of them); wired by the workflow.
    in_multi = None
    def out_multi(self):
        return [sl.TargetInfo(self, itgt.path + '.daa.txt') for itgt in self.in_multi()]
    def run(self):
        # Copy each input to its matching output, appending ' daa'.
        for itgt, otgt in zip(self.in_multi(), self.out_multi()):
            with itgt.open() as ifile:
                with otgt.open('w') as ofile:
                    ofile.write(ifile.read() + ' daa')
class TestMultiInOutWorkflow(unittest.TestCase):
    """Tests for tasks whose in_*/out_* fields are lists of targets."""

    def setUp(self):
        self.w = luigi.worker.Worker()

    def test_methods(self):
        wf = sl.WorkflowTask()
        # NOTE(review): all four tasks are registered under the same
        # instance name 'tout'; they are only used via the local
        # references below, but distinct names would be clearer.
        touta = wf.new_task('tout', MultiOutTask,
                            an_id='a')
        toutb = wf.new_task('tout', MultiOutTask,
                            an_id='b')
        toutc = wf.new_task('tout', MultiOutTask,
                            an_id='c')
        tin = wf.new_task('tout', MultiInTask)
        # Mixed nesting: a list holding a callable and a dict of a
        # callable plus an already-evaluated target list.
        tin.in_multi = [touta.out_multi, {'a': toutb.out_multi, 'b': toutc.out_multi()}]
        # Assert outputs returns luigi targets, or list of luigi targets
        outs = touta.output()
        self.assertIsInstance(outs, list)
        for out in outs:
            self.assertIsInstance(out, luigi.Target)
        reqs = tin.requires()
        self.assertIsInstance(reqs, list)
        for req in reqs:
            self.assertIsInstance(req, luigi.Task)

    def test_workflow(self):
        # Run the whole workflow and check its files appear on disk.
        wf = MultiInOutWf()
        self.w.add(wf)
        self.w.run()
        # Assert outputs exists
        for p in ['/tmp/out_%s_%d.txt' % (aid, i) for i in s.range(10) for aid in ['x']]:
            self.assertTrue(os.path.exists(p))
        for p in ['/tmp/out_%s_%d.txt.daa.txt' % (aid, i) for i in s.range(10) for aid in ['x']]:
            self.assertTrue(os.path.exists(p))
        # Remove
        for p in ['/tmp/out_%s_%d.txt' % (aid, i) for i in s.range(10) for aid in ['x']]:
            os.remove(p)
        for p in ['/tmp/out_%s_%d.txt.daa.txt' % (aid, i) for i in s.range(10) for aid in ['x']]:
            os.remove(p)

    def tearDown(self):
        pass
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,553
|
pharmbio/sciluigi
|
refs/heads/master
|
/examples/example1.py
|
import logging
import luigi
import sciluigi as sl
from subprocess import call
log = logging.getLogger('sciluigi-interface')
# ------------------------------------------------------------------------
# Workflow class(es)
# ------------------------------------------------------------------------
class MyWorkflow(sl.WorkflowTask):
    '''
    Workflow feeding the raw data file into the A-to-T replacement task.
    '''
    def workflow(self):
        raw = self.new_task('rawdata', RawData)
        replacer = self.new_task('atot', AToT)
        replacer.in_data = raw.out_rawdata
        return replacer
# ------------------------------------------------------------------------
# Task classes
# ------------------------------------------------------------------------
class RawData(sl.ExternalTask):
    """Pre-existing input file (external: not produced by any task)."""
    def out_rawdata(self):
        return sl.TargetInfo(self, 'data/acgt.txt')
class AToT(sl.Task):
    """Replace every 'A' with 'T' in the input file using sed."""
    # Upstream output method; wired by the workflow.
    in_data = None
    def out_replatot(self):
        return sl.TargetInfo(self, self.in_data().path + '.atot')
    # ------------------------------------------------
    def run(self):
        # NOTE(review): shell=True with a string built from file paths is
        # fine for this example but unsafe with untrusted path input.
        cmd = 'cat ' + self.in_data().path + ' | sed "s/A/T/g" > ' + self.out_replatot().path
        log.info("COMMAND TO EXECUTE: " + cmd)
        call(cmd, shell=True)
# Run this file as script
# ------------------------------------------------------------------------
if __name__ == '__main__':
    # Uses luigi's local scheduler (no central scheduler daemon needed).
    luigi.run(local_scheduler=True, main_task_cls=MyWorkflow)
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,554
|
pharmbio/sciluigi
|
refs/heads/master
|
/test/test_paramval.py
|
import logging
import luigi
import sciluigi as sl
import os
import time
import unittest
log = logging.getLogger('sciluigi-interface')
log.setLevel(logging.WARNING)
class IntParamTask(sl.Task):
    """Task taking an int parameter and writing its value to a file."""
    an_int_param = luigi.IntParameter()
    def out_int_val(self):
        # Output path encodes the parameter value itself.
        return sl.TargetInfo(self, '/tmp/intparamtask_intval_%d.txt' % self.an_int_param)
    def run(self):
        with self.out_int_val().open('w') as outfile:
            outfile.write('%d' % self.an_int_param)
class NonStringParamWF(sl.WorkflowTask):
    """Workflow passing a non-string (int) parameter value to a task."""
    def workflow(self):
        intparam_task = self.new_task('intparam_task', IntParamTask,
                                      an_int_param = 123)
        return intparam_task
class TestNonStringParameterValues(unittest.TestCase):
    '''
    Regression tests for passing non-string (int) parameter values to tasks.
    '''
    def setUp(self):
        self.w = luigi.worker.Worker()
        self.nsp_wf = NonStringParamWF(instance_name='nonstring_param_wf')
        self.w.add(self.nsp_wf)

    def test_intparam_gets_set(self):
        # The int value must arrive unchanged on the task instance.
        self.assertEqual(self.nsp_wf._tasks['intparam_task'].an_int_param, 123)

    def test_intparam_value_written(self):
        # BUG FIX: this method was also named test_intparam_gets_set,
        # silently shadowing the method above so that test never ran.
        self.w.run()
        with self.nsp_wf.workflow().out_int_val().open() as infile:
            val = infile.read()
        self.assertEqual(val, '123')

    def tearDown(self):
        pass
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,555
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/parameter.py
|
'''
This module contains a sciluigi subclass of luigi's Parameter, where
custom functionality might be added in the future.
'''
import luigi
class Parameter(luigi.Parameter):
    '''
    Subclass of luigi's Parameter, where custom functionality might be
    added in the future.

    Exists so sciluigi code can depend on sciluigi.Parameter and pick up
    future extensions without changing call sites.
    '''
    pass
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,556
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/audit.py
|
'''
This module contains functionality for the audit-trail logging functionality
'''
import logging
import luigi
import os
import random
import time
# ==============================================================================
log = logging.getLogger('sciluigi-interface')
# ==============================================================================
class AuditTrailHelpers(object):
    '''
    Mixin for luigi.Task:s, with functionality for writing audit logs of running tasks
    '''
    def add_auditinfo(self, infotype, infoval):
        '''
        Alias to _add_auditinfo(), that can be overridden.
        '''
        return self._add_auditinfo(self.instance_name, infotype, infoval)

    def _add_auditinfo(self, instance_name, infotype, infoval):
        '''
        Save audit information in a designated file, specific for this task.
        '''
        # Audit directory is provided by the owning workflow task
        # (workflow_task is set by the sciluigi task machinery).
        dirpath = self.workflow_task.get_auditdirpath()
        # Random back-off before re-checking reduces the chance of two
        # tasks racing to create the audit directory.
        # NOTE(review): not fully race-free — makedirs can still raise
        # if another process wins between the check and the call.
        if not os.path.isdir(dirpath):
            time.sleep(random.random())
            if not os.path.isdir(dirpath):
                os.makedirs(dirpath)
        auditfile = os.path.join(dirpath, instance_name)
        # First write for this task creates an ini-style section header.
        if not os.path.exists(auditfile):
            with open(auditfile, 'w') as afile:
                afile.write('[%s]\n' % self.instance_name)
        with open(auditfile, 'a') as afile:
            afile.write('%s: %s\n' % (infotype, infoval))

    def get_instance_name(self):
        '''
        Return the sciluigi instance_name, falling back to the luigi
        task_id when it is unset.
        '''
        instance_name = None
        if self.instance_name is not None:
            instance_name = self.instance_name
        else:
            instance_name = self.task_id
        return instance_name

    @luigi.Task.event_handler(luigi.Event.START)
    def save_start_time(self):
        '''
        Log start of execution of task.
        '''
        # Only log for tasks created through a sciluigi workflow.
        if hasattr(self, 'workflow_task') and self.workflow_task is not None:
            msg = 'Task {task} started'.format(
                task=self.get_instance_name())
            log.info(msg)

    @luigi.Task.event_handler(luigi.Event.PROCESSING_TIME)
    def save_end_time(self, task_exectime_sec):
        '''
        Log end of execution of task, with execution time, and record the
        execution time plus all parameter values in the audit file.
        '''
        if hasattr(self, 'workflow_task') and self.workflow_task is not None:
            msg = 'Task {task} finished after {proctime:.3f}s'.format(
                task=self.get_instance_name(),
                proctime=task_exectime_sec)
            log.info(msg)
            self.add_auditinfo('task_exectime_sec', '%.3f' % task_exectime_sec)
            # Record all parameters except the workflow_task back-reference.
            for paramname, paramval in self.param_kwargs.items():
                if paramname not in ['workflow_task']:
                    self.add_auditinfo(paramname, paramval)
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,557
|
pharmbio/sciluigi
|
refs/heads/master
|
/examples/example4_multiwf.py
|
'''
An example showing how you can run multiple workflow tasks, from a "Meta workflow" (MetaWF below)
'''
import sciluigi as sl
import luigi
class MetaWF(sl.WorkflowTask):
    '''
    Meta workflow
    '''
    def workflow(self):
        # Spawn one WF instance per replacement word.
        replacements = ['bar', 'tjo', 'hej']
        return [self.new_task('wf', WF, replacement=word)
                for word in replacements]
class WF(sl.WorkflowTask):
    '''
    Main workflow, which is run in multiple instances above
    '''
    replacement = luigi.Parameter()

    def workflow(self):
        writer = self.new_task('foowriter', FooWriter)
        converter = self.new_task('foo2bar', Foo2Bar, replacement=self.replacement)
        converter.in_foo = writer.out_foo
        return converter
class FooWriter(sl.Task):
    '''
    Dummy task that writes the word "foo" into its output file.
    '''
    def out_foo(self):
        return sl.TargetInfo(self, 'foo.txt')
    def run(self):
        self.ex('echo foo > {foo}'.format(foo=self.out_foo().path))
class Foo2Bar(sl.Task):
    '''
    Dummy task that sed-replaces "foo" in its input file with the
    replacement parameter, writing to a new file.
    '''
    replacement = luigi.Parameter()
    in_foo = sl.TargetInfo(None, 'None')
    def out_bar(self):
        suffix = '.{r}.txt'.format(r=self.replacement)
        return sl.TargetInfo(self, self.in_foo().path + suffix)
    def run(self):
        cmd = 'sed "s/foo/{r}/g" {inf} > {outf}'.format(
            r=self.replacement,
            inf=self.in_foo().path,
            outf=self.out_bar().path)
        self.ex(cmd)
# Run as script
if __name__ == '__main__':
    sl.run_local()  # luigi's local (in-process) scheduler
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,558
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/slurm.py
|
'''
This module contains functionality related to integration with the SLURM HPC
resource manger.
'''
import datetime
import logging
import re
import time
import sciluigi.parameter
import sciluigi.task
import subprocess as sub
# ================================================================================
# Setup logging
log = logging.getLogger('sciluigi-interface')
# A few 'constants'
RUNMODE_LOCAL = 'runmode_local'
RUNMODE_HPC = 'runmode_hpc'
RUNMODE_MPI = 'runmode_mpi'
# ================================================================================
class SlurmInfo():
    '''
    A data object for keeping slurm run parameters.
    '''
    # Class-level defaults; instances overwrite all of these in __init__
    runmode = None # One of RUNMODE_LOCAL|RUNMODE_HPC|RUNMODE_MPI
    project = None
    partition = None
    cores = None
    time = None
    jobname = None
    threads = None
    def __init__(self, runmode, project, partition, cores, time, jobname, threads):
        '''
        Init a SlurmInfo object, from string data.
        Time is on format: [[[d-]HH:]MM:]SS
        '''
        self.runmode = runmode
        self.project = project
        self.partition = partition
        self.cores = cores
        self.time = time
        self.jobname = jobname
        self.threads = threads
    def __str__(self):
        '''
        Return a readable string representation of the stored info.
        '''
        return (f'(time: {self.time}, '
                f'partition: {self.partition}, '
                f'cores: {self.cores}, '
                f'threads: {self.threads}, '
                f'jobname: {self.jobname}, '
                f'project: {self.project})')
    def get_argstr_hpc(self):
        '''
        Return a formatted string with arguments and option flags to SLURM
        commands such as salloc and sbatch, for non-MPI, HPC jobs.
        '''
        return (f' -A {self.project} -p {self.partition} -n {self.cores}'
                f' -t {self.time} -J {self.jobname} srun -n 1 -c {self.threads} ')
    def get_argstr_mpi(self):
        '''
        Return a formatted string with arguments and option flags to SLURM
        commands such as salloc and sbatch, for MPI jobs.
        '''
        return (f' -A {self.project} -p {self.partition} -n {self.cores}'
                f' -t {self.time} -J {self.jobname} mpirun -v -np {self.cores} ')
# ================================================================================
class SlurmInfoParameter(sciluigi.parameter.Parameter):
    '''
    A specialized luigi parameter, accepting only SlurmInfo objects.
    '''
    def parse(self, x):
        '''Pass through SlurmInfo instances unchanged; reject anything else.'''
        if not isinstance(x, SlurmInfo):
            raise Exception('Parameter is not instance of SlurmInfo: %s' % x)
        return x
# ================================================================================
class SlurmHelpers():
    '''
    Mixin with various convenience methods for executing jobs via SLURM.

    Relies on self.ex_local() (provided by sciluigi.task.Task in the
    concrete class, see SlurmTask below) for command execution, and on the
    slurminfo parameter for run configuration.
    '''
    # Other class-fields
    slurminfo = SlurmInfoParameter(default=None) # Class: SlurmInfo
    # Main Execution methods
    def ex(self, command):
        '''
        Execute either locally or via SLURM (plain HPC or MPI), depending
        on the runmode in self.slurminfo.
        '''
        if isinstance(command, list):
            command = ' '.join(command)
        if self.slurminfo.runmode == RUNMODE_LOCAL:
            log.info('Executing command in local mode: %s', command)
            self.ex_local(command) # Defined in task.py
        elif self.slurminfo.runmode == RUNMODE_HPC:
            log.info('Executing command in HPC mode: %s', command)
            self.ex_hpc(command)
        elif self.slurminfo.runmode == RUNMODE_MPI:
            log.info('Executing command in MPI mode: %s', command)
            self.ex_mpi(command)
    def ex_hpc(self, command):
        '''
        Execute command through salloc/srun (non-MPI HPC mode), log the
        SLURM job info parsed from stderr, and return
        (retcode, stdout, stderr).
        '''
        if isinstance(command, list):
            command = sub.list2cmdline(command)
        fullcommand = 'salloc %s %s' % (self.slurminfo.get_argstr_hpc(), command)
        (retcode, stdout, stderr) = self.ex_local(fullcommand)
        self.log_slurm_info(stderr)
        return (retcode, stdout, stderr)
    def ex_mpi(self, command):
        '''
        Execute command in HPC mode with MPI support (multi-node, message
        passing interface), and return (retcode, stdout, stderr).
        '''
        if isinstance(command, list):
            command = sub.list2cmdline(command)
        fullcommand = 'salloc %s %s' % (self.slurminfo.get_argstr_mpi(), command)
        (retcode, stdout, stderr) = self.ex_local(fullcommand)
        self.log_slurm_info(stderr)
        return (retcode, stdout, stderr)
    # Various convenience methods
    def assert_matches_character_class(self, char_class, a_string):
        '''
        Raise an exception unless a_string consists solely of characters
        from the given regex character class.
        '''
        if not bool(re.match('^{c}+$'.format(c=char_class), a_string)):
            raise Exception('String {s} does not match character class {cc}'.format(
                s=a_string, cc=char_class))
    def clean_filename(self, filename):
        '''
        Clean up a string to make it suitable for use as file name.
        '''
        # FIX: raw string replaces the previous '[^A-Za-z0-9\_\ ]' pattern,
        # whose '\_' and '\ ' were invalid string escapes (SyntaxWarning on
        # modern Pythons); inside a character class the backslashes were
        # redundant, so matching is unchanged.
        return re.sub(r'[^A-Za-z0-9_ ]', '_', str(filename)).replace(' ', '_')
    #def get_task_config(self, name):
    #    return luigi.configuration.get_config().get(self.task_family, name)
    def log_slurm_info(self, slurm_stderr):
        '''
        Parse the SLURM job id and elapsed time from salloc stderr of the
        following example form, and record them in the audit log:
        salloc: Granted job allocation 5836263
        srun: Job step created
        salloc: Relinquishing job allocation 5836263
        salloc: Job allocation 5836263 has been revoked.
        '''
        matches = re.search('[0-9]+', str(slurm_stderr))
        if matches:
            jobid = matches.group(0)
            # Write slurm execution time to audit log
            cmd = 'sacct -j {jobid} --noheader --format=elapsed'.format(jobid=jobid)
            (_, jobinfo_stdout, _) = self.ex_local(cmd)
            # FIX: raw string (was '([0-9\:\-]+)' with invalid escapes);
            # ':' and trailing '-' need no escaping in a character class.
            sacct_matches = re.findall(r'([0-9:-]+)', str(jobinfo_stdout))
            if len(sacct_matches) < 2:
                # FIX: log.warn() is a deprecated alias of log.warning()
                log.warning('Not enough matches from sacct for task %s: %s',
                            self.instance_name,
                            ', '.join(['Match: %s' % m for m in sacct_matches]))
            else:
                slurm_exectime_fmted = sacct_matches[1]
                # Date format needs to be handled differently if the days field is included
                if '-' in slurm_exectime_fmted:
                    tobj = time.strptime(slurm_exectime_fmted, '%d-%H:%M:%S')
                    # Keyword args instead of the easily-misread positional
                    # timedelta signature (days, seconds, micro, milli, min, hr)
                    self.slurm_exectime_sec = int(datetime.timedelta(
                        days=tobj.tm_mday,
                        hours=tobj.tm_hour,
                        minutes=tobj.tm_min,
                        seconds=tobj.tm_sec).total_seconds())
                else:
                    tobj = time.strptime(slurm_exectime_fmted, '%H:%M:%S')
                    self.slurm_exectime_sec = int(datetime.timedelta(
                        hours=tobj.tm_hour,
                        minutes=tobj.tm_min,
                        seconds=tobj.tm_sec).total_seconds())
                log.info('Slurm execution time for task %s was %ss',
                         self.instance_name,
                         self.slurm_exectime_sec)
                self.add_auditinfo('slurm_exectime_sec', int(self.slurm_exectime_sec))
            # Write this last, so as to get the main task exectime and slurm exectime together in
            # audit log later
            self.add_auditinfo('slurm_jobid', jobid)
# ================================================================================
class SlurmTask(SlurmHelpers, sciluigi.task.Task):
    '''
    SciLuigi task that mixes in SlurmHelpers, so its ex() calls run either
    locally or via SLURM depending on the slurminfo parameter.
    '''
    pass
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,559
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/task.py
|
'''
This module contains sciluigi's subclasses of luigi's Task class.
'''
import json
import luigi
import logging
import subprocess as sub
import warnings
import sciluigi.audit
import sciluigi.interface
import sciluigi.dependencies
import sciluigi.slurm
log = logging.getLogger('sciluigi-interface')
# ==============================================================================
def new_task(name, cls, workflow_task, **kwargs):
    '''
    Instantiate a new task. Not supposed to be used by the end-user
    (use WorkflowTask.new_task() instead).

    Since luigi passes parameters as strings, non-string values are
    serialized (JSON when possible, str() otherwise), except SlurmInfo
    objects, which are captured and re-attached to the task afterwards.
    '''
    slurminfo = None
    # Iterate over a snapshot of the items, since kwargs is mutated below
    for key, val in list(kwargs.items()):
        # Handle non-string keys
        if not isinstance(key, str):
            raise Exception("Key in kwargs to new_task is not string. Must be string: %s" % key)
        # Handle non-string values
        if isinstance(val, sciluigi.slurm.SlurmInfo):
            slurminfo = val
        elif not isinstance(val, str):
            try:
                kwargs[key] = json.dumps(val) # Force conversion into string
            except TypeError:
                kwargs[key] = str(val)
    kwargs['instance_name'] = name
    kwargs['workflow_task'] = workflow_task
    kwargs['slurminfo'] = slurminfo
    with warnings.catch_warnings():
        # We are deliberately hacking Luigi's parameter system to use for
        # storing upstream tasks, thus this warning is not really helpful.
        warnings.filterwarnings('ignore',
                                category=UserWarning,
                                message='Parameter "workflow_task".*is not of type string')
        newtask = cls.from_str_params(kwargs)
    if slurminfo is not None:
        newtask.slurminfo = slurminfo
    return newtask
class Task(sciluigi.audit.AuditTrailHelpers, sciluigi.dependencies.DependencyHelpers, luigi.Task):
    '''
    SciLuigi Task, implementing SciLuigi specific functionality for dependency resolution
    and audit trail logging.
    '''
    workflow_task = luigi.Parameter()
    instance_name = luigi.Parameter()
    def ex_local(self, command):
        '''
        Execute command locally (not through resource manager) and return
        (retcode, stdout, stderr). Raises on non-zero exit status.
        '''
        # If list, convert to string
        if isinstance(command, list):
            command = sub.list2cmdline(command)
        log.info('Executing command: ' + str(command))
        proc = sub.Popen(command, shell=True, stdout=sub.PIPE, stderr=sub.PIPE, text=True)
        stdout, stderr = proc.communicate()
        retcode = proc.returncode
        if stderr:
            log.debug('Stderr from command: %s', stderr)
        if retcode == 0:
            return (retcode, stdout, stderr)
        errmsg = ('Command failed (retcode {ret}): {cmd}\n'
                  'Command output: {out}\n'
                  'Command stderr: {err}').format(
            ret=retcode,
            cmd=command,
            out=stdout,
            err=stderr)
        log.error(errmsg)
        raise Exception(errmsg)
    def ex(self, command):
        '''
        Execute command. This is a short-hand function, to be overridden e.g. if supporting
        execution via SLURM
        '''
        return self.ex_local(command)
# ==============================================================================
class ExternalTask(
        sciluigi.audit.AuditTrailHelpers,
        sciluigi.dependencies.DependencyHelpers,
        luigi.ExternalTask):
    '''
    SciLuigi specific implementation of luigi.ExternalTask, representing existing
    files (inputs produced outside the workflow).
    '''
    workflow_task = luigi.Parameter()  # The WorkflowTask this task belongs to
    instance_name = luigi.Parameter()  # Unique name of this task instance within the workflow
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,560
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/dependencies.py
|
'''
This module contains functionality for dependency resolution for constructing
the dependency graph of workflows.
'''
import luigi
import warnings
from luigi.contrib.postgres import PostgresTarget
from luigi.contrib.s3 import S3Target
# ==============================================================================
class TargetInfo(object):
    '''
    Class to be used for sending specification of which target, from which
    task, to use, when stitching workflow tasks' outputs and inputs together.

    Wraps a luigi.LocalTarget; subclasses wrap other target types.
    '''
    task = None    # The task instance that produces this target
    path = None    # File system path of the target
    target = None  # The wrapped luigi target object
    def __init__(self, task, path, format=None, is_tmp=False):
        self.task = task
        self.path = path
        self.target = luigi.LocalTarget(path, format, is_tmp)
    def open(self, *args, **kwargs):
        '''
        Forward open method, from luigi's target class
        '''
        return self.target.open(*args, **kwargs)
# ==============================================================================
class S3TargetInfo(TargetInfo):
    '''
    TargetInfo variant for targets stored in S3 (wraps luigi's S3Target).
    '''
    def __init__(self, task, path, format=None, client=None):
        self.task = task
        self.path = path
        self.target = S3Target(path, format=format, client=client)
# ==============================================================================
class PostgresTargetInfo(TargetInfo):
    '''
    TargetInfo variant for targets stored in a PostgreSQL database
    (wraps luigi's PostgresTarget marker-table mechanism).
    '''
    def __init__(self, task, host, database, user, password, update_id, table=None, port=None):
        self.task = task
        self.host = host
        self.database = database
        self.user = user
        self.password = password
        self.update_id = update_id  # Identifier marking this update as done in the marker table
        self.table = table
        self.port = port
        self.target = PostgresTarget(host=host, database=database, user=user, password=password, table=table, update_id=update_id, port=port)
# ==============================================================================
class DependencyHelpers(object):
    '''
    Mixin implementing methods for supporting dynamic, and target-based
    workflow definition, as opposed to the task-based one in vanilla luigi.

    Inputs are attributes named in_*, outputs are attributes named out_*;
    both may be TargetInfo objects, callables returning them, or
    (recursively) lists/dicts of either.
    '''
    # --------------------------------------------------------
    # Handle inputs
    # --------------------------------------------------------
    def requires(self):
        '''
        Implement luigi API method by returning upstream tasks
        '''
        return self._upstream_tasks()
    def _upstream_tasks(self):
        '''
        Extract upstream tasks from the TargetInfo objects
        or functions returning those (or lists of both the earlier)
        for use in luigi's requires() method.
        '''
        upstream_tasks = set()
        for attrname, attrval in self.__dict__.items():
            if attrname.startswith('in_'):
                upstream_tasks = self._add_upstream_tasks(upstream_tasks, attrval)
        return list(upstream_tasks)
    def _add_upstream_tasks(self, tasks, new_tasks):
        '''
        Recursively loop through lists of TargetInfos, or
        callables returning TargetInfos, or lists of ...
        (repeat recursively) ... and return all tasks.
        '''
        if callable(new_tasks):
            new_tasks = new_tasks()
        if isinstance(new_tasks, TargetInfo):
            tasks.add(new_tasks.task)
        elif isinstance(new_tasks, list):
            for new_task in new_tasks:
                tasks = self._add_upstream_tasks(tasks, new_task)
        elif isinstance(new_tasks, dict):
            for _, new_task in new_tasks.items():
                tasks = self._add_upstream_tasks(tasks, new_task)
        else:
            # BUGFIX: previously referenced the undefined name 'val' here,
            # raising NameError instead of this informative message.
            raise Exception('Input value is neither callable, TargetInfo, nor list: %s' % new_tasks)
        return tasks
    # --------------------------------------------------------
    # Handle outputs
    # --------------------------------------------------------
    def output(self):
        '''
        Implement luigi API method
        '''
        return self._output_targets()
    def _output_targets(self):
        '''
        Extract output targets from the TargetInfo objects
        or functions returning those (or lists of both the earlier)
        for use in luigi's output() method.
        '''
        output_targets = []
        for attrname in dir(self):
            with warnings.catch_warnings():
                # Deliberately suppress this deprecation warning, as we are not
                # using the param_args property, only iterating through all
                # members of the class, which triggers the deprecation warning
                # just because of that.
                warnings.filterwarnings('ignore',
                                        category=DeprecationWarning,
                                        message='Use of param_args has been deprecated')
                attrval = getattr(self, attrname)
                if attrname.startswith('out_'):
                    output_targets = self._parse_outputitem(attrval, output_targets)
        return output_targets
    def _parse_outputitem(self, val, targets):
        '''
        Recursively loop through lists of TargetInfos, or
        callables returning TargetInfos, or lists of ...
        (repeat recursively) ... and return all targets.
        '''
        if callable(val):
            val = val()
        if isinstance(val, TargetInfo):
            targets.append(val.target)
        elif isinstance(val, list):
            for valitem in val:
                targets = self._parse_outputitem(valitem, targets)
        elif isinstance(val, dict):
            for _, valitem in val.items():
                targets = self._parse_outputitem(valitem, targets)
        else:
            raise Exception('Input item is neither callable, TargetInfo, nor list: %s' % val)
        return targets
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,561
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/workflow.py
|
'''
This module contains sciluigi's subclasses of luigi's Task class.
'''
import datetime
import luigi
import logging
import os
import sciluigi
import sciluigi.audit
import sciluigi.interface
import sciluigi.dependencies
import sciluigi.slurm
log = logging.getLogger('sciluigi-interface')
# ==============================================================================
class WorkflowTask(sciluigi.audit.AuditTrailHelpers, luigi.Task):
    '''
    SciLuigi-specific task, that has a method for implementing a (dynamic) workflow
    definition (workflow()).

    Also manages a per-workflow log file and an audit trail file, whose paths
    embed the workflow class name and a start timestamp.
    '''
    instance_name = luigi.Parameter(default='sciluigi_workflow')
    # NOTE(review): mutable class-level attributes are shared by all
    # WorkflowTask instances in the same process -- confirm this is intended
    # before running several workflows concurrently in one process.
    _tasks = {}
    _wfstart = ''
    _wflogpath = ''
    _hasloggedstart = False
    _hasloggedfinish = False
    _hasaddedhandler = False
    def _ensure_timestamp(self):
        '''
        Make sure that there is a time stamp for when the workflow started.
        '''
        if self._wfstart == '':
            # Assignment creates an instance attribute shadowing the class default
            self._wfstart = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M%S_%f')
    def get_wflogpath(self):
        '''
        Get the path to the workflow-specific log file (computed lazily,
        once per workflow).
        '''
        if self._wflogpath == '':
            self._ensure_timestamp()
            clsname = self.__class__.__name__.lower()
            logpath = 'log/workflow_' + clsname + '_started_{t}.log'.format(t=self._wfstart)
            self._wflogpath = logpath
        return self._wflogpath
    def get_auditdirpath(self):
        '''
        Get the path to the workflow-specific audit trail directory
        (holding one audit file per task).
        '''
        self._ensure_timestamp()
        clsname = self.__class__.__name__.lower()
        audit_dirpath = 'audit/.audit_%s_%s' % (clsname, self._wfstart)
        return audit_dirpath
    def get_auditlogpath(self):
        '''
        Get the path to the workflow-specific audit trail file.
        '''
        self._ensure_timestamp()
        clsname = self.__class__.__name__.lower()
        audit_dirpath = 'audit/workflow_%s_started_%s.audit' % (clsname, self._wfstart)
        return audit_dirpath
    def add_auditinfo(self, infotype, infolog):
        '''
        Add audit information to the audit log.
        '''
        return self._add_auditinfo(self.__class__.__name__.lower(), infotype, infolog)
    def workflow(self):
        '''
        SciLuigi API method. Implement your workflow here, and return the last task(s)
        of the dependency graph.
        '''
        raise WorkflowNotImplementedException(
            'workflow() method is not implemented, for ' + str(self))
    def requires(self):
        '''
        Implementation of Luigi API method.

        On first call, attaches a workflow-specific file handler to both the
        sciluigi and luigi loggers, then delegates to the user-defined
        workflow() method to obtain the dependency graph.
        '''
        if not self._hasaddedhandler:
            wflog_formatter = logging.Formatter(
                sciluigi.interface.LOGFMT_STREAM,
                sciluigi.interface.DATEFMT)
            wflog_file_handler = logging.FileHandler(self.output()['log'].path)
            wflog_file_handler.setLevel(logging.INFO)
            wflog_file_handler.setFormatter(wflog_formatter)
            log.addHandler(wflog_file_handler)
            luigilog = logging.getLogger('luigi-interface')
            luigilog.addHandler(wflog_file_handler)
            self._hasaddedhandler = True
        clsname = self.__class__.__name__
        if not self._hasloggedstart:
            log.info('-'*80)
            log.info('SciLuigi: %s Workflow Started (logging to %s)', clsname, self.get_wflogpath())
            log.info('-'*80)
            self._hasloggedstart = True
        workflow_output = self.workflow()
        if workflow_output is None:
            clsname = self.__class__.__name__
            raise Exception(('Nothing returned from workflow() method in the %s Workflow task. '
                             'Forgot to add a return statement at the end?') % clsname)
        return workflow_output
    def output(self):
        '''
        Implementation of Luigi API method.

        Returns the workflow log file and the audit trail file as targets.
        '''
        return {'log': luigi.LocalTarget(self.get_wflogpath()),
                'audit': luigi.LocalTarget(self.get_auditlogpath())}
    def run(self):
        '''
        Implementation of Luigi API method.

        Concatenates the per-task audit files into the workflow audit file.
        '''
        if self.output()['audit'].exists():
            errmsg = ('Audit file already exists, '
                      'when trying to create it: %s') % self.output()['audit'].path
            log.error(errmsg)
            raise Exception(errmsg)
        else:
            with self.output()['audit'].open('w') as auditfile:
                for taskname in sorted(self._tasks):
                    taskaudit_path = os.path.join(self.get_auditdirpath(), taskname)
                    if os.path.exists(taskaudit_path):
                        auditfile.write(open(taskaudit_path).read() + '\n')
        clsname = self.__class__.__name__
        if not self._hasloggedfinish:
            log.info('-'*80)
            log.info('SciLuigi: %s Workflow Finished (workflow log at %s)', clsname, self.get_wflogpath())
            log.info('-'*80)
            self._hasloggedfinish = True
    def new_task(self, instance_name, cls, **kwargs):
        '''
        Create new task instance, and link it to the current workflow.
        '''
        newtask = sciluigi.new_task(instance_name, cls, self, **kwargs)
        self._tasks[instance_name] = newtask
        return newtask
# ================================================================================
class WorkflowNotImplementedException(Exception):
    '''
    Exception to throw if the workflow() SciLuigi API method is not implemented
    in a WorkflowTask subclass.
    '''
    pass
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,562
|
pharmbio/sciluigi
|
refs/heads/master
|
/sciluigi/interface.py
|
'''
This module contains mappings of methods that are part of the sciluigi API
'''
import luigi
import logging
import sciluigi.util
LOGFMT_STREAM = '%(asctime)s | %(levelname)8s | %(message)s'
LOGFMT_LUIGI = '%(asctime)s %(levelname)8s LUIGI %(message)s'
LOGFMT_SCILUIGI = '%(asctime)s %(levelname)8s SCILUIGI %(message)s'
DATEFMT = '%Y-%m-%d %H:%M:%S'
def setup_logging():
    '''
    Set up SciLuigi specific logging: an INFO-level stream handler on stderr,
    plus DEBUG-level per-run log files for both the luigi and sciluigi loggers
    (both write to the same file, with formatters tagging the origin).
    '''
    sciluigi.util.ensuredir('log')
    log_path = 'log/sciluigi_run_%s_detailed.log' % sciluigi.util.timepath()
    # Formatters
    stream_formatter = logging.Formatter(LOGFMT_STREAM, DATEFMT)
    luigi_log_formatter = logging.Formatter(LOGFMT_LUIGI, DATEFMT)
    sciluigi_log_formatter = logging.Formatter(LOGFMT_SCILUIGI, DATEFMT)
    # Stream handler (for STDERR)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(stream_formatter)
    stream_handler.setLevel(logging.INFO)
    # File handlers
    luigi_file_handler = logging.FileHandler(log_path)
    luigi_file_handler.setFormatter(luigi_log_formatter)
    luigi_file_handler.setLevel(logging.DEBUG)
    sciluigi_file_handler = logging.FileHandler(log_path)
    sciluigi_file_handler.setFormatter(sciluigi_log_formatter)
    sciluigi_file_handler.setLevel(logging.DEBUG)
    # Loggers
    luigi_logger = logging.getLogger('luigi-interface')
    luigi_logger.addHandler(luigi_file_handler)
    luigi_logger.addHandler(stream_handler)
    # FIX: WARNING is the documented constant; WARN is an undocumented alias
    luigi_logger.setLevel(logging.WARNING)
    sciluigi_logger = logging.getLogger('sciluigi-interface')
    sciluigi_logger.addHandler(stream_handler)
    sciluigi_logger.addHandler(sciluigi_file_handler)
    sciluigi_logger.setLevel(logging.DEBUG)
setup_logging()
def run(*args, **kwargs):
    '''
    Forwarding luigi's run method (accepts the same arguments as luigi.run).
    '''
    luigi.run(*args, **kwargs)
def run_local(*args, **kwargs):
    '''
    Forwarding luigi's run method, using luigi's local (in-process) scheduler.
    '''
    run(*args, local_scheduler=True, **kwargs)
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,563
|
pharmbio/sciluigi
|
refs/heads/master
|
/examples/example2_ngi.py
|
import logging
import luigi
import sciluigi as sl
import math
from subprocess import call
import requests
import time
# ------------------------------------------------------------------------
# Init logging
# ------------------------------------------------------------------------
log = logging.getLogger('sciluigi-interface')
# ------------------------------------------------------------------------
# Workflow class
# ------------------------------------------------------------------------
class NGITestWF(sl.WorkflowTask):
    '''
    Example workflow chaining rsync, a long-running task, a web request,
    and a split/process/merge pipeline.
    '''
    task = luigi.Parameter() # Task to return, chosable on commandline
    def workflow(self):
        # Rsync a folder
        rsync = sl.new_task('rsync', RSyncAFolder, self,
                            src_dir_path='data/afolder',
                            dest_dir_path='data/afolder_rsynced')
        # Run a program that takes 10 minutes (seconds)
        run10min = sl.new_task('run10min', Run10MinuteSleep, self)
        run10min.in_upstream = rsync.out_destdir
        # Do a web request
        # BUGFIX: give each task a unique instance name (previously 'run10min'
        # was reused for webreq and split, clobbering entries in the
        # workflow's task registry and audit trail)
        webreq = sl.new_task('webreq', DoWebRequest, self)
        webreq.in_upstream = run10min.out_doneflag
        # Split a file
        rawdata = sl.new_task('rawdata', ExistingData, self,
                              file_name='acgt.txt')
        split = sl.new_task('split', SplitAFile, self)
        split.in_data = rawdata.out_acgt
        # Run the same task on the two splits
        dosth1 = sl.new_task('dosth1', DoSomething, self)
        dosth1.in_data = split.out_part1
        dosth2 = sl.new_task('dosth2', DoSomething, self)
        dosth2.in_data = split.out_part2
        # Merge the results
        merge = sl.new_task('merge', MergeFiles, self)
        merge.in_part1 = dosth1.out_data
        merge.in_part2 = dosth2.out_data
        # Return the local variable named by the 'task' parameter
        return locals()[self.task]
# ------------------------------------------------------------------------
# Task classes
# ------------------------------------------------------------------------
# Rsync a folder
class RSyncAFolder(sl.Task):
    '''
    Copy a directory tree to a destination directory using rsync.
    '''
    # Params
    src_dir_path = luigi.Parameter()
    dest_dir_path = luigi.Parameter()
    # I/O
    def out_destdir(self):
        return sl.TargetInfo(self, self.dest_dir_path)
    # Impl
    def run(self):
        cmd = 'rsync -a {src}/ {dest}/'.format(src=self.src_dir_path,
                                               dest=self.dest_dir_path)
        call(cmd, shell=True)
# Run a program that takes 10 minutes (seconds now, for a try) to run
class Run10MinuteSleep(sl.Task):
    '''
    Simulate a long-running ("10 minute") program by sleeping for 10 seconds,
    then writing a flag file.
    '''
    # I/O
    in_upstream = None
    def out_doneflag(self):
        return sl.TargetInfo(self, self.in_upstream().path + '.10mintask_done')
    # Impl
    def run(self):
        time.sleep(10)
        with self.out_doneflag().open('w') as flag:
            flag.write('Done!')
# Perform a web request
class DoWebRequest(sl.Task):
    '''
    Fetch a web page and write a flag file on success (HTTP 200).
    '''
    # I/O
    in_upstream = None
    def out_doneflag(self):
        return sl.TargetInfo(self, self.in_upstream().path + '.webrequest_done')
    # Impl
    def run(self):
        resp = requests.get('http://nbis.se')
        if resp.status_code == 200:
            with self.out_doneflag().open('w') as flag:
                flag.write('Web Request Task Done!')
        else:
            log.error('Web request failed!')
class ExistingData(sl.ExternalTask):
    '''
    External task representing a pre-existing raw data file under data/.
    '''
    # Params
    file_name = luigi.Parameter(default='acgt.txt')  # File name under the data/ directory
    # I/O
    def out_acgt(self):
        '''Target pointing at the existing data file.'''
        return sl.TargetInfo(self, 'data/' + self.file_name)
class SplitAFile(sl.Task):
    '''
    Split the input file line-wise into two halves (head and tail).
    '''
    # I/O
    in_data = None
    def out_part1(self):
        return sl.TargetInfo(self, self.in_data().path + '.part1')
    def out_part2(self):
        return sl.TargetInfo(self, self.in_data().path + '.part2')
    # Impl
    def run(self):
        cmd = f'wc -l {self.in_data().path}'
        _, wc_output, _ = self.ex(cmd)
        # BUGFIX: split on any whitespace -- some wc implementations (e.g.
        # BSD/macOS) left-pad the count, so split(' ')[0] could yield ''
        # and crash int()
        lines_cnt = int(wc_output.split()[0])
        head_cnt = int(math.ceil(lines_cnt / 2))
        tail_cnt = int(math.floor(lines_cnt / 2))
        cmd_head = 'head -n {cnt} {i} > {part1}'.format(
            i=self.in_data().path,
            cnt=head_cnt,
            part1=self.out_part1().path)
        log.info("COMMAND: " + cmd_head)
        self.ex(cmd_head)
        self.ex('tail -n {cnt} {i} > {part2}'.format(
            cnt=tail_cnt,
            i=self.in_data().path,
            part2=self.out_part2().path))
class DoSomething(sl.Task):
    '''
    Run the same program on both parts of the split
    (lower-cases each input line).
    '''
    # I/O
    in_data = None
    def out_data(self):
        return sl.TargetInfo(self, self.in_data().path + '.something_done')
    # Impl
    def run(self):
        with self.in_data().open() as src, self.out_data().open('w') as dst:
            for row in src:
                dst.write(row.lower() + '\n')
class MergeFiles(sl.Task):
    '''
    Concatenate the two processed parts back into a single merged file.
    '''
    # I/O
    in_part1 = None
    in_part2 = None
    def out_merged(self):
        return sl.TargetInfo(self, self.in_part1().path + '.merged')
    # Impl
    def run(self):
        cmd = 'cat {f1} {f2} > {out}'.format(f1=self.in_part1().path,
                                             f2=self.in_part2().path,
                                             out=self.out_merged().path)
        self.ex(cmd)
# ------------------------------------------------------------------------
# Run as script
# ------------------------------------------------------------------------
if __name__ == '__main__':
    # Run the workflow locally; NGITestWF is defined earlier in this file.
    sl.run_local(main_task_cls=NGITestWF, cmdline_args=['--task=merge'])
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,564
|
pharmbio/sciluigi
|
refs/heads/master
|
/examples/example3_components.py
|
import luigi
import sciluigi as sl
import time
class T1(sl.Task):
    """Write the 'text' parameter to data/<text>.txt after a short delay."""

    # Parameter
    text = luigi.Parameter()

    # I/O
    def out_data1(self):
        return sl.TargetInfo(self, 'data/' + self.text + '.txt')

    # Implementation
    def run(self):
        time.sleep(1)
        out_target = self.out_data1()
        with out_target.open('w') as handle:
            handle.write(self.text)
# ========================================================================
class Merge(sl.Task):
    """Concatenate two upstream outputs, writing each input row plus a newline."""

    # I/O
    in_data1 = None
    in_data2 = None

    def out_merged(self):
        return sl.TargetInfo(self, self.in_data1().path + '.merged.txt')

    # Implementation
    def run(self):
        time.sleep(2)
        with self.in_data1().open() as first, \
                self.in_data2().open() as second, \
                self.out_merged().open('w') as merged:
            for line in first:
                merged.write(line + '\n')
            for line in second:
                merged.write(line + '\n')
|
{"/sciluigi/__init__.py": ["/sciluigi/interface.py", "/sciluigi/audit.py", "/sciluigi/dependencies.py", "/sciluigi/parameter.py", "/sciluigi/slurm.py", "/sciluigi/task.py", "/sciluigi/workflow.py", "/sciluigi/util.py"], "/examples/example3_workflow.py": ["/sciluigi/__init__.py"], "/test/test_dependencies.py": ["/sciluigi/__init__.py"], "/examples/example1.py": ["/sciluigi/__init__.py"], "/test/test_paramval.py": ["/sciluigi/__init__.py"], "/examples/example4_multiwf.py": ["/sciluigi/__init__.py"], "/sciluigi/slurm.py": ["/sciluigi/parameter.py", "/sciluigi/task.py"], "/sciluigi/task.py": ["/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/workflow.py": ["/sciluigi/__init__.py", "/sciluigi/audit.py", "/sciluigi/interface.py", "/sciluigi/dependencies.py", "/sciluigi/slurm.py"], "/sciluigi/interface.py": ["/sciluigi/util.py"], "/examples/example2_ngi.py": ["/sciluigi/__init__.py"], "/examples/example3_components.py": ["/sciluigi/__init__.py"]}
|
27,748
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/tests/base.py
|
import json
from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from expense_trackapp.models import Expense
class BaseTestCase(TestCase):
    """
    Shared fixtures and a custom endpoint-assertion helper for the API
    test suite. Creates two users, each owning one Expense.
    """

    def setUp(self):
        # Set users.
        self.user = User.objects.create_user(
            username='foobar',
            email='foo@bar.com',
            password='foobar'
        )
        self.user2 = User.objects.create_user(
            username='foobar2',
            email='foo@bar2.com',
            password='mypassword'
        )

        # Set objects.
        self.now = datetime.now()
        self.expense = Expense.objects.create(
            amount=float(666), user=self.user, date=self.now.date(), time=self.now.time())
        self.expense2 = Expense.objects.create(
            amount=float(999), user=self.user2, date=self.now.date(), time=self.now.time())

    def assertEndpoint(
            self, url_name, method, form_data, expected_response_data,
            expected_status_code, url_kwargs=None, manual_check=None,
            order=False, query_params=None):
        """
        Custom assert for checking endpoint response values and HTTP
        status codes.

        Manual check is a list of all the values from response data that needs
        to be manually checked (like hashed passwords). If there are any
        manual check values present, they are returned as a dict.

        If specific ordering is required inside returned data for testing
        purposes, set order to True.
        """
        # Fix: avoid the shared mutable-default-argument pitfall — a list
        # default would be shared across all calls of this helper.
        if manual_check is None:
            manual_check = []

        def sort_dict(dataset1, dataset2):
            """
            Checks the dictionaries for any lists, and sorts them.
            """
            for value1, value2 in zip(dataset1, dataset2):
                if isinstance(dataset1[value1], list):
                    dataset1[value1].sort()
                if isinstance(dataset2[value2], list):
                    dataset2[value2].sort()

        url = reverse(url_name, kwargs=url_kwargs)
        allowed_methods = {
            'get': self.client.get,
            'post': self.client.post,
            'put': self.client.put,
            'patch': self.client.patch,
            'delete': self.client.delete
        }

        # Convert query_params to url string format.
        if query_params:
            # Fix: .items() instead of the Python-2-only .iteritems(), so the
            # helper works on both Python 2 and Python 3.
            url_query_string = '&'.join(
                ['{}={}'.format(k, v) for k, v in query_params.items()])
            url = '?'.join([url, url_query_string])

        if method in allowed_methods:
            response = allowed_methods[method](url, form_data)
        else:
            raise ValueError('\'%s\' is not a supported method.' % method)

        # Separate any data that we need to manually check
        manual_data = {}
        for check in manual_check:
            if check in expected_response_data:
                expected_response_data.pop(check)
            if check in response.data:
                manual_data[check] = response.data.pop(check)

        # Convert OrderedDict to dict
        response_data = json.loads(json.dumps(response.data))
        if response_data:
            # Check for pagination
            if 'results' in response_data:
                response_data = response_data['results']

            # If there are lists present, sort them
            if isinstance(response_data, list):
                response_data.sort()
                expected_response_data.sort()
                for data1, data2 in zip(response_data, expected_response_data):
                    if isinstance(data1, dict) and isinstance(data2, dict):
                        sort_dict(data1, data2)
            elif isinstance(response_data, dict) and order:
                # Caller requested that returned ordering be preserved.
                pass
            elif isinstance(response_data, dict):
                sort_dict(response_data, expected_response_data)
            else:
                raise ValueError('Response data must be a list or a dict.')

        self.assertEqual(response_data, expected_response_data)
        self.assertEqual(response.status_code, expected_status_code)
        return manual_data
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,749
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/tests/tests.py
|
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APIClient
from expense_trackapp.models import Expense
from .base import BaseTestCase
class AccountTest(BaseTestCase):
    """Endpoint tests for account login and registration."""
    def test_login(self):
        """
        Login test.
        """
        # Log our user once so a token is generated.
        form_data = {'username': 'foobar', 'password': 'foobar'}
        self.client.post(reverse('account_login'), form_data)
        token = Token.objects.get()
        return_data = {
            'token': token.key
        }
        # Ensure that successful login will return a user token.
        self.assertEndpoint(
            'account_login', 'post', form_data, return_data, status.HTTP_200_OK)
        self.assertEqual(token.user, self.user)
        # Unsuccessful login 400 error.
        form_data = {'username': 'foobar1', 'password': 'foobar'}
        return_data = {
            'non_field_errors': ['Unable to log in with provided credentials.']
        }
        self.assertEndpoint(
            'account_login', 'post', form_data, return_data, status.HTTP_400_BAD_REQUEST)
    def test_register(self):
        """
        Register test.
        """
        # Ensure that successful register will return a user object.
        form_data = {
            'username': 'foobar1',
            'password': 'foobar1',
            'confirm_password': 'foobar1',
            'email': 'foo@bar1.com'
        }
        return_data = {
            'username': 'foobar1',
            'email': 'foo@bar1.com'
        }
        self.assertEndpoint(
            'account_register', 'post', form_data, return_data, status.HTTP_201_CREATED)
        # Register with same username/email error.
        return_data = {
            'email': ['This field must be unique.'],
            'username': ['A user with that username already exists.']
        }
        self.assertEndpoint(
            'account_register', 'post', form_data, return_data, status.HTTP_400_BAD_REQUEST)
        # Ensure that all register fields are required.
        form_data = {}
        return_data = {
            'username': ['This field is required.'],
            'email': ['This field is required.'],
            'password': ['This field is required.']
        }
        self.assertEndpoint(
            'account_register', 'post', form_data, return_data, status.HTTP_400_BAD_REQUEST)
        # Mismatching passwords error.
        form_data = {
            'username': 'foobar3',
            'password': 'foobar3',
            'confirm_password': 'foobar1',
            'email': 'foo@bar3.com'
        }
        return_data = {
            'non_field_errors': ['Passwords must match.']
        }
        self.assertEndpoint(
            'account_register', 'post', form_data, return_data, status.HTTP_400_BAD_REQUEST)
class ExpensesTest(BaseTestCase):
    """CRUD endpoint tests for Expense records, covering owner, other-user
    (403) and superuser access paths."""
    def setUp(self):
        super(ExpensesTest, self).setUp()
        # Set authorization.
        self.token = Token.objects.create(user=self.user)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
        # Set url kwargs.
        self.user1_url_kwargs = {
            'username': self.user.username,
            'pk': self.expense.pk
        }
        self.user2_url_kwargs = {
            'username': self.user2.username,
            'pk': self.expense2.pk
        }
        # Create new expense for self.user (dated yesterday, for date filters).
        self.expense3 = Expense.objects.create(
            amount=float(333),
            user=self.user,
            date=self.now.date() - timedelta(days=1),
            time=self.now.time()
        )
        # Set return data (serialized form of the fixture expenses).
        self.expense_return_data = {
            'amount': '666.00',
            'comment': '',
            'date': str(self.now.date()),
            'description': '',
            'pk': self.expense.pk,
            'time': str(self.now.time()),
            'user': 'foobar'
        }
        self.expense2_return_data = {
            'amount': '999.00',
            'comment': '',
            'date': str(self.now.date()),
            'description': '',
            'pk': self.expense2.pk,
            'time': str(self.now.time()),
            'user': 'foobar2'
        }
        self.expense3_return_data = {
            'amount': '333.00',
            'comment': '',
            'date': str(self.now.date() - timedelta(days=1)),
            'description': '',
            'pk': self.expense3.pk,
            'time': str(self.now.time()),
            'user': 'foobar'
        }
    def test_list(self):
        """
        List of expenses test.
        """
        form_data = {}
        return_data = [self.expense_return_data, self.expense3_return_data]
        # Set url kwargs (list endpoints take no pk).
        self.user1_url_kwargs.pop('pk')
        self.user2_url_kwargs.pop('pk')
        # User can GET only its own records.
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
        )
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user2_url_kwargs
        )
        # Admin can GET everything.
        self.user.is_superuser = True
        self.user.save()
        return_data.append(self.expense2_return_data)
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
        )
    def test_list_filter_date(self):
        """
        Filtered expense list test.
        """
        form_data = {}
        return_data = [self.expense_return_data, self.expense3_return_data]
        # Set url kwargs.
        self.user1_url_kwargs.pop('pk')
        self.user2_url_kwargs.pop('pk')
        # Set query parameters (date range: two days ago to today).
        query_params = {
            'date_0': str(self.now.date() - timedelta(days=2)),
            'date_1': str(self.now.date())
        }
        # Filter all todays and yesterdays dates.
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
        # Filter only yesterdays dates.
        query_params = {
            'date_0': str(self.now.date() - timedelta(days=2)),
            'date_1': str(self.now.date() - timedelta(days=1))
        }
        return_data = [self.expense3_return_data, ]
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
    def test_list_filter_time(self):
        """
        Filtered expense list test.
        """
        form_data = {}
        return_data = [self.expense_return_data, self.expense3_return_data]
        # Move expense3 two hours into the past so the time filter can split.
        new_time = (self.now - timedelta(hours=2)).time()
        self.expense3.time = new_time
        self.expense3.save()
        self.expense3_return_data['time'] = str(new_time)
        # Set url kwargs.
        self.user1_url_kwargs.pop('pk')
        self.user2_url_kwargs.pop('pk')
        # Set query parameters.
        query_params = {
            'time_0': str((self.now - timedelta(hours=3)).time()),
            'time_1': str(self.now.time())
        }
        # Filter all times.
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
        # Filter only time from an hour ago.
        query_params = {
            'time_0': str((self.now - timedelta(hours=1)).time()),
            'time_1': str(self.now.time())
        }
        return_data = [self.expense_return_data, ]
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
    def test_list_filter_amount(self):
        """
        Filtered expense list test.
        """
        form_data = {}
        return_data = [self.expense_return_data, self.expense3_return_data]
        # Set url kwargs.
        self.user1_url_kwargs.pop('pk')
        self.user2_url_kwargs.pop('pk')
        # Set query parameters.
        query_params = {
            'amount_0': '300.00',
            'amount_1': '700.00'
        }
        # Filter all amounts.
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
        # Filter only amounts below 600.
        query_params = {
            'amount_0': '300.00',
            'amount_1': '600.00'
        }
        return_data = [self.expense3_return_data, ]
        self.assertEndpoint(
            'expense_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
            query_params=query_params
        )
    def test_detail(self):
        """
        Single expense test.
        """
        form_data = {}
        # User can GET only its own records.
        self.assertEndpoint(
            'expense_detail',
            'get',
            form_data,
            self.expense_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
        )
        self.assertEndpoint(
            'expense_detail',
            'get',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user2_url_kwargs
        )
        # Admin can GET everything.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'expense_detail',
            'get',
            form_data,
            self.expense2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs,
        )
    def test_create(self):
        """
        Create expense test.
        """
        form_data = {
            'amount': float(222),
            'date': str(self.now.date()),
            'time': str(self.now.time())
        }
        # Expected response; 'pk' is checked manually since it is generated.
        self.expense4_return_data = {
            'amount': '222.00',
            'comment': '',
            'date': str(self.now.date()),
            'description': '',
            'time': str(self.now.time()),
            'user': 'foobar'
        }
        # Set url kwargs.
        self.user1_url_kwargs.pop('pk')
        self.user2_url_kwargs.pop('pk')
        # User can create only its own records.
        self.assertEndpoint(
            'expense_list',
            'post',
            form_data,
            self.expense4_return_data,
            status.HTTP_201_CREATED,
            url_kwargs=self.user1_url_kwargs,
            manual_check=['pk', ]
        )
        # NOTE(review): 'value' looks like it was meant to be 'amount';
        # harmless here since a 403 is expected — TODO confirm.
        form_data['value'] = float(999)
        self.assertEndpoint(
            'expense_list',
            'post',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user2_url_kwargs
        )
        # Admin can create a record for any user.
        self.user.is_superuser = True
        self.user.save()
        # Non specified user will be the current user.
        self.assertEndpoint(
            'expense_list',
            'post',
            form_data,
            self.expense4_return_data,
            status.HTTP_201_CREATED,
            url_kwargs=self.user1_url_kwargs,
            manual_check=['pk', ]
        )
        # Specified user.
        form_data['user'] = 'foobar2'
        self.expense4_return_data['user'] = 'foobar2'
        self.assertEndpoint(
            'expense_list',
            'post',
            form_data,
            self.expense4_return_data,
            status.HTTP_201_CREATED,
            url_kwargs=self.user1_url_kwargs,
            manual_check=['pk', ]
        )
        # Required fields.
        form_data = {}
        self.assertEndpoint(
            'expense_list',
            'post',
            form_data,
            {'amount': ['This field is required.']},
            status.HTTP_400_BAD_REQUEST,
            url_kwargs=self.user1_url_kwargs,
        )
    def test_update(self):
        """
        Update expense test.
        """
        form_data = {
            'amount': float(777)
        }
        self.expense_return_data['amount'] = '777.00'
        # User can only update its own records.
        self.assertEndpoint(
            'expense_detail',
            'put',
            form_data,
            self.expense_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user1_url_kwargs,
        )
        self.assertEndpoint(
            'expense_detail',
            'put',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user2_url_kwargs
        )
        # Admin can update a record for any user.
        self.user.is_superuser = True
        self.user.save()
        form_data = {
            'amount': float(888)
        }
        self.expense2_return_data['amount'] = '888.00'
        self.assertEndpoint(
            'expense_detail',
            'put',
            form_data,
            self.expense2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs,
        )
        # Required fields.
        form_data = {}
        self.assertEndpoint(
            'expense_detail',
            'put',
            form_data,
            {'amount': ['This field is required.']},
            status.HTTP_400_BAD_REQUEST,
            url_kwargs=self.user1_url_kwargs,
        )
    def test_delete(self):
        """
        Delete expense test.
        """
        form_data = {}
        # User can only delete its own records.
        self.assertEndpoint(
            'expense_detail',
            'delete',
            form_data,
            None,
            status.HTTP_204_NO_CONTENT,
            url_kwargs=self.user1_url_kwargs,
        )
        self.assertEndpoint(
            'expense_detail',
            'delete',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user2_url_kwargs
        )
        # Admin can delete a record for any user.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'expense_detail',
            'delete',
            form_data,
            None,
            status.HTTP_204_NO_CONTENT,
            url_kwargs=self.user2_url_kwargs,
        )
class UsersTest(BaseTestCase):
    """CRUD endpoint tests for User records, covering regular-user (403),
    superuser and staff ("manager") access paths."""
    def setUp(self):
        super(UsersTest, self).setUp()
        # Set authorization.
        self.token = Token.objects.create(user=self.user)
        self.client = APIClient()
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)
        # Set url kwargs.
        self.user1_url_kwargs = {
            'username': self.user.username
        }
        self.user2_url_kwargs = {
            'username': self.user2.username
        }
        # Set return data.
        self.user_return_data = {
            'username': 'foobar',
            'email': 'foo@bar.com'
        }
        self.user2_return_data = {
            'username': 'foobar2',
            'email': 'foo@bar2.com'
        }
    def test_list(self):
        """
        List of users test.
        """
        form_data = {}
        return_data = [self.user_return_data, self.user2_return_data]
        # Regular user is denied access to the user list.
        self.assertEndpoint(
            'user_list',
            'get',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
        )
        # Admin can GET a full user list.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'user_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
        )
        # Manager can GET a full user list.
        self.user.is_staff = True
        self.user.is_superuser = False
        self.user.save()
        self.assertEndpoint(
            'user_list',
            'get',
            form_data,
            return_data,
            status.HTTP_200_OK,
        )
    def test_detail(self):
        """
        Single user test.
        """
        form_data = {}
        # User cannot get its own details.
        self.assertEndpoint(
            'user_detail',
            'get',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user1_url_kwargs,
        )
        # Admin can GET any user.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'user_detail',
            'get',
            form_data,
            self.user2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs,
        )
        # Manager can GET any user.
        self.user.is_staff = True
        self.user.is_superuser = False
        self.user.save()
        self.assertEndpoint(
            'user_detail',
            'get',
            form_data,
            self.user2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs,
        )
    def test_create(self):
        """
        Create user test.
        """
        form_data = {
            'username': 'foobar3',
            'email': 'foo@bar3.com',
            'password': 'mypassword',
            'confirm_password': 'mypassword'
        }
        self.user3_return_data = {
            'username': 'foobar3',
            'email': 'foo@bar3.com',
        }
        # User cannot create other users.
        self.assertEndpoint(
            'user_list',
            'post',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
        )
        # Admin can create users.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'user_list',
            'post',
            form_data,
            self.user3_return_data,
            status.HTTP_201_CREATED,
        )
        # Manager can create users.
        self.user.is_superuser = False
        self.user.is_staff = True
        self.user.save()
        # Remove the just-created user so the same form data can be reused.
        User.objects.get(username=form_data['username']).delete()
        self.assertEndpoint(
            'user_list',
            'post',
            form_data,
            self.user3_return_data,
            status.HTTP_201_CREATED,
        )
    def test_update(self):
        """
        Update user test.
        """
        form_data = {
            'email': 'brandnew@email.com',
            'user_type': 'is_staff'
        }
        self.user2_return_data['email'] = 'brandnew@email.com'
        # User cannot update users.
        self.assertEndpoint(
            'user_detail',
            'patch',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user1_url_kwargs
        )
        # Admin can update users.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'user_detail',
            'patch',
            form_data,
            self.user2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs
        )
        # Reload user2 from the database to see the persisted change.
        self.user2 = User.objects.get(id=self.user2.id)
        self.assertTrue(self.user2.is_staff)
        # Manager can update users.
        self.user.is_superuser = False
        self.user.is_staff = True
        self.user.save()
        form_data['user_type'] = 'is_superuser'
        self.assertEndpoint(
            'user_detail',
            'patch',
            form_data,
            self.user2_return_data,
            status.HTTP_200_OK,
            url_kwargs=self.user2_url_kwargs
        )
        self.user2 = User.objects.get(id=self.user2.id)
        self.assertTrue(self.user2.is_superuser)
    def test_delete(self):
        """
        Delete user test.
        """
        form_data = {}
        # User cannot delete itself.
        self.assertEndpoint(
            'user_detail',
            'delete',
            form_data,
            {'detail': 'You do not have permission to perform this action.'},
            status.HTTP_403_FORBIDDEN,
            url_kwargs=self.user1_url_kwargs
        )
        # Admin can delete users.
        self.user.is_superuser = True
        self.user.save()
        self.assertEndpoint(
            'user_detail',
            'delete',
            form_data,
            None,
            status.HTTP_204_NO_CONTENT,
            url_kwargs=self.user2_url_kwargs
        )
        # Recreate user2 so the manager branch can delete it again.
        self.user2 = User.objects.create_user(
            username='foobar2',
            email='foo@bar2.com',
            password='mypassword'
        )
        # Manager can delete users.
        self.assertEndpoint(
            'user_detail',
            'delete',
            form_data,
            None,
            status.HTTP_204_NO_CONTENT,
            url_kwargs=self.user2_url_kwargs
        )
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,750
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/tests/tests_unit.py
|
from .base import BaseTestCase
from ..permissions import IsOwnerOrAdmin, IsManagerOrAdmin
from ..views import ExpenseViewSet, UserViewSet
from ..serializers import UserSerializer
from django.contrib.auth.models import User
from rest_framework import serializers
from mock import MagicMock, call
class PermissionsTest(BaseTestCase):
    """Unit tests for the IsOwnerOrAdmin and IsManagerOrAdmin permission
    classes, using mocked request/view objects."""
    def setUp(self):
        super(PermissionsTest, self).setUp()
        self.user.is_superuser = True
        self.user.save()
        # Mock out the DRF request/view pair the permission classes inspect.
        self.request = MagicMock(user=self.user)
        self.view = MagicMock(kwargs={'username': 'foobar'})
        self.is_owner_or_admin = IsOwnerOrAdmin()
        self.is_manager_or_admin = IsManagerOrAdmin()
    def test_is_owner_or_admin_has_permission(self):
        """
        has_permission test.
        """
        # Superuser check.
        self.assertTrue(
            self.is_owner_or_admin.has_permission(self.request, self.view))
        # Allowed user check.
        self.user.is_superuser = False
        self.user.save()
        self.assertTrue(
            self.is_owner_or_admin.has_permission(self.request, self.view))
        # Denied permission check.
        self.view.kwargs['username'] = 'wrong_user'
        self.assertFalse(
            self.is_owner_or_admin.has_permission(self.request, self.view))
    def test_is_owner_or_admin_has_object_permission(self):
        """
        has_object_permission test.
        """
        obj = MagicMock(user=self.user)
        # Superuser check.
        self.assertTrue(self.is_owner_or_admin.has_object_permission(
            self.request, self.view, obj))
        # Allowed user check.
        self.user.is_superuser = False
        self.user.save()
        self.assertTrue(self.is_owner_or_admin.has_object_permission(
            self.request, self.view, obj))
        # Denied permission check.
        obj.user = User.objects.create_user(
            username='foobar3', email='foo@bar3.com', password='mypassword'
        )
        self.assertFalse(self.is_owner_or_admin.has_object_permission(
            self.request, self.view, obj))
    def test_is_manager_or_admin_has_permission(self):
        """
        has_permission test.
        """
        # Superuser check.
        self.assertTrue(
            self.is_manager_or_admin.has_permission(self.request, self.view))
        # Manager check.
        self.user.is_superuser = False
        self.user.is_staff = True
        self.user.save()
        self.assertTrue(
            self.is_manager_or_admin.has_permission(self.request, self.view))
        # User denied check.
        self.user.is_superuser = False
        self.user.is_staff = False
        self.user.save()
        self.assertFalse(
            self.is_manager_or_admin.has_permission(self.request, self.view))
        # Denied permission check.
        self.view.kwargs['username'] = 'wrong_user'
        self.assertFalse(
            self.is_manager_or_admin.has_permission(self.request, self.view))
    def test_is_manager_or_admin_has_object_permission(self):
        """
        has_object_permission test.
        """
        obj = MagicMock(user=self.user)
        # Superuser check.
        self.assertTrue(self.is_manager_or_admin.has_object_permission(
            self.request, self.view, obj))
        # Manager check.
        self.user.is_superuser = False
        self.user.is_staff = True
        self.user.save()
        self.assertTrue(self.is_manager_or_admin.has_object_permission(
            self.request, self.view, obj))
        # User denied check.
        self.user.is_superuser = False
        self.user.is_staff = False
        self.user.save()
        self.assertFalse(self.is_manager_or_admin.has_object_permission(
            self.request, self.view, obj))
        # Denied permission check.
        obj.user = User.objects.create_user(
            username='foobar3', email='foo@bar3.com', password='mypassword'
        )
        self.assertFalse(self.is_manager_or_admin.has_object_permission(
            self.request, self.view, obj))
class ExpenseViewSetTest(BaseTestCase):
    """Unit tests for ExpenseViewSet.get_queryset and perform_create, using
    a mocked request and serializer."""
    def setUp(self):
        super(ExpenseViewSetTest, self).setUp()
        self.user.is_superuser = True
        self.user.save()
        self.request = MagicMock(user=self.user)
        self.expense_view = ExpenseViewSet(request=self.request)
        # Mocked serializer whose initial_data names a different target user.
        self.serializer = MagicMock(initial_data={'user': self.user2})
    def test_get_queryset(self):
        """
        get_queryset test.
        """
        # Superuser check: sees all expenses.
        self.assertEqual(
            list(self.expense_view.get_queryset()), [self.expense, self.expense2])
        # Regular user check: sees only own expenses.
        self.user.is_superuser = False
        self.user.save()
        self.assertEqual(
            list(self.expense_view.get_queryset()), [self.expense, ])
    def test_perform_create(self):
        """
        perform_create test.
        """
        # Superuser check: record is saved for the user in initial_data.
        self.expense_view.perform_create(serializer=self.serializer)
        self.assertEqual(
            self.serializer.method_calls[0], call.save(user=self.user2))
        # No initial data: falls back to the requesting user.
        self.serializer.initial_data = {}
        self.expense_view.perform_create(serializer=self.serializer)
        self.assertEqual(
            self.serializer.method_calls[1], call.save(user=self.user))
        # Staff user check.
        self.user.is_superuser = False
        self.user.is_staff = True
        self.user.save()
        self.assertIsNone(
            self.expense_view.perform_create(serializer=self.serializer))
        # Regular user check: record is always saved as themselves.
        self.user.is_staff = False
        self.user.save()
        self.expense_view.perform_create(serializer=self.serializer)
        self.assertEqual(
            self.serializer.method_calls[2], call.save(user=self.user))
class UserViewSetTest(BaseTestCase):
    """Unit tests for UserViewSet.get_queryset and get_object."""
    def setUp(self):
        super(UserViewSetTest, self).setUp()
        self.user.is_superuser = True
        self.user.save()
        self.request = MagicMock(user=self.user)
        self.user_view = UserViewSet(request=self.request)
        self.serializer = MagicMock(initial_data={'user': self.user2})
    def test_get_queryset(self):
        """
        get_queryset test.
        """
        # Superuser check: sees all users.
        self.assertEqual(
            list(self.user_view.get_queryset()), [self.user, self.user2])
        # Regular user check: gets no queryset at all.
        self.user.is_superuser = False
        self.user.save()
        self.assertIsNone(self.user_view.get_queryset())
        # Manager check: sees all users.
        self.user.is_staff = True
        self.user.save()
        self.assertEqual(
            list(self.user_view.get_queryset()), [self.user, self.user2])
    def test_get_object(self):
        """
        get_object test.
        """
        # Retrieve by username in url kwargs.
        self.user_view.kwargs = {'username': self.user.username}
        self.assertEqual(self.user_view.get_object(), self.user)
        self.user_view.kwargs = {'username': self.user2.username}
        self.assertEqual(self.user_view.get_object(), self.user2)
class UserSerializerTest(BaseTestCase):
    """Unit tests for UserSerializer.validate (password matching and
    user_type validation)."""
    def setUp(self):
        super(UserSerializerTest, self).setUp()
        self.serializer = UserSerializer()
    def test_validate(self):
        """
        validate test.
        """
        # Passwords check: mismatched passwords must raise.
        self.serializer.initial_data = {
            'password': 'mypassword',
            'confirm_password': 'mypassword1'
        }
        with self.assertRaises(serializers.ValidationError) as validation_error:
            self.serializer.validate({})
        self.assertEqual(
            validation_error.exception.detail[0], 'Passwords must match.')
        # Matching passwords must pass.
        self.serializer.initial_data['confirm_password'] = 'mypassword'
        self.serializer.validate({})
        # User types check: only is_staff / is_superuser are accepted.
        self.serializer.initial_data = {
            'user_type': 'foobar'
        }
        with self.assertRaises(serializers.ValidationError) as validation_error:
            self.serializer.validate({})
        self.assertEqual(
            validation_error.exception.detail[0], 'Not valid user type.')
        self.serializer.initial_data['user_type'] = 'is_staff'
        self.serializer.validate({})
        self.serializer.initial_data['user_type'] = 'is_superuser'
        self.serializer.validate({})
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,751
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/expense_trackapp/models.py
|
from django.db import models
from django.utils import timezone
from django.conf import settings
def get_current_time():
    """Return the current time in the active local timezone."""
    local_now = timezone.localtime(timezone.now())
    return local_now.time()
def get_current_date():
    """Return the current date in the active local timezone."""
    local_now = timezone.localtime(timezone.now())
    return local_now.date()
class Expense(models.Model):
    """A single expense entry belonging to a user."""
    class Meta:
        # Default ordering: oldest date first, then smallest amount.
        ordering = ['date', 'amount']
    def __unicode__(self):
        # Python 2 display form, e.g. "2017-01-01 666.00".
        return ' '.join([str(self.date), str(self.amount)])
    # Owner of the expense record.
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    # Date and time default to "now" in the active local timezone.
    date = models.DateField(default=get_current_date)
    time = models.TimeField(default=get_current_time)
    description = models.CharField(max_length=1024, null=True, blank=True, default='')
    # Monetary amount with two decimal places (up to 10 digits).
    amount = models.DecimalField(max_digits=10, decimal_places=2)
    comment = models.CharField(max_length=1024, null=True, blank=True, default='')
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,752
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/expense_trackapp/views.py
|
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .forms import RegisterForm
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
@login_required
def index(request):
    """Render the landing page with the logged-in user's API token."""
    token, _created = Token.objects.get_or_create(user=request.user)
    return render(request, 'index.html', {'user': token.user, 'token': token.key})
def register(request):
    """Render the registration form; create the user and redirect on valid POST.

    On invalid POST the bound form (with errors) is re-rendered.
    """
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            User.objects.create_user(**form.cleaned_data)
            return redirect('login')
    else:
        # Bug fix: GET previously rendered with an empty context, so the
        # template had no form to display. Provide an unbound form.
        form = RegisterForm()
    context = {'form': form}
    return render(request, 'registration/register.html', context)
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,753
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/permissions.py
|
from rest_framework import permissions
class IsOwnerOrAdmin(permissions.BasePermission):
    """
    Grant access to superusers, or to the user named in the URL kwarg
    (view level) / the object's owner (object level).
    """
    def has_permission(self, request, view, **kwargs):
        requester = request.user
        if requester.is_superuser:
            return True
        return requester.username == view.kwargs.get('username')

    def has_object_permission(self, request, view, obj):
        requester = request.user
        return True if requester.is_superuser else obj.user == requester
class IsManagerOrAdmin(permissions.BasePermission):
    """
    Grant access only to staff (manager) or superuser accounts, at both
    the view and the object level.
    """
    def has_permission(self, request, view, **kwargs):
        requester = request.user
        return bool(requester.is_superuser or requester.is_staff)

    def has_object_permission(self, request, view, obj):
        requester = request.user
        return bool(requester.is_superuser or requester.is_staff)
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,754
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/views.py
|
import json
from datetime import datetime
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.db.models import Sum, Avg
from rest_framework import status, viewsets, permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from expense_trackapp.models import Expense
from .permissions import IsOwnerOrAdmin, IsManagerOrAdmin
from .filters import ExpenseFilter
from .serializers import (
UserSerializer,
ExpenseSerializer
)
@api_view()
def not_found_404(request):
    """Catch-all API view that returns a JSON 404 payload."""
    payload = {'detail': 'Not found.'}
    return Response(payload, status=status.HTTP_404_NOT_FOUND)
class AccountViewSet(viewsets.ModelViewSet):
    """Unauthenticated endpoint used for account registration (see api/urls)."""
    serializer_class = UserSerializer
    permission_classes = (permissions.AllowAny,)
class ExpenseViewSet(viewsets.ModelViewSet):
    """CRUD plus a weekly-report action for Expense objects.

    Non-superusers only ever see their own expenses; superusers see all
    and may create expenses on behalf of another user.
    """
    serializer_class = ExpenseSerializer
    permission_classes = [IsOwnerOrAdmin, ]
    # NOTE(review): DRF's standard attribute is the singular `lookup_field`;
    # `lookup_fields` looks custom/unused by DRF itself — confirm.
    lookup_fields = ['username', 'pk', 'week']
    filter_class = ExpenseFilter

    def get_queryset(self):
        """Return all expenses for superusers, else only the requester's."""
        if self.request.user.is_superuser:
            return Expense.objects.all()
        else:
            return Expense.objects.filter(user=self.request.user)

    def perform_create(self, serializer):
        """Save the expense, resolving which user it belongs to.

        Superusers may pass a 'user' username in the payload to create for
        someone else; staff (non-super) users are silently prevented from
        creating at all.
        """
        user = self.request.user
        if self.request.user.is_superuser:
            # NOTE(review): User.objects.get raises DoesNotExist for an
            # unknown username here — unhandled; confirm intended.
            username = serializer.initial_data.get('user')
            if username:
                user = User.objects.get(username=username)
        elif self.request.user.is_staff:
            # Staff managers manage users, not expenses: skip the save.
            return None
        serializer.save(user=user)

    def report(self, request, **kwargs):
        """Return a plain-text total/average report for one ISO week.

        Defaults to the current ISO week when no 'week' kwarg is given.
        """
        try:
            user = User.objects.get(username=kwargs.get('username'))
        except User.DoesNotExist as error:
            # NOTE(review): returned with HTTP 200, not 404 — confirm.
            return Response(str(error))
        week = kwargs.get('week', datetime.now().isocalendar()[1])
        weekly_expenses = Expense.objects.filter(user=user, date__week=week)
        total = weekly_expenses.aggregate(Sum('amount'))['amount__sum']
        average = weekly_expenses.aggregate(Avg('amount'))['amount__avg']
        report = 'Weekly report:\n \tTotal: %s\n\tAverage: %s\n' % (total, average)
        return Response(report)
class UserViewSet(viewsets.ModelViewSet):
    """User management endpoints, restricted to managers/admins."""
    serializer_class = UserSerializer
    permission_classes = [IsManagerOrAdmin, ]
    lookup_field = 'username'

    def get_queryset(self):
        """Return all users for staff/superusers.

        NOTE(review): implicitly returns None for everyone else; the
        permission class should have rejected them first — confirm.
        """
        if self.request.user.is_superuser or self.request.user.is_staff:
            return User.objects.all()

    def get_object(self):
        """Look the user up by the 'username' URL kwarg.

        NOTE(review): returns None when the kwarg is absent and raises
        DoesNotExist for an unknown username — confirm intended.
        """
        username = self.kwargs.get('username')
        if username:
            return User.objects.get(username=username)

    def me(self, request, **kwargs):
        """Return the requesting user's auth token and username."""
        try:
            token = Token.objects.get(user=request.user)
        except Token.DoesNotExist as error:
            return Response(str(error))
        # NOTE(review): json.dumps inside Response double-encodes — the
        # client receives a JSON *string*, not an object. Confirm clients
        # rely on this before changing.
        return Response(
            json.dumps({'token': token.key, 'user': token.user.username}))
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,755
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/serializers.py
|
from django.contrib.auth.models import User
from rest_framework import serializers, validators
from expense_trackapp.models import Expense
class UserSerializer(serializers.ModelSerializer):
    """
    User serializer.

    Handles registration payloads: requires a unique email, checks that
    'password' and 'confirm_password' match, and optionally promotes the
    account via a 'user_type' of 'is_staff' or 'is_superuser'.
    """
    class Meta:
        model = User
        fields = ('username', 'password', 'email', 'confirm_password', 'user_type')
        # Never echo the password back in responses.
        extra_kwargs = {'password': {'write_only': True}}

    email = serializers.EmailField(
        required=True,
        validators=[validators.UniqueValidator(queryset=User.objects.all())]
    )
    # Read-only: consumed from the raw input inside validate(), so they
    # never reach validated_data / create_user.
    confirm_password = serializers.ReadOnlyField()
    user_type = serializers.ReadOnlyField()

    def validate(self, attrs):
        """Cross-field validation for password match and user promotion."""
        # Read from initial_data because read-only fields are excluded
        # from attrs.
        if self.initial_data.get('password') != self.initial_data.get('confirm_password'):
            raise serializers.ValidationError('Passwords must match.')
        user_type = self.initial_data.get('user_type')
        if user_type:
            if user_type not in ['is_staff', 'is_superuser']:
                raise serializers.ValidationError('Not valid user type.')
            else:
                # Set the matching boolean flag on the user to be created.
                attrs[user_type] = True
        return attrs

    def create(self, validated_data):
        """Create the user; create_user hashes the password before saving."""
        user = User.objects.create_user(**validated_data)
        return user
class ExpenseSerializer(serializers.ModelSerializer):
    """
    Expense serializer.

    'user' is exposed read-only as the owner's username; the actual user
    instance is attached by the view in perform_create.
    """
    user = serializers.ReadOnlyField(source='user.username')

    class Meta:
        model = Expense
        fields = ('user', 'pk', 'date', 'time', 'amount', 'description', 'comment')
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,756
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/expense_trackapp/apps.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ExpenseTrackappConfig(AppConfig):
    """Django AppConfig for the expense_trackapp application."""
    name = 'expense_trackapp'
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,757
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/project/urls.py
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.views import login, logout
from rest_framework.authtoken.views import obtain_auth_token
from api.urls import account_register
from expense_trackapp.views import register
# Project-level URL routing: site pages plus the token-auth API endpoints.
# NOTE(review): the function-based `login`/`logout` auth views imported
# above were removed in Django 2.1 — this routing assumes an older
# Django; confirm the pinned version before upgrading.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^login/$', login, name='login'),
    url(r'^logout/$', logout, name='logout'),
    url(r'^register/$', register, name='register'),
    # App pages (index) live at the site root.
    url(r'', include('expense_trackapp.urls')),
    url(r'^api/', include('api.urls')),
    # Token retrieval and API-side registration.
    url(r'^api-auth$', obtain_auth_token, name='account_login'),
    url(r'^api-register$', account_register, name='account_register'),
]
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,758
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/filters.py
|
import django_filters
from expense_trackapp.models import Expense
class ExpenseFilter(django_filters.rest_framework.FilterSet):
    """Range filtering for expense lists on date, time and amount."""
    # Each filter exposes _after/_before (date), _min/_max style bounds.
    date = django_filters.DateFromToRangeFilter()
    time = django_filters.TimeRangeFilter()
    amount = django_filters.RangeFilter()

    class Meta:
        model = Expense
        fields = ['date', 'time', 'amount']
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,759
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/api/urls.py
|
from rest_framework.urlpatterns import format_suffix_patterns
from django.conf.urls import url
from .views import (
AccountViewSet,
ExpenseViewSet,
UserViewSet,
not_found_404
)
"""
Account views.
"""
account_register = AccountViewSet.as_view({
'post': 'create'
})
"""
Expense views.
"""
expense_list = ExpenseViewSet.as_view({
'get': 'list',
'post': 'create'
})
expense_detail = ExpenseViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'delete': 'destroy'
})
"""
Report views.
"""
report_detail = ExpenseViewSet.as_view({
'get': 'report'
})
"""
User views.
"""
user_list = UserViewSet.as_view({
'get': 'list',
'post': 'create'
})
user_detail = UserViewSet.as_view({
'get': 'retrieve',
'patch': 'partial_update',
'delete': 'destroy'
})
user_me = UserViewSet.as_view({
'get': 'me'
})
urlpatterns = format_suffix_patterns([
url(r'^users/$', user_list, name='user_list'),
url(r'^users/me$', user_me, name='user_me'),
url(r'^users/(?P<username>[A-Za-z0-9-]+)/$', user_detail, name='user_detail'),
url(r'^users/(?P<username>[A-Za-z0-9-]+)/expenses/$', expense_list, name='expense_list'),
url(r'^users/(?P<username>[A-Za-z0-9-]+)/expenses/report/(?P<week>\d+)$', report_detail, name='report_detail'),
url(r'^users/(?P<username>[A-Za-z0-9-]+)/expenses/(?P<pk>\d+)$', expense_detail, name='expense_detail'),
url(r'^.*$', not_found_404, name='not_found_404')
])
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,760
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/expense_trackapp/tests.py
|
from django import forms
from django.test import TestCase, Client
from .forms import RegisterForm
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
class FormsTest(TestCase):
    """Unit tests for RegisterForm's clean_email and clean methods."""
    def setUp(self):
        self.register_data = {
            'username': 'foobar',
            'email': 'foo@bar.com',
            'password1': 'mypassword',
            'password2': 'mypassword',
        }
        self.register_form = RegisterForm()
        # Mock cleaned data (bypass full-form validation).
        self.register_form.cleaned_data = self.register_data.copy()

    def test_register_clean_mail(self):
        """
        Register clean email field validation.
        """
        # Raise ValidationError if email already exists.
        User.objects.create_user(
            email='foo@bar.com', username='foobar', password='mypassword')
        with self.assertRaises(forms.ValidationError) as validation_error:
            self.register_form.clean_email()
        self.assertEqual(
            validation_error.exception.messages[0], 'Email already in use.')
        # Ensure that unique email will pass validation.
        User.objects.get().delete()
        self.assertEqual('foo@bar.com', self.register_form.clean_email())

    def test_register_clean(self):
        """
        Register clean validation.
        """
        # Make sure password1 and password2 arguments are properly removed and
        # a generic password argument is set.
        expected_data = {
            'username': 'foobar',
            'email': 'foo@bar.com',
            'password': 'mypassword'
        }
        self.assertEqual(self.register_form.clean(), expected_data)
        # Make sure that missing password arguments wont break our app.
        self.register_form.cleaned_data = self.register_data.copy()
        self.register_form.cleaned_data.pop('password1')
        self.register_form.cleaned_data.pop('password2')
        expected_data['password'] = None
        self.assertEqual(self.register_form.clean(), expected_data)
class ViewsTest(TestCase):
    """Integration tests for the login, logout and register views."""
    def setUp(self):
        self.client = Client()
        self.user = User.objects.create_user(
            username='foobar',
            email='foo@bar.com',
            password='mypassword'
        )

    def test_login(self):
        """
        Login test.
        """
        login_data = {
            'username': 'foobar1',
            'password': 'mypassword'
        }
        self.client.logout()
        # Login template.
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/login.html')
        # Wrong credentials.
        self.assertFalse(self.client.login(**login_data))
        # Unauthorized redirect.
        response = self.client.get(reverse('index'), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/login.html')
        # Authorized redirect.
        login_data['username'] = 'foobar'
        response = self.client.post(reverse('login'), login_data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'index.html')
        self.assertTrue(self.client.login(**login_data))

    def test_logout(self):
        """
        Logout test.
        """
        # Redirect to login.
        response = self.client.get(reverse('logout'), follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/login.html')

    def test_register_basic(self):
        """
        Register basic functionality test.
        """
        register_data = {
            'username': 'foobar1',
            'email': 'foo@bar1.com',
            'password1': 'mypassword',
            'password2': 'mypassword',
        }
        # Register template.
        response = self.client.get(reverse('register'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/register.html')
        # Successful registration.
        response = self.client.post(
            reverse('register'), register_data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/login.html')

    def test_register_unique_values(self):
        """
        Register unique values test.
        """
        # Raise errors by using an existing user register data.
        register_data = {
            'username': 'foobar',
            'email': 'foo@bar.com',
            'password1': 'mypassword',
            'password2': 'mypassword1',
        }
        # Registration with used email.
        response = self.client.post(reverse('register'), register_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/register.html')
        self.assertIn('Email already in use.', response.content)
        # Register with mismatching passwords.
        # Bug fix: the expected message contains an apostrophe, so the
        # literal must be double-quoted (the original single-quoted
        # string was a syntax error).
        self.assertIn(
            "The two password fields didn't match.", response.content)
        # Register with same username.
        self.assertIn(
            'A user with that username already exists.', response.content)
        # Registration with invalid email.
        register_data['email'] = 'foobar2.com'
        response = self.client.post(reverse('register'), register_data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/register.html')
        self.assertIn('Enter a valid email address.', response.content)

    def test_register_missing_values(self):
        """
        Register missing values test.
        """
        response = self.client.post(reverse('register'), {})
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'registration/register.html')
        self.assertEqual(response.content.count('This field is required.'), 4)
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,761
|
sasakalaba/expense_track
|
refs/heads/master
|
/expense_track/expense_trackapp/admin.py
|
from django.contrib import admin
from .models import Expense
class ExpenseAdmin(admin.ModelAdmin):
    """Admin options for Expense: list view shows date and amount."""
    model = Expense
    list_display = ('date', 'amount')

# Register the model with the default admin site.
admin.site.register(Expense, ExpenseAdmin)
|
{"/expense_track/api/tests/tests.py": ["/expense_track/api/tests/base.py"], "/expense_track/api/tests/tests_unit.py": ["/expense_track/api/tests/base.py", "/expense_track/api/permissions.py", "/expense_track/api/views.py", "/expense_track/api/serializers.py"], "/expense_track/api/views.py": ["/expense_track/api/permissions.py", "/expense_track/api/filters.py", "/expense_track/api/serializers.py"], "/expense_track/api/urls.py": ["/expense_track/api/views.py"], "/expense_track/expense_trackapp/admin.py": ["/expense_track/expense_trackapp/models.py"]}
|
27,762
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/controller/controller.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.io import curdoc
from bokeh.models.widgets import Tabs
from model.model import Model
from view.about import About
from view.managedata import ManageData
from view.runforecast import RunForecast
from view.forecastbytime import ForecastByTime
from view.forecastdistribution import ForecastDistribution
from view.forecastbygeography import ForecastByGeography
from view.forecastbystate import ForecastByState
from view.pollviewer import PollViewer
# %%---------------------------------------------------------------------------
# Controller
# -----------------------------------------------------------------------------
class Controller():
    """The Controller class is part of the model-view-controller architecture.

    Links views and Model and controls interaction between them.
    """
    # %%
    def __init__(self):
        """Initialize the object.

        First part of two-part initialization.
        The initialization done here should be low risk - we need the GUI to
        be built before we can show error messages.
        """
        self.model = Model()
        # Create the panels by instantiating each of the tabs. Note the order
        # in the list is the tab order in the GUI.
        self.about = About(self)
        self.managedata = ManageData(self)
        self.runforecast = RunForecast(self)
        self.forecastbytime = ForecastByTime(self)
        self.forecastdistribution = ForecastDistribution(self)
        self.forecastbygeography = ForecastByGeography(self)
        self.forecastbystate = ForecastByState(self)
        self.pollviewer = PollViewer(self)
        self.panels = [self.about,
                       self.managedata,
                       self.runforecast,
                       self.forecastbytime,
                       self.forecastdistribution,
                       self.forecastbygeography,
                       self.forecastbystate,
                       self.pollviewer]
        # Create tabs, note the order here is the display order.
        self.tabs = Tabs(tabs=[p.panel for p in self.panels])

    # %%
    def setup(self):
        """Set up object. Second part of two-part initialization."""
        for panel in self.panels:
            panel.setup()

    # %%
    def update(self):
        """Update the object: reload raw data and refresh year choices."""
        self.model.read_rawdata()
        years = self.model.get_years()
        self.managedata.update(years)
        self.runforecast.update(years)

    # %%
    def cross_check(self):
        """Cross checks the model data."""
        return self.model.cross_check()

    # %%
    def calculate_forecast(self, year):
        """Calculate the forecast for the election year.

        Returns a status string for display in the GUI.
        """
        self.model.calculate_forecast(year)
        # Bug fix: was `if ~self.model.error_status:`. For a plain bool,
        # ~False == -1 and ~True == -2 are BOTH truthy, so the success
        # branch was always taken. `not` negates correctly for Python and
        # NumPy scalar booleans alike.
        if not self.model.error_status:
            return "Forecast completed without error."
        else:
            return self.model.error_string

    # %%
    def load_forecast(self, year):
        """Load forecast data into model and refresh the forecast views.

        Returns a status string for display in the GUI.
        """
        self.model.load_forecast(year)
        # Bug fix: `not` instead of bitwise `~` (see calculate_forecast).
        if not self.model.error_status:
            # Update the plots with the newly loaded data
            self.forecastbytime.update(self.model.electoral_maximum)
            self.forecastdistribution.update(self.model.electoral_distribution)
            self.forecastbygeography.update(self.model.state)
            self.forecastbystate.update(self.model.state, self.model.polls)
            self.pollviewer.update(self.model.polls)
            return "Year forecast loaded without error."
        else:
            return self.model.error_string

    # %%
    def display(self):
        """Display the visualization.

        Calls the Bokeh methods to make the
        application start. Note the server actually renders the GUI in the
        browser.

        Returns
        -------
        None
        """
        curdoc().add_root(self.tabs)
        curdoc().title = 'silkworm'
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,763
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/model/electoralcollegemodel.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
import pandas as pd
import numpy
import scipy
import scipy.special
# %%---------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def generator_polynomial(probability, allocation):
    """
    Build the coefficient list [p, 0, ..., 0, 1 - p] for one state.

    The two nonzero terms sit ``allocation`` positions apart, so
    convolving these lists accumulates electoral-college vote counts.
    int is needed because of minor Pandas requirement.
    """
    padding = [0] * int(allocation - 1)
    return [probability] + padding + [1 - probability]
# %%---------------------------------------------------------------------------
# ElectorlCollegeModelModel
# -----------------------------------------------------------------------------
class ElectoralCollegeModel():
    """Models the state election results.

    Combines per-state win probabilities into electoral-college vote
    distributions by convolving per-state generator polynomials for each
    polling date.
    """
    # %%
    def __init__(self,
                 state,
                 allocations,
                 election_year):
        """Initialize.

        state: per-state forecast DataFrame — assumed to carry
            'State abbreviation', 'Date', 'Democratic probability' and
            'Republican probability' columns (TODO confirm vs StateModel).
        allocations: DataFrame of electoral vote allocations with 'Year',
            'State abbreviation' and 'Allocation' columns.
        election_year: election year to model.
        """
        self.year = election_year
        self.state = state
        self.allocations = allocations

    # %%
    def setup(self):
        """
        Set up the data structures.
        Riskier setup done here, so init method less likely to fail.
        """
        # Attach this election year's vote allocation to each state row
        # (inner join drops states without an allocation entry).
        self.state = self.state.merge(
            self.allocations[
                self.allocations['Year'] == self.year][['State abbreviation',
                                                        'Allocation']],
            on='State abbreviation',
            how='inner')

    # %%
    def update(self):
        """Update the electoral college forecast with state data.

        Produces self.electoral_distribution (vote-count PDFs per date)
        and self.electoral_maximum (most likely vote count per date).
        """
        # Total electoral college votes for this year.
        _ecv = (self.allocations
                .query('Year == {0}'
                       .format(self.year))['Allocation']
                .sum())
        # Sort state data by date and by electoral college vote allocation
        self.state = self.state.sort_values(by=['Date', 'Allocation'])
        # Create the generator polynomials for each date/state/party
        self.state['Democratic polynomial'] = \
            self.state.apply(lambda x: generator_polynomial(
                x['Democratic probability'], x['Allocation']), axis=1)
        self.state['Republican polynomial'] = \
            self.state.apply(lambda x: generator_polynomial(
                x['Republican probability'], x['Allocation']), axis=1)
        # Pre-allocate to avoid appends growing piece by piece
        pdf_max = [None]*self.state['Date'].nunique()
        df_ec = [None]*self.state['Date'].nunique()
        # Go through every date working out the electoral college PDF
        for index, date in enumerate(self.state['Date'].unique()):
            date_slice = self.state[self.state['Date'] == date]
            # Convolving all state polynomials yields the distribution of
            # total votes; the flip re-orders coefficients so that index i
            # corresponds to i electoral college votes.
            cum_dem = [1]
            for array in date_slice['Democratic polynomial']:
                cum_dem = np.convolve(cum_dem, array)
            cum_dem = np.fliplr([cum_dem])[0]
            cum_rep = [1]
            for array in date_slice['Republican polynomial']:
                cum_rep = np.convolve(cum_rep, array)
            cum_rep = np.fliplr([cum_rep])[0]
            # Mode (most likely total) for each party on this date.
            pdf_max[index] = {
                'Date': date,
                'Democratic maximum': np.where(cum_dem == cum_dem.max())[0][0],
                'Republican maximum': np.where(cum_rep == cum_rep.max())[0][0]
            }
            df_ec[index] = pd.DataFrame(
                {'Date': [date]*(_ecv+1),
                 'Electoral college vote': list(range(_ecv + 1)),
                 'Democratic distribution': cum_dem,
                 'Republican distribution': cum_rep})
        self.electoral_distribution = pd.concat(df_ec)
        self.electoral_maximum = pd.DataFrame(pdf_max)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,764
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/forecastdistribution.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (DateSlider,
Panel)
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Legend, Span
from bokeh.layouts import column, row, Spacer
from scipy.stats import norm
# %%---------------------------------------------------------------------------
# ForecastDistribution
# -----------------------------------------------------------------------------
class ForecastDistribution():
    """Shows the forecasted electoral college vote distribution.

    Overlapping Democratic/Republican vote-count PDFs as bar charts, with
    a date slider selecting which day's distribution is displayed.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.

        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.
        """
        self.controller = controller
        self.electoral_distribution = None
        # Shows the forecast for electoral college votes over time.
        self.ecvdistribution = figure(
            title="""Electoral college votes distribution""",
            x_axis_type="""linear""",
            x_axis_label="""Electoral college votes""",
            y_axis_label="""Probability""",
            sizing_mode="""stretch_both""")
        # Fake data to make sure we don't get an empty renderer message
        self.cds = ColumnDataSource(
            data={'Electoral college votes': list(range(539)),
                  'Democratic distribution': norm.pdf(range(539),
                                                      loc=200,
                                                      scale=100),
                  'Republican distribution': norm.pdf(range(539),
                                                      loc=400,
                                                      scale=100)}
        )
        # Semi-transparent bars so the two party distributions overlay.
        _dg = self.ecvdistribution.vbar(
            x='Electoral college votes',
            top='Democratic distribution',
            fill_color='blue',
            line_color='blue',
            line_width=1,
            width=1,
            alpha=0.2,
            source=self.cds)
        _rg = self.ecvdistribution.vbar(
            x='Electoral college votes',
            top='Republican distribution',
            fill_color='red',
            line_color='red',
            line_width=1,
            width=1,
            alpha=0.2,
            source=self.cds)
        # 270 to win line
        _win270 = Span(location=270, dimension='height')
        self.ecvdistribution.add_layout(_win270)
        # Add a legend outside of the plot
        _legend = Legend(items=[('Democratic', [_dg]),
                                ('Republican', [_rg])],
                        location='top_right')
        self.ecvdistribution.add_layout(_legend, 'right')
        self.ecvdistribution.legend.click_policy = "hide"
        self.ecvdistribution.y_range.only_visible = True
        # The date for charting. Bounds here are placeholders; update()
        # replaces them with the loaded data's date range.
        self.choosethedatefordisplay = DateSlider(
            title="""Choose the date for display""",
            start="""2018-11-13T20:20:39+00:00""",
            end="""2025-11-13T20:20:39+00:00""",
            step=24*60*60*1000,
            value="""2018-11-13T20:20:39+00:00""",
            sizing_mode="stretch_width")
        # Layout the widgets
        r1 = row(children=[Spacer(width=10),
                           self.choosethedatefordisplay,
                           Spacer(width=10)],
                 sizing_mode='stretch_width')
        self.layout = column(children=[self.ecvdistribution,
                                       r1,
                                       Spacer(height=75,
                                              sizing_mode='scale_width')],
                             sizing_mode='stretch_both')
        self.panel = Panel(child=self.layout,
                           title='Vote forecast distribution')

    # %%
    def setup(self):
        """Set up object.

        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Setup the callbacks.
        self.choosethedatefordisplay.on_change(
            "value",
            self.callback_choosethedatefordisplay)

    # %%
    def update(self, electoral_distribution):
        """Update view object.

        electoral_distribution: per-date vote-count PDFs — assumed to
        carry 'Date', 'Electoral college vote' and the two party
        distribution columns (see ElectoralCollegeModel) — TODO confirm.
        """
        self.electoral_distribution = electoral_distribution
        # Rebound the slider to the data's date range, defaulting the
        # display to the most recent date.
        self.choosethedatefordisplay.end =\
            self.electoral_distribution['Date'].max()
        self.choosethedatefordisplay.value =\
            self.electoral_distribution['Date'].max()
        self.choosethedatefordisplay.start =\
            self.electoral_distribution['Date'].min()
        self._update_chart(self.choosethedatefordisplay.value_as_datetime)

    # %%
    def _update_chart(self, date):
        """Redraw the chart by updating underlying data."""
        _slice =\
            (self.electoral_distribution[
                self.electoral_distribution['Date'] == date]
             [['Electoral college vote',
               'Democratic distribution',
               'Republican distribution']])
        self.cds.data =\
            {'Electoral college votes': _slice['Electoral college vote'],
             'Democratic distribution': _slice['Democratic distribution'],
             'Republican distribution': _slice['Republican distribution']}

    # %%
    def callback_choosethedatefordisplay(self, attrname, old, new):
        """Execute callback for the DateSlider self.choosethedatefordisplay."""
        self._update_chart(self.choosethedatefordisplay.value_as_datetime)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,765
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/forecastbytime.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (Panel)
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool, Legend, Span
from bokeh.layouts import column, Spacer
import pandas as pd
# %%---------------------------------------------------------------------------
# ForecastByTime
# -----------------------------------------------------------------------------
class ForecastByTime():
    """Shows the forecasted electoral college votes over time.

    One Bokeh tab holding a time-series chart of each party's maximum
    forecast electoral college votes by date, with a horizontal marker
    at the 270 votes needed to win.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.

        Parameters
        ----------
        controller
            The application controller (MVC); retained for symmetry
            with the other view classes.
        """
        self.controller = controller
        # Shows the forecast for electoral college votes over time.
        self.electoralcollegevotesbytime = figure(
            title="""Electoral college votes by date""",
            x_axis_type="""datetime""",
            x_axis_label="""Date""",
            y_axis_label="""Electoral college votes""",
            sizing_mode="""stretch_both""")
        # Create dummy data for the plot; update() replaces it with
        # real forecast data later.
        _df = pd.DataFrame({'Date': ['2030-12-31', '2031-12-31'],
                            'Democratic maximum': [300, 238],
                            'Republican maximum': [238, 300]})
        _df['Date'] = pd.to_datetime(_df['Date'])
        self.cds = ColumnDataSource(_df)
        # Draw dummy lines
        _dg = self.electoralcollegevotesbytime.line(
            x='Date',
            y='Democratic maximum',
            line_color='blue',
            line_width=2,
            source=self.cds)
        _rg = self.electoralcollegevotesbytime.line(
            x='Date',
            y='Republican maximum',
            line_color='red',
            line_width=2,
            source=self.cds)
        # 270 to win line
        _win270 = Span(location=270, dimension='width')
        self.electoralcollegevotesbytime.add_layout(_win270)
        # Add a legend outside of the plot
        _legend = Legend(items=[('Democratic', [_dg]),
                                ('Republican', [_rg])],
                         location='top_right')
        self.electoralcollegevotesbytime.add_layout(_legend, 'right')
        self.electoralcollegevotesbytime.legend.click_policy = "hide"
        self.electoralcollegevotesbytime.y_range.only_visible = True
        # Hover tip
        # ---------
        # Now set up the hover tool
        hover = HoverTool(point_policy="follow_mouse",
                          renderers=[_dg, _rg],
                          tooltips=[("Date", '@Date{%F}'),
                                    ("Democratic",
                                     "@{Democratic maximum}"),
                                    ("Republican",
                                     "@{Republican maximum}")],
                          formatters={'@Date': 'datetime'})
        self.electoralcollegevotesbytime.add_tools(hover)
        # Layout the widgets
        self.layout = column(children=[self.electoralcollegevotesbytime,
                                       Spacer(sizing_mode='scale_width',
                                              height=50)],
                             sizing_mode="stretch_both")
        self.panel = Panel(child=self.layout,
                           title='Vote forecast by time')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # No widgets on this tab have a callback, so this is an empty method.
        pass
    # %%
    def update(self, electoral_maximum):
        """Update view object.

        Parameters
        ----------
        electoral_maximum : pandas.DataFrame
            Must contain 'Date', 'Democratic maximum' and
            'Republican maximum' columns.
        """
        # Replacing cds.data wholesale triggers a Bokeh redraw.
        self.cds.data = {'Date': electoral_maximum['Date'],
                         'Democratic maximum':
                         electoral_maximum['Democratic maximum'],
                         'Republican maximum':
                         electoral_maximum['Republican maximum']}
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,766
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/forecastbystate.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import pandas as pd
from bokeh.layouts import column, row, Spacer
from bokeh.models.widgets import (Panel,
Select)
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool, Legend
from random import sample
# %%---------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# %%---------------------------------------------------------------------------
# ForecastByState
# -----------------------------------------------------------------------------
class ForecastByState():
    """Shows US electoral college vote forecast by state.

    One Bokeh tab: a time-series of party vote proportions for a state
    chosen from a dropdown -- trend lines with 95% confidence bands plus
    circles for individual poll results.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.

        Parameters
        ----------
        controller
            The application controller (MVC).
        """
        self.controller = controller
        # Data frames supplied later by update(); None until then.
        self.state = None
        self.polls = None
        # figure
        # ======
        # Shows the forecast for electoral college votes over time.
        self.statetime = figure(
            title="""Party voter proportions by state""",
            x_axis_type="""datetime""",
            x_axis_label="""Date""",
            y_axis_label="""Respondent proportion""",
            sizing_mode="""stretch_both""")
        # Create dummy data for the plot; replaced with real data by
        # update().
        _df = pd.DataFrame({'Date': ['2030-12-31', '2031-12-31'],
                            'Democratic proportion': [0.1, 0.9],
                            'Democratic upper': [0.3, 0.9],
                            'Democratic lower': [0.1, 0.7],
                            'Republican proportion': [0.9, 0.1],
                            'Republican upper': [0.9, 0.3],
                            'Republican lower': [0.7, 0.1]})
        _df['Date'] = pd.to_datetime(_df['Date'])
        self.cds = ColumnDataSource(_df)
        # Draw dummy lines
        _dgl = self.statetime.line(
            x='Date',
            y='Democratic proportion',
            line_color='blue',
            line_width=2,
            source=self.cds)
        _rgl = self.statetime.line(
            x='Date',
            y='Republican proportion',
            line_color='red',
            line_width=2,
            source=self.cds)
        # Add circles for polls
        _df = pd.DataFrame({'Date': ['2030-12-31', '2031-12-31'],
                            'Pollster': ['Good pollster', 'Bad pollster'],
                            'Poll ID': [0, 1],
                            'Democratic proportion': [0.2, 0.8],
                            'Republican proportion': [0.8, 0.2]})
        _df['Date'] = pd.to_datetime(_df['Date'])
        self.cds_polls = ColumnDataSource(_df)
        _dgp = self.statetime.circle(x='Date',
                                     y='Democratic proportion',
                                     size=10,
                                     fill_color='blue',
                                     line_color='blue',
                                     alpha=0.2,
                                     source=self.cds_polls)
        _rgp = self.statetime.circle(x='Date',
                                     y='Republican proportion',
                                     size=10,
                                     fill_color='red',
                                     line_color='red',
                                     alpha=0.2,
                                     source=self.cds_polls)
        # Add upper and lower 95% confidence
        _bandd = self.statetime.varea(
            x='Date',
            y1='Democratic lower',
            y2='Democratic upper',
            source=self.cds,
            fill_color='blue',
            fill_alpha=0.1)
        _bandr = self.statetime.varea(
            x='Date',
            y1='Republican lower',
            y2='Republican upper',
            source=self.cds,
            fill_color='red',
            fill_alpha=0.1)
        # Legend
        # ------
        # Add a legend outside of the plot
        _legend = Legend(items=[('Democratic trend', [_dgl]),
                                ('Democratic 95%', [_bandd]),
                                ('Republican trend', [_rgl]),
                                ('Republican 95%', [_bandr]),
                                ('Democratic poll result', [_dgp]),
                                ('Republican poll result', [_rgp])],
                         location='top_right')
        self.statetime.add_layout(_legend, 'right')
        # Hover tip
        # ---------
        # Now set up the hover tool
        # NOTE(review): the '{%0.1f}' tooltip format tokens have no
        # matching 'formatters' entry -- confirm they render as
        # intended in the browser.
        hover = HoverTool(point_policy="follow_mouse",
                          renderers=[_dgp, _rgp],
                          tooltips=[("Pollster", "@Pollster"),
                                    ("Poll ID", "@{Poll ID}"),
                                    ("Democratic",
                                     "@{Democratic proportion}{%0.1f}"),
                                    ("Republican",
                                     "@{Republican proportion}{%0.1f}")])
        self.statetime.add_tools(hover)
        # Legend policies
        # ---------------
        # (These were previously also assigned immediately after
        # add_layout above; the duplicate assignments were removed.)
        self.statetime.legend.click_policy = "hide"
        self.statetime.y_range.only_visible = True
        # Select state
        # ============
        self.selectstate = Select(
            title="""State""",
            options=['dummy1', 'dummy2', 'dummy3'],
            value="""dummy1""",
            sizing_mode="stretch_width")
        # Layout
        # ======
        r1 = row(children=[Spacer(width=10),
                           self.selectstate,
                           Spacer(width=10)],
                 sizing_mode='stretch_width')
        self.layout = column(children=[self.statetime,
                                       r1,
                                       Spacer(height=75,
                                              sizing_mode='scale_width')],
                             sizing_mode='stretch_both')
        self.panel = Panel(child=self.layout,
                           title='Time forecast by state')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Setup the callbacks.
        self.selectstate.on_change(
            "value",
            self.callback_selectstate)
    # %%
    def update(self, state, polls):
        """Update view object.

        Parameters
        ----------
        state : pandas.DataFrame
            Per-state daily forecast with 'State name', 'Date',
            proportion and SE columns for both parties.
        polls : pandas.DataFrame
            Individual poll results ('State name', 'end_date',
            'pollster', 'poll_id', 'Democratic', 'Republican').
        """
        # Make a copy of the state data so the derived +/- SE columns
        # added below don't mutate the caller's frame.
        self.state = state.copy()
        self.polls = polls
        self.state['Democratic lower'] =\
            self.state['Democratic proportion'] - self.state['Democratic SE']
        self.state['Democratic upper'] =\
            self.state['Democratic proportion'] + self.state['Democratic SE']
        self.state['Republican lower'] =\
            self.state['Republican proportion'] - self.state['Republican SE']
        self.state['Republican upper'] =\
            self.state['Republican proportion'] + self.state['Republican SE']
        # Update the selection with the states
        _states = self.state['State name'].unique().tolist()
        self.selectstate.options = _states
        # Show a random state so the tab isn't empty before the user
        # makes a choice.
        self.selectstate.value = sample(_states, 1)[0]
        # Update the chart
        self._update_chart(self.selectstate.value)
    # %%
    def _update_chart(self, state):
        """Redraw trend and poll glyphs for the given state name."""
        # Trend data
        # ----------
        _slice = self.state[self.state['State name'] == state]
        self.cds.data = {
            'Date': _slice['Date'].to_list(),
            'Democratic proportion': _slice['Democratic proportion'].to_list(),
            'Republican proportion': _slice['Republican proportion'].to_list(),
            'Democratic lower': _slice['Democratic lower'].to_list(),
            'Democratic upper': _slice['Democratic upper'].to_list(),
            'Republican lower': _slice['Republican lower'].to_list(),
            'Republican upper': _slice['Republican upper'].to_list()
        }
        # Poll data
        # ---------
        _slice = self.polls[self.polls['State name'] == state]
        self.cds_polls.data = {
            'Date': _slice['end_date'].to_list(),
            'Pollster': _slice['pollster'].to_list(),
            'Poll ID': _slice['poll_id'].to_list(),
            'Democratic proportion': (_slice['Democratic']/100).to_list(),
            'Republican proportion': (_slice['Republican']/100).to_list()
        }
    # %%
    def callback_selectstate(self, attrname, old, new):
        """Execute callback for self.callback_selectstate."""
        self._update_chart(self.selectstate.value)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,767
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/model/statemodel.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import numpy as np
import pandas as pd
import numpy
import scipy
import scipy.special
# %%---------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
CONFIDENCE95 = 1.96  # z-score for a two-sided 95% confidence interval
# %%---------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def sigma(spread, observations):
    """Return the standard deviation of the spread estimate.

    spread -- the candidate's lead over the nearest rival (proportion).
    observations -- number of poll respondents.
    """
    variance = (1 - spread ** 2) / observations
    return numpy.sqrt(variance)
def win_prob(spread, observations):
    """Return the win probability for a candidate.

    Treats the observed spread as normally distributed with standard
    deviation ``sigma(spread, observations)`` and evaluates the normal
    CDF at the observed spread via the error function.
    """
    z = spread / (numpy.sqrt(2.0) * sigma(spread, observations))
    return 0.5 * (1.0 + scipy.special.erf(z))
# %%---------------------------------------------------------------------------
# StateModel
# -----------------------------------------------------------------------------
class StateModel():
    """Models the state election results.

    Seeds each state with the previous Presidential election's result,
    then folds in polls (a rolling-window median of the D-R spread) to
    build a per-state, per-day frame of vote proportions, standard
    errors, and Democratic/Republican win probabilities.
    """
    # %%
    def __init__(self,
                 results,
                 polls,
                 election_year):
        """Initialize.

        Parameters
        ----------
        results : pandas.DataFrame
            Historical state-level results; must include 'Year' and the
            party vote/electoral-count columns consumed by setup().
        polls : pandas.DataFrame
            Poll data; must include 'State abbreviation', 'end_date',
            'sample_size', 'Spread D-R', 'Democratic', 'Republican'.
        election_year : int
            The election year being forecast.
        """
        self.year = election_year
        self.polls = polls
        self.start_date = pd.to_datetime('{0}-01-01'.format(self.year))
        # The state dataframe will hold the results. We're going to seed
        # the state frame with the results from the previous election, hence
        # the -4.
        self.state = results[results['Year'] == election_year - 4].copy()
    # %%
    def setup(self):
        """
        Set up the data structures.
        Riskier setup done here, so init method less likely to fail.
        """
        # Setup the states. Use 1st January of the year as our starting point.
        # Work out a Democratic and Republican probability of winning using
        # the previous election results. The use of the number 100 to get the
        # probabilities is a 'fudge' factor to introduce some uncertainty into
        # the analysis. Because 3rd party candidates haven't come in first
        # or second place in any recent election, I'm going to ignore them
        # here and set the Republican probability to be 1-Democratic
        # probability.
        self.state = (self.state
                      .assign(**{'All votes':
                                 (lambda x:
                                  x['Democratic votes'] +
                                  x['Other votes'] +
                                  x['Republican votes'])})
                      .assign(**{'Democratic proportion':
                                 (lambda x:
                                  (x['Democratic votes']/x['All votes']))})
                      .assign(**{'Republican proportion':
                                 (lambda x:
                                  (x['Republican votes']/x['All votes']))})
                      .assign(**{'Spread D-R':
                                 (lambda x:
                                  (x['Democratic votes'] -
                                   x['Republican votes']) /
                                  x['All votes'])})
                      .assign(**{'Date': self.start_date})
                      .assign(**{"Democratic probability":
                                 lambda x: win_prob(x['Spread D-R'], 100)})
                      .drop(columns=['Democratic votes',
                                     'Other votes',
                                     'Republican votes',
                                     'All votes',
                                     'Democratic electoral',
                                     'Other electoral',
                                     'Republican electoral',
                                     'Year']))
        # Pandas works faster when we pre-allocate memory as opposed
        # to growing dataframes one entry at a time. What we're going to
        # do is add dates to the self.state dataframe to grow to the
        # correct size. This will lead to lots of NAs that we'll overwrite
        # later.
        # Step1. Get the date range
        # The last day we can forecast is the date of the most recent poll.
        _dates = pd.DataFrame(
            {'Date': pd.date_range(start=self.start_date,
                                   end=self.polls['end_date'].max())})
        # Create column to do cartesian join on
        _dates['_m'] = _dates.shape[0]*[1]
        # Get state abbreviations and add column to join on
        _states = pd.DataFrame({'State abbreviation':
                                self.state['State abbreviation'].unique()})
        _states['_m'] = _states.shape[0]*[1]
        # Now do the cartesian join to give every state and every date
        _cartesian = _dates.merge(_states,
                                  on='_m',
                                  how='inner').drop(columns=['_m'])
        # Step 2. Now add it to the state frame to 'reserve' memory.
        # This gives us the January 1 data and NA entries for every
        # subsequent date.
        self.state = self.state.merge(_cartesian,
                                      on=['Date', 'State abbreviation'],
                                      how='outer')
        # We only care about polls that occurred after our start date.
        # Sort the polls by state and end_date.
        self.polls = (self.polls[self.polls['end_date'] >= self.start_date]
                      .sort_values(by=['State abbreviation', 'end_date'],
                                   ascending=[True, True]))
    # %%
    def update(self):
        """
        Update the state forecast with poll data.
        This method re-creates the state-level forecast from scratch each
        time because the new poll data may contain additional polls
        conducted in the past.
        """
        # Setup
        # -----
        # This is the window size, but because we use <= and >=, it's actually
        # the window size -1. This is a safer implementation.
        window = 6
        # Build state frame from polling data
        # -----------------------------------
        # Step through each state
        for state in self.polls['State abbreviation'].unique():
            state_slice = self.polls[self.polls['State abbreviation'] == state]
            # We need to step through each date to calculate an aggregate.
            # Obviously, we'll use the poll dates, but there's a corner case
            # where we have two polls on adjacent days. Using an entirely
            # backwards looking algorithm (poll end_date - 6 days) will
            # give an incorrect result in this case. So we look forward a
            # week to capture the corner case.
            _dummy = state_slice['end_date'].unique()
            _dates = np.unique(np.sort(np.concatenate(
                (_dummy, _dummy + pd.Timedelta(window, unit='d')))))
            # Step through each unique poll date
            for end_date in _dates:
                # Slice the data so there are window + 1 days' polls in the
                # slice. The sort is very important for the median
                # sample size calculation which comes next.
                date_slice = (state_slice[
                    (state_slice['end_date'] <= end_date) &
                    (state_slice['end_date'] >=
                     end_date - pd.Timedelta(window, unit='d'))]).sort_values(
                         'Spread D-R')
                # Aggregate over these window+1 days worth of polls
                spread = date_slice['Spread D-R'].median()
                # Get the sample size for the median, either directly or as
                # an 'estimate'.
                # If the slice is an odd number, the median is the middle
                # value.
                # Note the proportion of Democratic and Republican voters
                # will not sum to 1 in most cases due to 3rd party
                # candidates and don't knows/won't say.
                if date_slice.shape[0] % 2 != 0:
                    observations = date_slice.iloc[
                        date_slice.shape[0]//2]['sample_size']
                    democratic = date_slice.iloc[
                        date_slice.shape[0]//2]['Democratic']/100
                    republican = date_slice.iloc[
                        date_slice.shape[0]//2]['Republican']/100
                # The slice is an even number, so the median is between two
                # values.
                else:
                    upr = date_slice.shape[0]//2
                    lwr = upr - 1
                    observations = int(
                        sum([date_slice.iloc[lwr]['sample_size'],
                             date_slice.iloc[upr]['sample_size']])/2)
                    democratic = (
                        sum([date_slice.iloc[lwr]['Democratic'],
                             date_slice.iloc[upr]['Democratic']])/2)/100
                    republican = (
                        sum([date_slice.iloc[lwr]['Republican'],
                             date_slice.iloc[upr]['Republican']])/2)/100
                probability_democratic = win_prob(spread, observations)
                # Write the aggregates into the pre-allocated state frame
                # for this (state, date) pair.
                self.state.loc[
                    (self.state['State abbreviation'] == state) &
                    (self.state['Date'] == end_date),
                    'Observations'] = observations
                self.state.loc[
                    (self.state['State abbreviation'] == state) &
                    (self.state['Date'] == end_date), 'Spread D-R'] = spread
                self.state.loc[
                    (self.state['State abbreviation'] == state) &
                    (self.state['Date'] == end_date),
                    'Democratic probability'] = probability_democratic
                self.state.loc[
                    (self.state['State abbreviation'] == state) &
                    (self.state['Date'] == end_date),
                    'Democratic proportion'] = democratic
                self.state.loc[
                    (self.state['State abbreviation'] == state) &
                    (self.state['Date'] == end_date),
                    'Republican proportion'] = republican
        # Fill in state table
        # -------------------
        self.state['Republican probability'] = \
            1 - self.state['Democratic probability']
        # Using linear interpolation - we might want a smoother function in
        # the future. Important to sort in the correct order first. This
        # line of code relies on the first entry for each state being present.
        self.state.loc[~self.state['Observations'].isna(), 'Democratic SE'] = \
            (CONFIDENCE95*numpy.sqrt(
                (self.state[
                    'Democratic proportion']*(1-self.state[
                        'Democratic proportion']))/self.state['Observations']))
        self.state.loc[~self.state['Observations'].isna(), 'Republican SE'] = \
            (CONFIDENCE95*numpy.sqrt(
                (self.state[
                    'Republican proportion']*(1-self.state[
                        'Republican proportion']))/self.state['Observations']))
        self.state = self.state.sort_values(['State abbreviation', 'Date'])
        # Interpolate the NA gaps between poll dates, one state at a
        # time so values never bleed across state boundaries.
        _states = []
        for _state in self.state['State abbreviation'].unique():
            _states.append(self.state[self.state['State abbreviation']
                                      == _state].interpolate(method='linear'))
        self.state = pd.concat(_states)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,768
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/about.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
from bokeh.models.widgets import (Div,
Panel)
from bokeh.layouts import column
# %%---------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# %%---------------------------------------------------------------------------
# About
# -----------------------------------------------------------------------------
class About():
    """Introduce software.

    A single static tab whose HTML content is read from
    about_html.html in the view folder.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.
        """
        self.controller = controller
        # First column HTML. Placeholder text is replaced in setup().
        self.column1 = Div(
            text="Placeholder",
            sizing_mode="""stretch_width""")
        # Layout the widgets
        self.layout = column(children=[self.column1],
                             sizing_mode='scale_width')
        self.panel = Panel(child=self.layout,
                           title='About')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        Raises OSError/UnicodeDecodeError if about_html.html is missing
        or is not valid UTF-8.
        """
        view_folder = os.path.dirname(os.path.realpath(__file__))
        # Read in HTML from disk. The encoding is specified explicitly
        # so the page renders identically regardless of the platform's
        # locale default encoding.
        with open(os.path.join(view_folder, 'about_html.html'),
                  'r', encoding='utf-8') as html:
            text = html.read()
        self.column1.text = text
    # %%
    def update(self):
        """Update view object.
        By default, just a stub. Depending
        on your implementation, it might not be needed.
        """
        pass
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,769
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from controller.controller import Controller
# %%---------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
# This code is called by the Bokeh server.
# No if __name__ here because of the way that Bokeh works.
# Two-part initialization: construct the controller (unlikely to
# fail), then setup() does the riskier work.
controller = Controller()
controller.setup()
# display must be called after setup or else callbacks don't work
controller.display()
# Populate every view with the model's current data.
controller.update()
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,770
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/runforecast.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (Button,
Div,
Panel,
Select,
TextAreaInput)
from bokeh.layouts import column, row, Spacer
# %%---------------------------------------------------------------------------
# RunForecast
# -----------------------------------------------------------------------------
class RunForecast():
    """Run the forecast.

    A Bokeh tab that lets the user run a forecast for a chosen election
    year or load a previously-run analysis year, and shows the status
    of those operations.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.

        Parameters
        ----------
        controller
            The application controller; its calculate_forecast/
            load_forecast/update methods are invoked from callbacks.
        """
        self.controller = controller
        # Explains how to run the model.
        self.headingrunexplain = Div(
            text="""You can run an analysis or load an existing analysis. """
            """Select the year for analysis then run the forecast.""",
            sizing_mode="""stretch_width""")
        self.loadedheading = Div(
            text="""<span style='font-weight:bold;font-size:14pt'>"""
            """Year currently loaded.</span>""",
            sizing_mode="""stretch_width""")
        self.loaded = Div(
            text="""<span style='color:red;font-size:12pt'>"""
            """No analysis year loaded.</span>""",
            sizing_mode="""stretch_width""")
        self.datainsystemheading = Div(
            text="""<span style='font-weight:bold;font-size:14pt'>"""
            """Analysis years in system.</span>""",
            sizing_mode="""stretch_width""")
        self.datainsystem = TextAreaInput(
            title="""Elections years analyzed in system""",
            value="""No years in system""",
            rows=1)
        # Menu options/values are dummies; update() fills in the real
        # years available.
        self.selecttheyeartoload = Select(
            title="""Year to load and display""",
            options=['dummy1', 'dummy2', 'dummy3'],
            value="""dummy1""")
        # Load year button
        self.loadyear = Button(
            label="""Load year""",
            width=300,
            button_type="""success""")
        self.forecastheading = Div(
            text="""<span style='font-weight:bold;font-size:14pt'>"""
            """Election year to forecast.</span>""",
            sizing_mode="""stretch_width""")
        # Menu of available Presidential elections to forecast.
        self.selecttheyeartoforecast = Select(
            title="""Year to forecast""",
            options=['dummy1', 'dummy2', 'dummy3'],
            value="""dummy1""")
        # Run forecast button
        self.runforecast = Button(
            label="""Run forecast""",
            width=300,
            button_type="""success""")
        # Shows status of the forecast model.
        self.statusreport = TextAreaInput(
            title="""Forecast run response""",
            value="""No forecast results run.""",
            sizing_mode="""stretch_width""",
            rows=6)
        # Layout the widgets
        r1 = row(children=[self.headingrunexplain])
        c1 = column(children=[self.forecastheading,
                              self.selecttheyeartoforecast,
                              self.runforecast,
                              self.statusreport])
        c2 = column(children=[self.datainsystemheading,
                              self.datainsystem,
                              self.selecttheyeartoload,
                              self.loadyear])
        c3 = column(children=[self.loadedheading,
                              self.loaded])
        self.layout = column(children=[r1,
                                       row(children=[c1,
                                                     Spacer(width=40),
                                                     c2,
                                                     Spacer(width=40),
                                                     c3])],
                             sizing_mode='scale_width')
        self.panel = Panel(child=self.layout,
                           title='Run/load forecast')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Setup the callbacks.
        self.runforecast.on_click(
            self.callback_runforecast)
        self.selecttheyeartoload.on_change(
            "value",
            self.callback_selecttheyeartoload)
        self.loadyear.on_click(
            self.callback_loadyear)
        self.selecttheyeartoforecast.on_change(
            "value",
            self.callback_selecttheyeartoforecast)
    # %%
    def update(self, years):
        """Update view object.

        Parameters
        ----------
        years : dict
            Maps the keys 'analysis', 'summary', 'allocations', and
            'polls' to iterables of years available in the system.
        """
        self.datainsystem.value = \
            ' | '.join([str(y) for y in years['analysis']])
        # A year can only be forecast when summary, allocation, and
        # poll data all exist for it.
        _available = list(set(years['summary']) &
                          set(years['allocations']) &
                          set(years['polls']))
        self.selecttheyeartoforecast.options = [str(year) for year
                                                in _available]
        self.selecttheyeartoforecast.value = str(max(_available))
        self.selecttheyeartoload.options = [str(year) for year
                                            in _available]
        self.selecttheyeartoload.value = str(max(_available))
    # %%
    def callback_runforecast(self):
        """Execute callback for the Button attribute self.runforecast."""
        # The controller returns a status string shown to the user.
        self.statusreport.value = self.controller.calculate_forecast(
            int(self.selecttheyeartoforecast.value))
        self.controller.update()
    # %%
    def callback_loadyear(self):
        """Execute callback for the Button attribute self.loadyear."""
        _year = int(self.selecttheyeartoload.value)
        _text = self.controller.load_forecast(_year)
        self.loaded.text = ("""<span style='font-weight:bold;"""
                            """color:purple;font-size:64pt'>{0}"""
                            """</span>"""
                            """<br>"""
                            """<span style='font-size:10pt'>"""
                            """{1}</span>""").format(_year, _text)
    # %%
    def callback_selecttheyeartoload(self, attrname, old, new):
        """Execute callback for self.selecttheyeartoload."""
        self.loadyear.label = """Load year {0}""".format(new)
    # %%
    def callback_selecttheyeartoforecast(self, attrname, old, new):
        """Execute callback for self.selecttheyeartoforecast."""
        self.runforecast.label = """Run forecast for year {0}""".format(new)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,771
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/model/model.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import glob
import pandas as pd
import requests
# try-except to handle execution as a standalone and as part of Bokeh
# application
try:
from model.statemodel import StateModel
from model.electoralcollegemodel import ElectoralCollegeModel
except ModuleNotFoundError:
from statemodel import StateModel
from electoralcollegemodel import ElectoralCollegeModel
# %%---------------------------------------------------------------------------
# Decorators
# -----------------------------------------------------------------------------
def reset_error(func):
    """Decorator: clear the object's error state before calling *func*.

    Sets ``error_status`` to False and ``error_message`` to '' on the
    bound object (the first positional argument, i.e. ``self``) so every
    decorated method starts from a clean error slate.

    Fixes over the previous version: the wrapper now forwards keyword
    arguments (the old ``*args``-only wrapper silently rejected them)
    and uses ``functools.wraps`` so the wrapped method keeps its name
    and docstring for introspection and debugging.
    """
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        """Reset error flags on args[0], then delegate to *func*."""
        args[0].error_status = False
        args[0].error_message = ''
        return func(*args, **kwargs)
    return func_wrapper
# %%---------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# Names of the data sub-folders, resolved relative to this module's
# directory (see Model.model_folder): raw inputs vs. computed outputs.
RAWDATA = 'rawdata'
PROCESSEDDATA = 'processeddata'
# %%---------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
class Model():
    """Class Model is part of the model-view-controller architecture.

    It contains the data model - the data used in the application.

    Attributes (set in __init__ / load_forecast):
        model_folder -- absolute path of this module's directory.
        summary      -- election summary DataFrame (one row per election).
        allocations  -- long-format electoral college allocations by
                        Year / State abbreviation.
        results      -- historic election results DataFrame.
        polls        -- cleaned 2020 state polling DataFrame.
        electoral, state -- placeholders; see load_forecast, which
                        populates self.state plus self.electoral_maximum
                        and self.electoral_distribution.
        error_status, error_message -- error flags; reset to
                        (False, '') by the @reset_error decorator on
                        entry to every public method.
    """
    # %%
    @reset_error
    def __init__(self):
        """Initialize object. First part of two-part initialization.
        Put initialization code here that's very unlikely to fail. This
        approach enables us to build a UI that can handle error messages
        before errors occur.

        Note: error_status/error_message are created by the
        @reset_error decorator, not assigned explicitly here.
        """
        self.model_folder = os.path.dirname(os.path.realpath(__file__))
        self.summary = None
        self.allocations = None
        self.results = None
        self.polls = None
        self.electoral = None
        self.state = None
    # %%
    @reset_error
    def read_rawdata(self):
        """Read in the raw data necessary to make a forecast.

        Loads state names, election summary, electoral college
        allocations, historic results, and the 2020 polls file; then
        cleans and filters the polls down to two-way Trump/Biden state
        polls with one population variant per poll. Sets error_status /
        error_message on (non-fatal) data problems.
        """
        # State names
        # ===========
        names = pd.read_csv(os.path.join(self.model_folder,
                                         RAWDATA,
                                         'StateNames.csv'))
        # Election summary
        # ================
        self.summary = pd.read_csv(os.path.join(self.model_folder,
                                                RAWDATA,
                                                'ElectionSummary.csv'),
                                   parse_dates=['Election date'])
        # Electoral college allocations
        # =============================
        self.allocations = pd.read_csv(
            os.path.join(self.model_folder,
                         RAWDATA,
                         'ElectoralCollegeAllocations.csv'))
        # Reformat data: wide (years as rows after transpose) -> long
        # (Year, State abbreviation, Allocation) via melt.
        states = self.allocations['State abbreviation'].tolist()
        self.allocations = (
            self.allocations.set_index('State abbreviation')
                            .transpose()
                            .reset_index()
                            .fillna(0)
                            .rename(columns={'index': 'Year'}))
        self.allocations = pd.melt(self.allocations,
                                   id_vars=['Year'],
                                   value_vars=states,
                                   var_name='State abbreviation',
                                   value_name='Allocation')
        # Tidying up
        self.allocations['Allocation'] = \
            self.allocations['Allocation'].astype(int)
        self.allocations['Year'] = \
            self.allocations['Year'].astype(int)
        # Election results
        # ================
        self.results = pd.read_csv(os.path.join(self.model_folder,
                                                RAWDATA,
                                                'ElectionResults.csv'))
        # Polls
        # =====
        # Using low memory because of warning - file is small enough
        # that this is OK
        self.polls = pd.read_csv(os.path.join(self.model_folder,
                                              RAWDATA,
                                              'Polls_2020.csv'),
                                 parse_dates=['start_date',
                                              'end_date'],
                                 low_memory=False)
        # Renaming and tidying up data
        # ----------------------------
        # Rename columns - renaming any columns I need to merge on or
        # that I alter in some way
        self.polls = (self.polls.rename(
            columns={'cycle': 'Year',
                     'candidate_party': 'Party',
                     'candidate_name': 'Candidate name',
                     'state': 'State name'}))
        # Add a state abbreviations column
        self.polls = self.polls.merge(names,
                                      on='State name',
                                      how='left')
        # Change dataframe contents - normalize party and candidate
        # names so later filters and pivots match exactly.
        replace_dict = [{'col': 'Party', 'old': 'DEM', 'new': 'Democratic'},
                        {'col': 'Party', 'old': 'REP', 'new': 'Republican'},
                        {'col': 'Candidate name',
                         'old': 'Biden', 'new': 'Joe Biden'},
                        {'col': 'Candidate name',
                         'old': 'Trump', 'new': 'Donald Trump'}]
        for replace in replace_dict:
            self.polls.loc[
                self.polls[
                    replace['col']].str.contains(
                        replace['old']), replace['col']] = replace['new']
        # Filtering - generic
        # -------------------
        # Filter for state polls, just Democratic and Republican and
        # for just two named candidates
        self.polls = self.polls[(~self.polls['State name'].isnull()) &
                                (self.polls['Party'].isin(['Democratic',
                                                           'Republican'])) &
                                (self.polls['Year'] == 2020) &
                                (self.polls['Candidate name'].isin(
                                    ['Donald Trump', 'Joe Biden']))]
        # Some polls are for hypothetical match ups and removing candidates who
        # didn't make the final ticket can leave us with odd results,
        # so we need to remove all surveys and questions where just one
        # candidate is left after the previous clean up.
        two_candidates = (self.polls[['poll_id',
                                      'question_id',
                                      'Candidate name']]
                          .groupby(['poll_id', 'question_id'])
                          .nunique()['Candidate name']
                          .reset_index()
                          .query('`Candidate name` == 2')
                          [['poll_id', 'question_id']]
                          .drop_duplicates())
        self.polls = self.polls.merge(two_candidates,
                                      on=['poll_id', 'question_id'],
                                      how='inner')
        # Filtering - poll specific
        # -------------------------
        # Some polls require qualification, e.g. the same results are
        # presented in two or more separate ways. We need to filter specific
        # polls here.
        # NOTE(review): the hard-coded poll_id/question_id exclusions
        # below are tied to the 2020 polls file snapshot; they need
        # revisiting if the raw data is re-fetched.
        # Higher and lower likelihood of turnout - some Monmouth
        # polls report three variants - remove higher and lower variants
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([67821, 67101, 67920,
                                             69464])) &
                (self.polls['notes'].isin(['lower likely turnout',
                                           'higher likely turnout']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([70599])) &
                (self.polls['notes'].isin(['low likely turnout',
                                           'high likely turnout']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([70780])) &
                (self.polls['notes'].isin(['low likely turnout',
                                           'high likely turnout']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([71090])) &
                (self.polls['notes'].isin(['low turnout model',
                                           'high turnout model']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([71548])) &
                (self.polls['notes'].isin(['lower turnout model',
                                           'higher turnout model']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([72146])) &
                (self.polls['notes'].isin(['lower turnout model',
                                           'higher turnout model']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([72214])) &
                (self.polls['notes'].isin(['higher turnout model',
                                           'lower turnout model']))
             )]
        self.polls = self.polls[
            ~(
                (self.polls['poll_id'].isin([72599])) &
                (self.polls['notes'].isin(['high likely turnout',
                                           'low likley turnout']))
             )]
        # Arizona poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70079) &
                                  (self.polls['question_id'] == 130554))]
        # Arizona poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 67934) &
                                  (self.polls['question_id'] == 127187))]
        # Arizona poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71007) &
                                  (self.polls['question_id'] == 132884))]
        # Arizona poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71067) &
                                  (self.polls['question_id'] == 133042))]
        # Arizona polls - removing higher and lower voter turnout
        self.polls = self.polls[~((self.polls['poll_id'] == 69513) &
                                  (self.polls['question_id'] == 129488))]
        self.polls = self.polls[~((self.polls['poll_id'] == 69513) &
                                  (self.polls['question_id'] == 129489))]
        # Arizona poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71621) &
                                  (self.polls['question_id'] == 134170))]
        # Arizona poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71753) &
                                  (self.polls['question_id'] == 134445))]
        # Arizona poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72166) &
                                  (self.polls['question_id'] == 135353))]
        # Arizona poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72653) &
                                  (self.polls['question_id'] == 136341))]
        # Colorado poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69433) &
                                  (self.polls['question_id'] == 129320))]
        # Florida poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 66308) &
                                  (self.polls['question_id'] == 123433))]
        # Florida poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71006) &
                                  (self.polls['question_id'] == 132883))]
        # Florida poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71620) &
                                  (self.polls['question_id'] == 134168))]
        # Florida poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72167) &
                                  (self.polls['question_id'] == 135354))]
        # Florida poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72658) &
                                  (self.polls['question_id'] == 136351))]
        # Georgia poll where second question lower/higher
        self.polls = self.polls[~((self.polls['poll_id'] == 69690) &
                                  (self.polls['question_id'] == 129931))]
        self.polls = self.polls[~((self.polls['poll_id'] == 69690) &
                                  (self.polls['question_id'] == 129932))]
        # Georgia poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69937) &
                                  (self.polls['question_id'] == 130229))]
        # Iowa poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 67935) &
                                  (self.polls['question_id'] == 127189))]
        # Iowa poll Monmouth poll
        self.polls = self.polls[~((self.polls['poll_id'] == 69943) &
                                  (self.polls['question_id'] == 130247))]
        self.polls = self.polls[~((self.polls['poll_id'] == 70080) &
                                  (self.polls['question_id'] == 130555))]
        # Iowa poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70080) &
                                  (self.polls['question_id'] == 130554))]
        # Kansas poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69938) &
                                  (self.polls['question_id'] == 130231))]
        # Kentucky poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69939) &
                                  (self.polls['question_id'] == 130233))]
        # Maine polls represent a challenge, for now, we'll just remove
        # Congressional District polls
        self.polls = self.polls[~self.polls['State name'].isin(
            ['Maine CD-1', 'Maine CD-2'])]
        # Maine poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 67936) &
                                  (self.polls['question_id'] == 127191))]
        # Maine poll where second question is a RCV Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69587) &
                                  (self.polls['question_id'] == 129679))]
        # Maine poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70081) &
                                  (self.polls['question_id'] == 130559))]
        # Michigan poll where question 123714 seems to exclude 3rd parties
        self.polls = self.polls[~((self.polls['poll_id'] == 66406) &
                                  (self.polls['question_id'] == 123714))]
        # Michigan poll - 2nd option not clear
        self.polls = self.polls[~((self.polls['poll_id'] == 57656) &
                                  (self.polls['question_id'] == 93510))]
        # Michigan poll - 2nd option not clear
        self.polls = self.polls[~((self.polls['poll_id'] == 58192) &
                                  (self.polls['question_id'] == 94749))]
        # Michigan poll where second question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69940) &
                                  (self.polls['question_id'] == 130235))]
        # Michigan poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70785) &
                                  (self.polls['question_id'] == 132430))]
        # Michigan poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71462) &
                                  (self.polls['question_id'] == 133844))]
        # Michigan poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72058) &
                                  (self.polls['question_id'] == 135091))]
        # Michigan poll where first question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72508) &
                                  (self.polls['question_id'] == 136071))]
        # Nebraska polls represent a challenge, for now, we'll just remove
        # Congressional District polls
        self.polls = self.polls[~self.polls['State name'].isin(
            ['Nebraska CD-1', 'Nebraska CD-2'])]
        # New Hampshire poll with two presentations
        self.polls = self.polls[
            ~((self.polls['poll_id'] == 62978) &
              (self.polls['notes'] ==
               'split sample without undecided option'))]
        # New Hampshire poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70045) &
                                  (self.polls['question_id'] == 130469))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 67937) &
                                  (self.polls['question_id'] == 127193))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 68464) &
                                  (self.polls['question_id'] == 128157))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 69504) &
                                  (self.polls['question_id'] == 129476))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70044) &
                                  (self.polls['question_id'] == 130467))]
        # North Carolina poll where first question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70786) &
                                  (self.polls['question_id'] == 132431))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71441) &
                                  (self.polls['question_id'] == 130467))]
        # North Carolina poll where first question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71463) &
                                  (self.polls['question_id'] == 133846))]
        # North Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72059) &
                                  (self.polls['question_id'] == 135092))]
        # North Carolina poll where first question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72659) &
                                  (self.polls['question_id'] == 136352))]
        # Pennsylvania poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 68264) &
                                  (self.polls['question_id'] == 127878))]
        # Pennsylvania poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71441) &
                                  (self.polls['question_id'] == 133794))]
        self.polls = self.polls[~((self.polls['poll_id'] == 71441) &
                                  (self.polls['question_id'] == 133793))]
        # Pennsylvania poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 68319) &
                                  (self.polls['question_id'] == 127966))]
        # Pennsylvania poll where 2nd question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70741) &
                                  (self.polls['question_id'] == 132339))]
        # Pennsylvania poll where 1st question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71379) &
                                  (self.polls['question_id'] == 133664))]
        # Pennsylvania poll where 1st question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71976) &
                                  (self.polls['question_id'] == 134925))]
        # Pennsylvania poll where 1st question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72507) &
                                  (self.polls['question_id'] == 136069))]
        # South Carolina poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70082) &
                                  (self.polls['question_id'] == 130561))]
        # Texas poll where second question is a head-to-head
        # Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70046) &
                                  (self.polls['question_id'] == 130471))]
        # Utah poll - use UCEP educational model for now
        self.polls = self.polls[
            ~((self.polls['poll_id'] == 66525)
              & ((self.polls['notes'].isin(
                  ['CNN education weighting',
                   'CPS education weighting',
                   'CCES education weighting']))
                 | (self.polls['notes'].isna())))]
        # Wisconsin poll - 2nd option not clear
        self.polls = self.polls[~((self.polls['poll_id'] == 57697) &
                                  (self.polls['question_id'] == 93617))]
        # Wisconsin poll where 2nd question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 70740) &
                                  (self.polls['question_id'] == 132338))]
        # Wisconsin poll where 1st question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71380) &
                                  (self.polls['question_id'] == 133665))]
        # Wisconsin poll where 2nd question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 71975) &
                                  (self.polls['question_id'] == 134924))]
        # Wisconsin poll where 1st question is a head-to-head Trump vs. Biden
        self.polls = self.polls[~((self.polls['poll_id'] == 72505) &
                                  (self.polls['question_id'] == 136066))]
        # Filtering - population type
        # ---------------------------
        # Some polls report likely votes and registered voters etc, we will
        # preferentially select in this order: lv, rv, v, a - selecting
        # one and only one variant
        # A = ADULTS RV = REGISTERED VOTERS V = VOTERS LV = LIKELY VOTERS
        # Step 1: check that there are no other population types. This isn't
        # a show stopping error, so just report it.
        if (sorted(self.polls['population'].unique()) !=
                sorted(['lv', 'rv', 'v', 'a'])):
            self.error_status = True
            self.error_message = ("""Found an unexpected voter population """
                                  """type in the polls data file.""")
        # Step 2: introduce a population rank (1 = best = likely voters)
        df = pd.DataFrame({'population': ['lv', 'rv', 'v', 'a'],
                           'population_rank': [1, 2, 3, 4]})
        self.polls = self.polls.merge(
                            df,
                            on='population',
                            how='inner')
        # Step 3: Find the poll variants with the lowest (best) rank
        best_variants = (self.polls[['poll_id', 'population_rank']]
                         .groupby('poll_id')
                         .min()
                         .reset_index())
        # Step 4: Select just those poll variants
        self.polls = self.polls.merge(best_variants,
                                      on=['poll_id', 'population_rank'],
                                      how='inner')
        # Step 5: Remove the population_rank column
        self.polls = self.polls.drop(columns=['population_rank'])
        # Final filtering and formatting
        # ------------------------------
        self.polls = self.polls[['question_id',
                                 'poll_id',
                                 'pollster',
                                 'start_date',
                                 'end_date',
                                 'Year',
                                 'State abbreviation',
                                 'sample_size',
                                 'Party',
                                 'pct']]
        # This format is slightly easier to use: one row per question
        # with 'Democratic' and 'Republican' pct columns side by side.
        self.polls = self.polls.pivot_table(index=['question_id',
                                                   'poll_id',
                                                   'pollster',
                                                   'start_date',
                                                   'end_date',
                                                   'Year',
                                                   'State abbreviation',
                                                   'sample_size'],
                                            columns='Party',
                                            values='pct').reset_index()
        # Now calculate the spread. Note, we're using a proportion, not a %.
        self.polls['Spread D-R'] = (self.polls['Democratic']
                                    - self.polls['Republican'])/100
        # Final checks
        # ------------
        # Check state names and abbreviations are OK and we have 100%
        # coverage
        temp_ = self.polls[self.polls['State abbreviation'].isna()]
        if not temp_.empty:
            self.error_status = True
            self.error_message = ("""Found mismatch between state names and """
                                  """abbreviations in polling data.""")
    # %%
    @reset_error
    def get_years(self):
        """Get the years for which we have data or have run analysis.

        Returns a dict with keys 'summary', 'allocations', 'results',
        'polls' (years present in each loaded DataFrame, descending) and
        'analysis' (years for which all four processed CSVs exist).
        """
        years = {'summary': [],
                 'allocations': [],
                 'results': [],
                 'polls': [],
                 'analysis': []}
        if self.summary is not None and 'Year' in self.summary:
            years['summary'] = (
                self.summary.sort_values('Year', ascending=False)['Year']
                    .unique()
                    .tolist())
        if self.allocations is not None and 'Year' in self.allocations:
            years['allocations'] = (
                self.allocations.sort_values('Year', ascending=False)['Year']
                    .unique()
                    .tolist())
        if self.results is not None and 'Year' in self.results:
            years['results'] = (
                self.results.sort_values('Year', ascending=False)['Year']
                    .unique()
                    .tolist())
        if self.polls is not None and 'Year' in self.polls:
            years['polls'] = (
                self.polls.sort_values('Year', ascending=False)['Year']
                    .unique()
                    .tolist())
        # Get years for the analysis that's already been done.
        # NOTE(review): splitting on '/' assumes POSIX path separators;
        # use os.path.basename for Windows portability - confirm target
        # platforms before changing.
        _files = [file.split('/')[-1] for file in
                  glob.glob(os.path.join(self.model_folder,
                                         PROCESSEDDATA,
                                         r'*.csv'))]
        # Filenames end '_YYYY.csv', so chars [-8:-4] are the year.
        _years = set([file[-8:-4] for file in _files])
        years['analysis'] =\
            [int(year) for year in _years
             if 'electoral_maximum_{0}.csv'.format(year) in _files
             and 'electoral_distribution_{0}.csv'.format(year) in _files
             and 'state_{0}.csv'.format(year) in _files
             and 'processed_polls_{0}.csv'.format(year) in _files]
        return years
    # %%
    @reset_error
    def cross_check(self):
        """Cross check that the data is consistent.

        Returns a human-readable report string (may contain HTML tables
        for disagreements). Sets error_status on any failed check.
        NOTE(review): some failure paths here set error_status without
        setting error_message - callers relying on error_message may see
        '' despite an error; confirm whether that is intended.
        """
        text = []
        # Data present
        # ============
        files = {'Summary': self.summary,
                 'Allocations': self.allocations,
                 'Results': self.results,
                 'Polls': self.polls}
        for k, v in files.items():
            if v is not None and 'Year' in v:
                text.append("{0} data file present.".format(k))
            else:
                text.append("{0} data NOT file present.".format(k))
                self.error_status = True
        # Allocations agree
        # =================
        # Compare the electoral college totals from the summary file
        # against (a) per-state allocations and (b) summed results.
        summary = (self.summary[['Year',
                                 'Electoral College total']]
                   .rename(columns={'Electoral College total': 'Allocation'})
                   .sort_values(['Year'], ascending=False))
        allocations = (self.allocations[['Year', 'Allocation']]
                       .groupby(['Year'])
                       .sum()
                       .reset_index())
        results = (self.results
                   .groupby('Year')
                   .sum()
                   .reset_index()
                   .assign(Allocation=lambda x:
                           x['Democratic electoral'] +
                           x['Other electoral'] +
                           x['Republican electoral'])[['Year', 'Allocation']])
        college = {'Allocations': allocations,
                   'Results': results}
        for k, v in college.items():
            combo = summary.merge(v,
                                  on=['Year', 'Allocation'],
                                  how='right',
                                  indicator=True)
            if combo[combo['_merge'] != 'both'].empty:
                text.append(
                    '{0} - electoral college allocations agree.'.format(k))
            else:
                text.append("{0} - electoral college allocations don't agree. "
                            "Table of disagreements follows.".format(k))
                text.append(combo[combo['_merge'] != 'both'].to_html())
                self.error_status = True
        # Polls consistent
        # ================
        # Find potential duplicates - polls that still carry more than
        # one question after read_rawdata's de-duplication filters.
        pot_dups = (self.polls[['poll_id', 'question_id']]
                    .groupby(['poll_id'])
                    .nunique()
                    .query('question_id > 1')
                    [['question_id']]
                    .reset_index()['poll_id'])
        if not pot_dups.empty:
            text.append("Found unhandled potential duplicate polls in "
                        "polling file. ")
            text.append(str(pot_dups.to_list()))
            self.error_message = ("""Found unhandled potential duplicate """
                                  """polls in polling file. """
                                  """Duplicate are: {0}."""
                                  .format(str(pot_dups.to_list())))
            self.error_status = True
        else:
            text.append('Poll data has no duplicate questions.')
        return '\n'.join(text)
    # %%
    @reset_error
    def fetch_polls(self, year):
        """Fetch polling data from 538.

        Only year == 2020 is supported; other years are a silent no-op.
        Sets error_status/error_message on a non-200 HTTP response.
        """
        if year == 2020:
            url = ("""https://projects.fivethirtyeight.com"""
                   """/polls-page/president_polls.csv""")
            request = requests.get(url)
            if request.status_code != 200:
                self.error_status = True
                self.error_message = ("""model.fetch_polls returned """
                                      """an error code of {0}."""
                                      .format(request.status_code))
                return
            # NOTE(review): writes 'polls_2020.csv' (lower-case) but
            # read_rawdata reads 'Polls_2020.csv' - on a case-sensitive
            # filesystem the freshly fetched file is never read.
            # Confirm intended filename casing.
            with open(os.path.join(self.model_folder,
                                   RAWDATA,
                                   "polls_2020.csv"),
                      "wb") as poll_file:
                poll_file.write(request.content)
    # %%
    @reset_error
    def calculate_forecast(self, year):
        """Forecast the results of the Presidential election.

        Runs the state-level model, then the electoral college model,
        and writes state/electoral/poll CSVs to the processeddata
        folder for later loading by load_forecast.
        """
        # State model
        # ===========
        # Build the state model
        statemodel = StateModel(results=self.results,
                                polls=self.polls,
                                election_year=year)
        # Sets up more risky initialization that might fail
        statemodel.setup()
        # Calculates the state-level model
        statemodel.update()
        # Write the state data to disk
        statemodel.state.to_csv(os.path.join(self.model_folder,
                                             PROCESSEDDATA,
                                             'state_{0}.csv'.format(year)),
                                index=False)
        # Electoral college data
        # ======================
        # Now build the electoral college forecast model
        electoralmodel = ElectoralCollegeModel(state=statemodel.state,
                                               allocations=self.allocations,
                                               election_year=year)
        electoralmodel.setup()
        # Calculates the electoral college model
        electoralmodel.update()
        # Write the electoral college data to disk
        electoralmodel.electoral_maximum.to_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'electoral_maximum_{0}.csv'.format(year)),
            index=False)
        electoralmodel.electoral_distribution.to_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'electoral_distribution_{0}.csv'.format(year)),
            index=False)
        # Polling data
        # ============
        # Not really a forecast, but this is a convenient place to write
        # the cleaned up polling data to disk.
        self.polls.to_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'processed_polls_{0}.csv'.format(year)),
            index=False)
    # %%
    @reset_error
    def load_forecast(self, year):
        """Read in the forecast data, if present.

        Populates self.electoral_maximum, self.electoral_distribution,
        self.state and self.polls from the processeddata CSVs.
        NOTE(review): electoral_maximum/electoral_distribution are first
        created here (not in __init__, which defines self.electoral) -
        accessing them before load_forecast raises AttributeError.
        """
        # Electoral college
        # =================
        self.electoral_maximum = pd.read_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'electoral_maximum_{0}.csv'.format(year)),
            parse_dates=['Date'])
        self.electoral_distribution = pd.read_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'electoral_distribution_{0}.csv'.format(year)),
            parse_dates=['Date'])
        # State forecasts
        # ===============
        self.state = pd.read_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'state_{0}.csv'.format(year)),
            parse_dates=['Date'])
        _names = pd.read_csv(os.path.join(self.model_folder,
                                          RAWDATA,
                                          'StateNames.csv'))
        # Add in the State names - makes it easier to display results
        self.state = self.state.merge(_names,
                                      on='State abbreviation',
                                      how='left')
        # Polls
        # =====
        # Not really a forecast, but the processed polling data is used
        # by the same display code that uses forecasts.
        self.polls = pd.read_csv(
            os.path.join(self.model_folder,
                         PROCESSEDDATA,
                         'processed_polls_{0}.csv'.format(year)),
            parse_dates=['start_date', 'end_date'])
        # Add in the State names - makes it easier to display results
        self.polls = self.polls.merge(_names,
                                      on='State abbreviation',
                                      how='left')
        # Only polls from January 1 of year onwards
        start_date = pd.to_datetime('{0}-01-01'.format(year))
        self.polls = self.polls[self.polls['end_date'] >= start_date]
# %%
# Code to test the model.
# Manual smoke test: requires the rawdata CSVs on disk; reads data,
# runs the consistency checks, then computes and reloads a 2020
# forecast. Prints error flags after each stage.
if __name__ == "__main__":
    model = Model()
    print("*******")
    print("read_rawdata")
    model.read_rawdata()
    print("Error status: {0}".format(model.error_status))
    print("Error message: {0}".format(model.error_message))
    print('*******')
    print('get years')
    print(model.get_years())
    print("Error status: {0}".format(model.error_status))
    print("Error message: {0}".format(model.error_message))
    print("*******")
    print("cross_check results")
    print(model.cross_check())
    print('Errors:')
    print("Error status: {0}".format(model.error_status))
    print("Error message: {0}".format(model.error_message))
    print("*******")
    # Fetching from 538 is deliberately disabled here - it hits the
    # network; uncomment to refresh the raw polls file.
    # print("Fetch 538 data")
    # model.fetch_polls(2020)
    # print('Errors:')
    # print("Error status: {0}".format(model.error_status))
    # print("Error string: {0}".format(model.error_message))
    model.calculate_forecast(2020)
    model.load_forecast(2020)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,772
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/model/uniform.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 4 10:17:07 2020.
@author: mikewoodward
"""
import pandas as pd
from bokeh.plotting import figure, show
# Observed frequencies for each die face from Labby's dice experiment.
scores = [1, 2, 3, 4, 5, 6]
counts = [53222, 52118, 52465, 52338, 52244, 53285]
dice = pd.DataFrame({'score': scores, 'frequency': counts})
# Bar chart of frequency by face value, displayed in the browser.
chart = figure(title="""Labby's dice data.""")
chart.vbar(x=dice['score'], top=dice['frequency'])
show(chart)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,773
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/forecastbygeography.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import json
import os
import random
from bokeh.models.widgets import (DateSlider,
Panel)
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.layouts import column, row, Spacer
from bokeh.palettes import brewer
import pandas as pd
import numpy as np
# %%---------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
MAP_FOLDER = 'maps'
# %%---------------------------------------------------------------------------
# ForecastByGeography
# -----------------------------------------------------------------------------
class ForecastByGeography():
    """Shows US map and electoral college votes by state.

    View component (MVC): draws a choropleth of the US with one patch
    per state, colored by the forecast D-R spread, with a date slider
    to scrub through the forecast timeline.
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.

        controller -- the application controller (kept for callbacks).
        """
        self.controller = controller
        # self.state: DataFrame of per-state forecasts (set in update);
        # self.state_src: ColumnDataSource backing the map (set in setup).
        self.state = None
        self.state_src = None
        # State map of the US.
        self.stateusmap = figure(
            title="""Electoral college votes by time and geography""",
            x_axis_location=None,
            y_axis_location=None,
            x_axis_type="""linear""",
            sizing_mode="""stretch_both""")
        self.stateusmap.xgrid.visible = False
        self.stateusmap.ygrid.visible = False
        # The date for charting. Placeholder start/end/value dates are
        # replaced with the real forecast date range in update().
        self.choosethedatefordisplay = DateSlider(
            title="""Choose the date for display""",
            start="""2018-11-13T20:20:39+00:00""",
            end="""2025-11-13T20:20:39+00:00""",
            step=24*60*60*1000,
            value="""2018-11-13T20:20:39+00:00""",
            sizing_mode="stretch_width")
        # Layout the widgets
        row1 = row(children=[Spacer(width=10),
                             self.choosethedatefordisplay,
                             Spacer(width=10)],
                   sizing_mode='stretch_width')
        self.layout = column(children=[self.stateusmap,
                                       row1,
                                       Spacer(height=75,
                                              sizing_mode='scale_width')],
                             sizing_mode='stretch_both')
        self.panel = Panel(child=self.layout,
                           title='Forecast by geography')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Load the files containing the state outlines and the Alaska/Hawaii
        # dividing lines
        _folder = os.path.dirname(os.path.realpath(__file__))
        _state_j = json.load(
            open(os.path.join(_folder, MAP_FOLDER, "state.json"), 'r'))
        _state = pd.DataFrame(_state_j['data'])
        _state = _state.sort_values('State abbreviation')
        # Random colors/percentages are placeholders so the map renders
        # before update() supplies real forecast data.
        _state['color'] = random.choices(brewer['RdBu'][11],
                                         k=_state.shape[0])
        _state['Democratic percentage'] = np.random.rand(_state.shape[0])
        _state['Republican percentage'] = np.random.rand(_state.shape[0])
        _frame_j = json.load(
            open(os.path.join(_folder, MAP_FOLDER, "frame.json"), 'r'))
        # Set up the sources
        self.state_src = ColumnDataSource(_state)
        frame_src = ColumnDataSource(data=dict(x=_frame_j['data']['x'],
                                               y=_frame_j['data']['y']))
        # Draw the states and the lines
        states = self.stateusmap.patches(xs='x',
                                         ys='y',
                                         source=self.state_src,
                                         fill_alpha=0.5,
                                         fill_color='color',
                                         line_color="gray",
                                         line_width=0.5)
        # The frame that separates AK, HI from the rest of the US
        self.stateusmap.multi_line(xs='x',
                                   ys='y',
                                   source=frame_src,
                                   line_color="gray",
                                   line_width=1.0)
        # Now set up the hover tool - so the state name is given
        hover = HoverTool(point_policy="follow_mouse",
                          renderers=[states],
                          tooltips=[("State name",
                                     "@{State name}"),
                                    ("State abbreviation",
                                     "@{State abbreviation}"),
                                    ("Democratic",
                                     "@{Democratic percentage}{%0.1f}"),
                                    ("Republican",
                                     "@{Republican percentage}{%0.1f}")])
        self.stateusmap.add_tools(hover)
        # Setup the callbacks.
        self.choosethedatefordisplay.on_change(
            "value",
            self.callback_choosethedatefordisplay)
    # %%
    def update(self, state):
        """Update view object.

        state -- per-state forecast DataFrame with at least 'Date',
        'State abbreviation', 'Spread D-R', and the proportion columns
        read by _update_chart.
        """
        # Make a copy of the state data and change the copy
        self.state = state.copy()
        # Bucket the D-R spread (in percentage points) into the 11 bins
        # of the RdBu palette; labels 10..0 index into the palette map
        # built below.
        self.state['color index'] = self.state['Spread D-R']*100
        self.state['color index'] = pd.cut(
            self.state['color index'],
            [-100, -10, -5, -2, -1, -0.5, 0.5, 1, 2, 5, 10, 100],
            labels=[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])
        self.state['color'] =\
            self.state['color index'].map(
                {k: v for k, v in enumerate(brewer['RdBu'][11])})
        # NOTE(review): value is assigned before end is widened - if the
        # new max date exceeds the slider's current end, the widget may
        # clamp the value; consider setting end before value. Confirm
        # against Bokeh DateSlider behavior.
        self.choosethedatefordisplay.start = self.state['Date'].min()
        self.choosethedatefordisplay.value = self.state['Date'].max()
        self.choosethedatefordisplay.end = self.state['Date'].max()
        self._update_chart(self.choosethedatefordisplay.value_as_datetime)
    # %%
    def _update_chart(self, date):
        """Update chart based on date.

        Pushes the colors and party shares for the given forecast date
        into the map's ColumnDataSource, ordered by state abbreviation
        to match the patch order established in setup().
        NOTE(review): the source fields are named '... percentage' but
        are filled from the '... proportion' columns (0-1 values); the
        hover's {%0.1f} format presumably accounts for this - confirm.
        """
        _slice = self.state[self.state['Date'] == date]
        self.state_src.data['color'] = \
            _slice[['State abbreviation',
                    'color']].sort_values(
                        'State abbreviation')['color'].to_list()
        self.state_src.data['Democratic percentage'] = \
            _slice[['State abbreviation',
                    'Democratic proportion']].sort_values(
                        'State abbreviation')[
                            'Democratic proportion'].to_list()
        self.state_src.data['Republican percentage'] = \
            _slice[['State abbreviation',
                    'Republican proportion']].sort_values(
                        'State abbreviation')[
                            'Republican proportion'].to_list()
    # %%
    def callback_choosethedatefordisplay(self, attrname, old, new):
        """Execute callback method for self.choosethedatefordisplay.

        Redraws the map for the newly selected slider date.
        """
        # pylint: disable=W0613
        self._update_chart(self.choosethedatefordisplay.value_as_datetime)
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,774
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/pollviewer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from random import sample
import pandas as pd
from bokeh.models.widgets import (DataTable,
DateRangeSlider,
Panel,
Select,
TableColumn)
from bokeh.models import ColumnDataSource, DateFormatter
from bokeh.layouts import column, row, Spacer
# %%---------------------------------------------------------------------------
# PollViewer
# -----------------------------------------------------------------------------
class PollViewer():
    """Shows the polls in the system for the selected election.

    A DataTable of opinion polls, filterable by state (Select widget)
    and by poll start/end dates (DateRangeSlider widget).
    """
    # %%
    def __init__(self, controller):
        """Initialize object.
        First part of two-part initialization.
        Put initialization code here that's very unlikely to fail.
        """
        self.controller = controller
        # Polls DataFrame; supplied later by update().
        self.polls = None
        # Table
        # =====
        # Stub code for DataTable setup.  The placeholder row is
        # replaced with real poll data in _update_table().
        _df = pd.DataFrame(
            {'State name': ['Alaska'],
             'Start date': ['2020-01-01'],
             'End date': ['2020-01-10'],
             'Polling company': ['Good Polls Inc'],
             'Poll ID': [123456],
             'Sample size': [1000],
             'Democratic %': [44.9],
             'Republican %': [45.1]})
        _df['Start date'] = pd.to_datetime(_df['Start date'])
        _df['End date'] = pd.to_datetime(_df['End date'])
        self.pollsource = ColumnDataSource(_df)
        # Bug fix: the 'Start date' column was titled 'State date'.
        columns = [TableColumn(field='State name', title='State name'),
                   TableColumn(field='Start date',
                               title='Start date',
                               formatter=DateFormatter()),
                   TableColumn(field='End date',
                               title='End date',
                               formatter=DateFormatter()),
                   TableColumn(field='Polling company',
                               title='Polling company'),
                   TableColumn(field='Poll ID', title='Poll ID'),
                   TableColumn(field='Sample size', title='Sample size'),
                   TableColumn(field='Democratic %', title='Democratic %'),
                   TableColumn(field='Republican %', title='Republican %')]
        # Opinion polls in the system.
        self.opinionpolls = DataTable(
            source=self.pollsource,
            columns=columns,
            index_position=None,
            sizing_mode="""stretch_both""")
        # Other widgets
        # =============
        # Date range - placeholder limits; real limits set in update().
        self.choosedates = DateRangeSlider(
            title="""Choose the date for display""",
            start="""2018-11-13T20:20:39+00:00""",
            end="""2025-11-13T20:20:39+00:00""",
            step=24*60*60*1000,  # one day, in milliseconds
            value=("""2018-11-13T20:20:39+00:00""",
                   """2025-11-13T20:20:39+00:00"""),
            sizing_mode="stretch_width")
        # State - dummy options replaced in update().
        self.selectstate = Select(
            title="""State""",
            options=['dummy1', 'dummy2', 'dummy3'],
            value="""dummy1""",
            sizing_mode="stretch_width")
        # Layout the widgets
        # ==================
        row1 = row(children=[self.choosedates,
                             Spacer(width=50),
                             self.selectstate])
        layout = column(children=[self.opinionpolls,
                                  row1,
                                  Spacer(height=75,
                                         sizing_mode='scale_width')],
                        sizing_mode='stretch_both')
        self.panel = Panel(child=layout,
                           title='Poll viewer')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Setup the callbacks.
        self.choosedates.on_change("value", self.callback_choosedates)
        self.selectstate.on_change("value", self.callback_selectstate)
    # %%
    def update(self, polls):
        """Update view object.

        polls -- DataFrame of polls; columns used here are 'State name',
        'start_date', 'end_date', 'pollster', 'poll_id', 'sample_size',
        'Democratic', and 'Republican'.
        """
        self.polls = polls
        _states = sorted(self.polls['State name'].unique().tolist())
        self.selectstate.options = _states
        # Pick an arbitrary state so the table is never empty on load.
        self.selectstate.value = sample(_states, 1)[0]
        # Constrain the slider to the full span of the poll dates and
        # select that whole span by default.
        self.choosedates.start = self.polls['start_date'].min()
        self.choosedates.end = self.polls['end_date'].max()
        self.choosedates.value = (
            self.polls['start_date'].min(),
            self.polls['end_date'].max())
        self._update_table()
    # %%
    def _update_table(self):
        """Update table with polls for the selected state/date range."""
        # Polls for the chosen state whose start AND end dates fall
        # inside the slider's selected range.
        _slice = self.polls[
            (self.polls['State name'] == self.selectstate.value)
            & (self.polls['start_date']
               >= self.choosedates.value_as_datetime[0])
            & (self.polls['end_date']
               <= self.choosedates.value_as_datetime[1])
            ].sort_values(['start_date', 'end_date'])
        self.pollsource.data = {
            'State name': _slice['State name'].to_list(),
            'Start date': _slice['start_date'].to_list(),
            'End date': _slice['end_date'].to_list(),
            'Polling company': _slice['pollster'].to_list(),
            'Poll ID': _slice['poll_id'].to_list(),
            'Sample size': _slice['sample_size'].to_list(),
            'Democratic %': _slice['Democratic'].to_list(),
            'Republican %': _slice['Republican'].to_list()}
    # %%
    def callback_choosedates(self, attrname, old, new):
        """Execute callback for self.choosedates."""
        # pylint: disable=W0613
        self._update_table()
    # %%
    def callback_selectstate(self, attrname, old, new):
        """Execute callback for self.selectstate."""
        # pylint: disable=W0613
        self._update_table()
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,775
|
RossXli/Bokeh_Showcase_Silkworm
|
refs/heads/master
|
/view/managedata.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: silkworm.
Description:
Silkworm is a poll-based US Presidential Election forecaster.
Author: Mike Woodward
Created on: 2020-07-26
"""
# %%---------------------------------------------------------------------------
# Module metadata
# -----------------------------------------------------------------------------
__author__ = "Mike Woodward"
__license__ = "MIT"
# %%---------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from bokeh.models.widgets import (Button,
Div,
Panel,
TextAreaInput)
from bokeh.layouts import column, row
# %%---------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# %%---------------------------------------------------------------------------
# ManageData
# -----------------------------------------------------------------------------
class ManageData():
    """Panel to manage data sources.

    Shows which Electoral College allocations, election results, and
    polling years are loaded, and runs a consistency cross-check on
    the data via the controller.
    """
    # %%
    def __init__(self, controller):
        """Initialize object safely.
        Put initialization code here that's very unlikely to fail.
        """
        self.controller = controller
        self.help = Div(
            text="""This tab displays the raw data in the system. """
            """You can run forecasts for any election year where you """
            """have electoral college, results, and polling data. """
            """The cross-check button cross checks the data for """
            """consistency.""",
            sizing_mode="""stretch_width""")
        self.datainsystemheading = Div(
            text="""<span style='font-weight:bold;font-size:14pt'>"""
            """Data in system.</span>""",
            sizing_mode="""stretch_width""")
        # Displays the Electoral College Vote allocations by year.
        self.ecvyearallocations = TextAreaInput(
            title="""Electoral college vote allocations in system""",
            value="""No allocations in system""",
            rows=1)
        # Displays the election result years in the system.
        # Bug fix: default text said "No allocations in system",
        # copy-pasted from the widget above.
        self.electionresults = TextAreaInput(
            title="""Presidential election results in system""",
            value="""No results in system""",
            rows=1)
        # Displays the Presidential polling in system.
        # Bug fix: default text said "No allocations in system".
        self.polling = TextAreaInput(
            title="""Presidential election polling in system""",
            value="""No polling in system""",
            rows=1)
        self.crosscheckheading = Div(
            text="""<span style='font-weight:bold;font-size:14pt'>"""
            """Cross-check data.</span>""",
            sizing_mode="""stretch_width""")
        # Header to explain what cross-check button does.
        self.headingverification = Div(
            text="""Click the button to start a cross-check that """
            """the data in the system is both correct and consistent.""",
            width=300)
        # Starts the verification data cross-check.
        self.verificationbutton = Button(
            label="""Cross-check data.""",
            width=300,
            button_type="""success""")
        # Displays the results of the cross-check.
        # NOTE(review): attribute name 'verfificationresults' is
        # misspelled but kept as-is for backward compatibility.
        self.verfificationresults = TextAreaInput(
            title="""Cross-check results""",
            value="""Cross-check verification not run.""",
            rows=6,
            width=610)
        # Layout the widgets
        self.layout = column(
            children=[row(self.help),
                      row(self.datainsystemheading),
                      row(children=[self.ecvyearallocations,
                                    self.electionresults,
                                    self.polling]),
                      row(children=[self.crosscheckheading]),
                      row(children=[self.headingverification,
                                    self.verificationbutton]),
                      row(children=[self.verfificationresults])],
            sizing_mode='scale_width')
        self.panel = Panel(child=self.layout,
                           title='Manage data')
    # %%
    def setup(self):
        """Set up object.
        Second part of two-part initialization.
        Place initialization code here that's more likely to fail.
        """
        # Setup the callbacks.
        self.verificationbutton.on_click(self.callback_verificationbutton)
    # %%
    def update(self, years):
        """Update view object.

        years -- dict with 'allocations', 'results', and 'polls' keys,
        each an iterable of years present in the system.
        """
        self.ecvyearallocations.value = \
            ' | '.join([str(y) for y in years['allocations']])
        self.ecvyearallocations.rows = 6
        self.electionresults.value = \
            ' | '.join([str(y) for y in years['results']])
        self.electionresults.rows = 6
        self.polling.value = \
            ' | '.join([str(y) for y in years['polls']])
        self.polling.rows = 6
    # %%
    def callback_verificationbutton(self):
        """Execute callback for Button attribute self.verificationbutton."""
        # Show whatever report the controller's cross-check produces.
        self.verfificationresults.value = self.controller.cross_check()
        self.verfificationresults.rows = 6
|
{"/controller/controller.py": ["/model/model.py", "/view/about.py", "/view/managedata.py", "/view/runforecast.py", "/view/forecastbytime.py", "/view/forecastdistribution.py", "/view/forecastbygeography.py", "/view/forecastbystate.py", "/view/pollviewer.py"], "/main.py": ["/controller/controller.py"], "/model/model.py": ["/model/statemodel.py", "/model/electoralcollegemodel.py"]}
|
27,798
|
OpenElement-GachaBot/OpenElement
|
refs/heads/main
|
/gacha.py
|
import time
import pyautogui
import screen
import cv2
import numpy as np
import json
from pynput.keyboard import Key, Listener
import ark
# Grayscale templates matched against screen captures to detect game state.
crystal_template = cv2.imread("templates/gacha_crystal.png", cv2.IMREAD_GRAYSCALE)
broken_whip_template = cv2.imread("templates/broken_whip.png", cv2.IMREAD_GRAYSCALE)
added_template = cv2.imread("templates/added_template.png", cv2.IMREAD_GRAYSCALE)
owlshit_template = cv2.imread("templates/owlshit.png", cv2.IMREAD_GRAYSCALE)
tooltips_template = cv2.imread("templates/tool_tips_enabled.png", cv2.IMREAD_GRAYSCALE)
# Loaded in color because the cyan prompt text is isolated by HSV masking below.
access_inv_template = cv2.imread("templates/access_inventory_template.png")
# HSV band for the cyan used by the in-game "access inventory" prompt.
lower_cyan = np.array([90,250,250])
upper_cyan = np.array([110,255,255])
# Pre-mask the access-inventory template to cyan pixels only, then gray it,
# so matching against a similarly masked screen is color-specific.
hsv = cv2.cvtColor(access_inv_template, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower_cyan, upper_cyan)
masked_template = cv2.bitwise_and(access_inv_template, access_inv_template, mask= mask)
access_inv_gray_template = cv2.cvtColor(masked_template, cv2.COLOR_BGR2GRAY)
# Currently unused placeholder; location selection happens via beds.json below.
location = ""
def disableToolTips():
    """Press 'g' when the gacha-crystal icon shows in the tooltip region.

    NOTE(review): matches crystal_template rather than tooltips_template -
    presumably the icon only renders while tool tips are enabled; confirm.
    """
    region = screen.getScreen()[164:210, 623:668]
    grayscale = cv2.cvtColor(region, cv2.COLOR_BGR2GRAY)
    scores = cv2.matchTemplate(grayscale, crystal_template, cv2.TM_CCOEFF)
    _, best_score, _, _ = cv2.minMaxLoc(scores)
    if best_score > 4000000:
        pyautogui.press('g')
def checkInvAccessibleText():
    """Return True when the cyan 'access inventory' prompt is on screen.

    Masks the screen region to the prompt's cyan HSV band before template
    matching, so the match is color-specific.
    """
    roi = screen.getScreen()[0:1080,600:1300]
    screen_hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(screen_hsv, lower_cyan, upper_cyan)
    masked_screen = cv2.bitwise_and(roi, roi, mask= mask)
    gray_screen = cv2.cvtColor(masked_screen, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray_screen, access_inv_gray_template, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Return the comparison directly instead of if/return True/return False.
    # Threshold appears empirically tuned for TM_CCOEFF scores.
    return max_val > 25000000
def checkWeGotRowOfCrystals():
    """Return True if the crystal icon shows in the last slot of a row."""
    roi = screen.getScreen()[230:330,585:670]
    gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray_roi, crystal_template, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Return the comparison directly instead of if/return True/return False.
    return max_val > 9000000
def checkWeGotCrystals():
    """Return True if the crystal icon shows in the first inventory slot."""
    roi = screen.getScreen()[230:330,120:210]
    gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray_roi, crystal_template, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Return the comparison directly instead of if/return True/return False.
    return max_val > 10000000
def waitForAddedGraphic():
    """Poll up to 10 times (0.1s apart) for the 'added' graphic.

    Returns True as soon as the graphic is detected, False on timeout.
    """
    # for-range replaces the original manual counter/while loop.
    for _ in range(10):
        roi = screen.getScreen()[1030:1070, 37:142]
        gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        res = cv2.matchTemplate(gray_roi, added_template, cv2.TM_CCOEFF)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if max_val > 3000000:
            return True
        time.sleep(0.1)
    return False
def gotOwlShit():
    """Return True if the owl-pellet icon shows in its structure slot."""
    roi = screen.getScreen()[790:880,1710:1800]
    gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray_roi, owlshit_template, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Return the comparison directly instead of if/return True/return False.
    return max_val > 4000000
def gotNoOwlShit():
    """Return True if the owl-pellet icon is absent from the player slot.

    Inverse polarity of gotOwlShit(), checked against a different region.
    """
    roi = screen.getScreen()[330:410,1240:1330]
    gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray_roi, owlshit_template, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    # Single inverted comparison replaces the if/return False/return True.
    return max_val <= 4000000
def clickPattern():
    """Click twice with a 0.2s lead before each click, then pause 1s."""
    for _ in range(2):
        time.sleep(0.2)
        pyautogui.click()
    time.sleep(1.0)
def whipCrackPattern():
    """Equip slot 1, walk right in bursts cracking the whip between them.

    The repeated keyDown/sleep/keyUp/click sequence is expressed as a
    loop over the hold durations instead of four copy-pasted blocks.
    """
    pyautogui.press('1')
    # One long opening run, then three short repositioning steps; the
    # whip is cracked (clickPattern) after each burst.
    for hold in (1.0, 0.5, 0.5, 0.5):
        pyautogui.keyDown("right")
        time.sleep(hold)
        pyautogui.keyUp("right")
        clickPattern()
    # Final longer walk right, then put the whip away.
    pyautogui.keyDown("right")
    time.sleep(1.5)
    pyautogui.keyUp("right")
    pyautogui.press('1')
def whipTheCrystals():
    """Grab a whip, crack open dropped crystal piles, harvest the loot.

    Long, timing-sensitive GUI automation: each sleep gives the game
    client time to respond before the next input.
    """
    ark.openInventory()
    time.sleep(1)
    ark.takeAll("broken")
    time.sleep(1)
    #drag a whip from the vault to the first slot on the hot bar
    ark.searchStructureStacks("whip")
    pyautogui.moveTo(1295, 283);
    pyautogui.dragTo(690, 1050, 0.5, button='left')
    waitForAddedGraphic()
    time.sleep(0.5)
    ark.closeInventory()
    whipCrackPattern()
    # Walk left in steps, pressing 'f' until a container inventory opens.
    while(ark.inventoryIsOpen() == False):
        pyautogui.keyDown("left")
        time.sleep(0.2)
        pyautogui.keyUp("left")
        pyautogui.press('f')
        time.sleep(2.0)
    disableToolTips()
    time.sleep(0.5)
    # Drop the whip from hotbar slot 1 back into the container.
    pyautogui.moveTo(690, 1050)
    pyautogui.click()
    pyautogui.press('t')
    time.sleep(0.5)
    # Filter the container view to gacha crystals.
    pyautogui.moveTo(167, 185)
    pyautogui.click()
    pyautogui.typewrite("gacha", interval=0.02)
    count = 0
    time.sleep(0.2)
    pyautogui.moveTo(167, 280, 0.1)
    pyautogui.click()
    time.sleep(1.0)
    # Open crystals a full row (6 slots) at a time while a row remains.
    while(checkWeGotRowOfCrystals()):
        for i in range(6):
            pyautogui.moveTo(167+(i*95), 280, 0.1)
            pyautogui.click()
            pyautogui.press('e')
            time.sleep(0.8)
        count += 6
        pyautogui.moveTo(165, 280)
        pyautogui.click()
    # Open any stragglers one at a time; bail out after 300 crystals.
    while(checkWeGotCrystals()):
        pyautogui.press('e')
        time.sleep(0.8)
        count += 1
        if(count > 300):
            break
    ark.transferAll("whip")
    time.sleep(0.5)
    # Discard junk drops.
    ark.dropItems("chitin")
    ark.dropItems("prim")
    ark.closeInventory()
    ark.lookUp()
    # double press e because sometimes the first press picks up crystals instead of depositing element
    pyautogui.press('e')
    time.sleep(0.5)
    pyautogui.press('e')
    time.sleep(0.5)
    ark.lookDown()
    # Retry up to 5 times to reopen the container and return leftovers.
    for i in range(5):
        pyautogui.press('f')
        time.sleep(2.0)
        if(ark.inventoryIsOpen() == True):
            time.sleep(0.5)
            pyautogui.moveTo(690, 1050)
            pyautogui.click()
            pyautogui.press('t')
            time.sleep(0.5)
            ark.transferAll("")
            time.sleep(2.0)
            ark.closeInventory()
            break
        # Not open yet - nudge the camera down and try again.
        pyautogui.keyDown('down')
        time.sleep(0.1)
        pyautogui.keyUp('down')
def loadTheGacha(food):
    """Clear a gacha of owl pellets and refill it with `food`.

    food -- search string for the food item to transfer into the gacha.
    """
    # crouch and open gacha inventory
    time.sleep(0.5)
    pyautogui.press('f')
    time.sleep(1.5)
    if(ark.inventoryIsOpen() == False): #if gacha inventory isn't open, look down a little and try again
        pyautogui.keyDown("down")
        time.sleep(0.1)
        pyautogui.keyUp("down")
        time.sleep(0.5)
        pyautogui.press('f')
        time.sleep(1.5)
        if(ark.inventoryIsOpen() == False): #still can't open gacha, so post error and give up
            return
    # remove excess owl shit
    ark.searchStructureStacks("owl")
    if(gotOwlShit()):
        ark.takeAll("owl") #take all the owl shit from the gacha
        time.sleep(0.5)
        ark.tTransferTo(3)
        ark.dropItems("owl") #drop the rest of the shit
    ark.closeInventory()
    #look up to the seed dedi
    ark.lookUp()
    #take all from seed dedi
    ark.openInventory()
    ark.takeAll()
    time.sleep(0.5)
    ark.closeInventory()
    #look back at the gacha
    ark.lookDown()
    #open gacha inventory
    pyautogui.press('f')
    time.sleep(2.0)
    for i in range(5): # loop 5 times, so we try a few times to open gacha inventory
        if(ark.inventoryIsOpen()): # if its open, we put in food, take a row back out and break the loop
            ark.transferAll(food)
            time.sleep(0.5)
            ark.searchStructureStacks(food)
            ark.tTransferFrom(1)
            ark.closeInventory()
            break
        pyautogui.keyDown("down") # else we look down a little and try again
        time.sleep(0.2)
        pyautogui.keyUp("down")
        pyautogui.press('f')
        time.sleep(2.0)
    # look back up at the dedi
    ark.lookUp()
    ark.lookUp()
    time.sleep(1.0)
    pyautogui.press('e') # put the seeds into the dedi
    time.sleep(1.0)
    ark.lookDown()
    ark.lookDown()
def craftElement():
    """Take everything from the open container and trigger one craft.

    NOTE(review): moves to the fixed slot at (235, 298) and presses 'a' -
    presumably the craft hotkey for that slot; confirm the in-game binding.
    """
    ark.takeAll("")
    time.sleep(0.2)
    pyautogui.moveTo(235, 298)
    time.sleep(0.2)
    pyautogui.press('a')
    time.sleep(0.2)
# Queue of key presses recorded by the pynput listener; drained by the
# helper-macro loop below.
keyPresses = []
def onPress(key):
    """pynput listener hook: record every key press for later handling."""
    # list.append mutates in place and never rebinds the name, so the
    # original `global keyPresses` declaration was unnecessary.
    keyPresses.append(key)
def onRelease(key):
    """pynput listener hook: key releases are intentionally ignored."""
# Startup banner.
print("Shazza's Amazing Totally Awesome Gacha Bot")
print("its so op omg")
print("Version 2.83")
print("\n\n")
# Load bed/location configuration and let the user pick a location.
beds = []
with open('beds.json') as json_file:
    data = json.load(json_file)
count = 0
print("Locations:")
for i in data["locations"]:
    print(" " + str(count) + " - " + i["name"])
    count += 1
val = input("Enter your location (0 - " + str(len(data["locations"])-1) + "): ")
beds = data["locations"][int(val)]
# Fill in per-bed screen coordinates from the location defaults when a
# bed entry doesn't override them.
for i in beds["crystalBeds"]:
    if(i.get("x") is None):
        i["x"] = beds["default_x"]
    if(i.get("y") is None):
        i["y"] = beds["default_y"]
for i in beds["seedBeds"]:
    if(i.get("x") is None):
        i["x"] = beds["default_x"]
    if(i.get("y") is None):
        i["y"] = beds["default_y"]
# Currently unused lap counter.
lapCounter = 0
# Timing/tuning parameters for the ark helper module.
ark.setParams(1.45, 1.45, 10)
def whipCrystals():
    """Spawn at each configured crystal bed in turn and harvest it."""
    global beds
    for i in beds["crystalBeds"]:
        ark.bedSpawn(i["name"], i["x"], i["y"])
        whipTheCrystals()
        ark.accessBed()
# Main menu loop: run the bot, or run the helper macros.
while(True):
    print("Do you wanna\n1: run the gacha bot\n2: helper macros\n")
    val = input("")
    if(val == "1"):
        print("Starting . . . ")
        print("8 seconds to alt tab back in")
        time.sleep(8)
        print("OK taking over controls")
        start = time.time()
        count = 0
        #start = time.time()
        # start=0 forces a crystal-whipping pass on the first iteration.
        start = 0
        count = 0
        while(True):
            #start = time.time()
            for i in beds["seedBeds"]:
                duration = time.time() - start
                # Whip crystals roughly every 12 minutes; every third
                # pass, respawn via the suicide bed first.
                if(duration > 720):
                    start = time.time()
                    whipCrystals()
                    count += 1
                    if((count%3) == 0):
                        suicideBed = beds["suicideBed"]
                        ark.bedSpawn(suicideBed["name"], suicideBed["x"], suicideBed["y"])
                        time.sleep(15)
                # Visit each seed bed and feed its gacha.
                ark.bedSpawn(i["name"], i["x"], i["y"])
                loadTheGacha(beds["food"])
                ark.accessBed()
    elif(val == "2"):
        print("Quick Handy Macros")
        print("F3 - start crafting an element dedi.")
        print("F4 - stop crafting element")
        # Listen for hotkeys in the background; onPress queues them.
        listener = Listener(on_press=onPress, on_release=onRelease)
        listener.start()
        while(True):
            # Drain queued key presses; F3 starts a crafting loop that
            # runs until F4 is pressed.
            while(len(keyPresses) > 0):
                key = keyPresses.pop(0)
                if(key == Key.f3):
                    ark.searchMyStacks("element")
                    run = True
                    while(run):
                        craftElement()
                        while(len(keyPresses) > 0):
                            if(keyPresses.pop(0) == Key.f4):
                                run = False
                                break
            time.sleep(0.01)
    else:
        print("Make a proper selection you idiot. 1 or 2. PICK ONE FFS ITS NOT FUCKING HARD.")
|
{"/gacha.py": ["/ark.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.