text string | size int64 | token_count int64 |
|---|---|---|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the miniMaxSum function below.
def miniMaxSum(arr):
    """Print the minimum and maximum sums obtainable by summing all but one element of *arr*."""
    total = sum(arr)
    # Dropping the largest element gives the minimum sum; dropping the smallest gives the maximum.
    print(total - max(arr), total - min(arr))
if __name__ == '__main__':
    # Read one line of whitespace-separated integers from stdin and report both sums.
    values = [int(token) for token in input().split()]
    miniMaxSum(values)
| 364 | 133 |
# -*- coding: utf-8 -*-
import json
import math
import random
import time
import requests
import scrapy
from scrapy.http import HtmlResponse
from scrapy import Request
from spiders.common import OTA
from spiders.items.spot import spot
from spiders.items.price import price
class QunarSpider(scrapy.Spider):
    """Base Qunar spider: holds the shared tag-type constants and the OTA spot-id list."""
    # Tag classification flags: 0 = system-generated tag, 1 = user tag, 2 = non-user tag.
    sys_tags = 0
    user_tags_true = 1
    user_tags_false = 2
    name = 'qunar'
    allowed_domains = ['www.qunar.com']
    start_urls = ['http://www.qunar.com/']
    # List of Qunar scenic-spot ids registered for this OTA; shared by the other spiders below.
    ota_spot_ids = OTA.OtaSpotIdMap.get_ota_spot_list(OTA.OtaCode.QUNAR)

    def parse(self, response):
        # Intentionally a no-op: this spider only exposes the shared class attributes.
        pass
class QunarTagSpider(scrapy.Spider):
    """Fetches the review tag list for each known spot and writes it onto the Spot document."""
    name = 'qunar_tag'
    allowed_domains = ['www.qunar.com']
    total_num = 0  # total number of comments
    page_size = 20  # page size (original note said 100 per page -- actual value is 20; verify)
    # Comment endpoint; tagName=%E6%9C%80%E6%96%B0 is URL-encoded "最新" ("latest").
    base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
    start_urls = [
        'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']

    def parse(self, response: HtmlResponse):
        """For every mapped spot, request a minimal comment page whose payload carries the tag list."""
        for ota_spot_id in QunarSpider.ota_spot_ids:
            # pageSize=1 is enough: the tag list rides along with any comment page.
            url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=1)
            yield Request(url=url, callback=self.spot_tag, dont_filter=True,
                          meta={'page_num': 1, 'ota_spot_id': ota_spot_id})

    def spot_tag(self, response: HtmlResponse):
        """Parse the user-review tags for one scenic spot and persist them."""
        response_str = response.body.decode('utf-8')
        comment = json.loads(response_str)
        if 'data' in comment and 'tagList' in comment['data']:
            spot_tag = []
            for key, value in enumerate(comment['data']['tagList']):
                # print(value['tagName'])
                # tagType 0/1/41/43/44 are system tags; everything else counts as a user tag.
                if value['tagType'] in [0, 1, 41, 43, 44]:
                    tag_type = QunarSpider.sys_tags
                else:
                    tag_type = QunarSpider.user_tags_true
                tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
                       'tag_type': tag_type}
                spot_tag.append(tag)
            print(spot_tag, "#" * 20)
            print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
            # NOTE(review): uses .update() here while CommentAndTagSpider uses
            # .update_one(..., upsert=True) for the same write -- confirm which is intended.
            spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
                              ota_spot_id=response.meta['ota_spot_id']).update(
                set__tag_list=spot_tag)
        pass
class CommentSpider(scrapy.Spider):
    """Pages through every spot's comment list and yields SpotComment documents."""
    name = 'qunar_comment'
    allowed_domains = ['www.qunar.com']
    total_num = 0  # total number of comments
    page_size = 10  # default comments per page (original note said 100 -- actual value is 10; verify)
    # Comment endpoint; tagName=%E6%9C%80%E6%96%B0 is URL-encoded "最新" ("latest").
    base_url = r'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize={page_size}&fromType=SIGHT&pageNum={page_num}&sightId={ota_spot_id}&tagType=44&tagName=%E6%9C%80%E6%96%B0'
    start_urls = [
        'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=1&fromType=SIGHT&pageNum=1&sightId=706176810']

    def parse(self, response: HtmlResponse):
        """Fetch only the not-yet-stored comments for each spot, page by page."""
        headers = {'content-type': 'application/json'}
        for ota_spot_id in QunarSpider.ota_spot_ids:
            # First request fetches the site's total comment count for this spot.
            url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=1, page_size=10)
            # headers = {'content-type': 'application/json'}
            data = requests.get(url, headers=headers)
            comment = data.json()
            print(ota_spot_id, "共", comment['data']['total'], "条", "*" * 20)
            page_size = 10
            # Total comments reported by the site.
            total_page = comment['data']['total']
            # Comments already stored in the database.
            now_total = spot.SpotComment.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
                                                 ota_spot_id=ota_spot_id).count()
            # Comments still to be saved.
            to_save_total = total_page - now_total
            # Pages still to be fetched (variable is reused: count -> page count).
            total_page = math.ceil(to_save_total / page_size)
            for page_num in range(1, total_page + 1):
                if page_num == total_page:
                    # Last page only needs the remainder.
                    page_size = to_save_total - (page_num - 1) * page_size
                else:
                    page_size = page_size  # no-op; kept from original
                url = self.base_url.format(ota_spot_id=ota_spot_id, page_num=page_num, page_size=page_size)
                print("-" * 30)
                print(url)
                print("+" * 30)
                # headers = {'content-type': 'application/json'}
                data = requests.get(url, headers=headers)
                try:
                    comment = data.json()
                except Exception:
                    # One retry on malformed JSON.
                    # NOTE(review): if the retry also fails, the previous page's
                    # `comment` is silently reused below -- confirm this is intended.
                    try:
                        data = requests.get(url, headers=headers)
                        comment = data.json()
                    except Exception:
                        print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")
                print(ota_spot_id, " 第", page_num, "页: ", "共", page_size, "条 ", "*" * 20)
                if 'data' in comment and 'commentList' in comment['data']:
                    for key, value in enumerate(comment['data']['commentList']):
                        print('正在添加 ', value['author'], ' 的评论', "*" * 20)
                        # NOTE(review): loads an arbitrary existing document (ota_id=10004)
                        # and overwrites its fields before yielding, so every yield mutates
                        # the same record -- verify this is the intended save mechanism.
                        spot_comment = spot.SpotComment.objects(ota_id=10004).first()
                        spot_comment.ota_id = OTA.OtaCode.QUNAR.value.id
                        spot_comment.ota_spot_id = ota_spot_id
                        spot_comment.goods_name = value['sightName']
                        # spot_comment.u_avatar = value['headImg']
                        spot_comment.u_name = value['author']
                        spot_comment.c_tag = value['tagList']
                        spot_comment.c_id = value['commentId']
                        spot_comment.c_score = value['score']
                        spot_comment.c_content = value['content']
                        # spot_comment.c_img = value['imgs']
                        spot_comment.c_img = [item['small'] for item in value['imgs']]
                        spot_comment.create_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                        yield spot_comment
'''
Attach tag data to the comment records.
'''
class CommentAndTagSpider(scrapy.Spider):
    """Crawls both the review tags and the paged comments, upserting them into MongoDB."""
    name = 'comment_and_tag'
    allowed_domains = ['touch.piao.qunar.com']

    def start_requests(self):
        """Request page 0 of every spot's comments; that response also carries the tag list."""
        for ota_spot_id in QunarSpider.ota_spot_ids:
            print(ota_spot_id, 'ota' * 20)
            yield scrapy.FormRequest(
                'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=0&sightId=' + str(
                    ota_spot_id)
                , method='GET'
                , meta={'ota_spot_id': ota_spot_id}
                , callback=self.after_login)

    def after_login(self, response):
        """Upsert the spot's tag list, then schedule the remaining comment pages."""
        print('-' * 20)
        result = json.loads(response.body)
        if 'data' in result and 'tagList' in result['data']:
            spot_tag = []
            for key, value in enumerate(result['data']['tagList']):
                # tagType 0/1/41/43/44 are system tags; everything else counts as a user tag.
                if value['tagType'] in [0, 1, 41, 43, 44]:
                    tag_type = QunarSpider.sys_tags
                else:
                    tag_type = QunarSpider.user_tags_true
                tag = {'tag_name': value['tagName'], 'tag_num': value['tagNum'], 'tag_score': value['tagScore'],
                       'tag_type': tag_type}
                spot_tag.append(tag)
            print(spot_tag, "#" * 20)
            print('-' * 20, 'ota_id', OTA.OtaCode.QUNAR.value.id, 'ota_spot_id', response.meta['ota_spot_id'])
            spot.Spot.objects(ota_id=OTA.OtaCode.QUNAR.value.id,
                              ota_spot_id=response.meta['ota_spot_id']).update_one(
                set__tag_list=spot_tag, upsert=True)
        if 'data' in result and 'total' in result['data']:
            print('共', result['data']['total'], '条', '*' * 20)
            # Page 0 was fetched by start_requests, so pages 1..ceil(total/10)-1
            # (range upper bound exclusive) cover the remainder.
            for pag_num in range(1, math.ceil(result['data']['total'] / 10)):
                # for pag_num in range(1, 5):
                print('第', pag_num, '页', '+' * 20)
                yield scrapy.FormRequest(
                    'https://touch.piao.qunar.com/touch/queryCommentsAndTravelTips.json?type=mp&pageSize=10&fromType=SIGHT&pageNum=' + str(
                        pag_num) + '&sightId=' + str(response.meta['ota_spot_id'])
                    , method='GET'
                    , meta={'page': pag_num, 'ota_spot_id': response.meta['ota_spot_id']}
                    , callback=self.each_page)

    def each_page(self, response):
        """Upsert every comment on the page, keyed by Qunar's commentId."""
        print('-' * 20)
        result = json.loads(response.body)
        if 'data' in result and 'commentList' in result['data']:
            for key, value in enumerate(result['data']['commentList']):
                print(value['author'], '第', response.meta['page'], '页', '+' * 20)
                # headImg is optional in the payload.
                if 'headImg' in value:
                    headImg = value['headImg']
                else:
                    headImg = ''
                # NOTE(review): update_one() returns a count, not an Item/Request --
                # yielding its result is a no-op for scrapy pipelines; confirm intended.
                yield spot.SpotComment.objects(c_id=value['commentId']).update_one(
                    set__ota_id=OTA.OtaCode.QUNAR.value.id,
                    set__ota_spot_id=response.meta['ota_spot_id'],
                    set__goods_name=value['sightName'],
                    set__u_avatar=headImg,
                    set__u_name=value['author'],
                    set__c_tag=value['tagList'],
                    set__c_score=value['score'],
                    set__c_content=value['content'],
                    set__c_img=[item['small'] for item in value['imgs']],
                    set__create_at=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                    upsert=True)
class PriceSpider(scrapy.Spider):
    """Crawls ticket groups for a fixed set of spots and records products plus a daily price calendar."""
    # Static mapping between internal spot ids and Qunar sight ids
    # (sightName holds the Chinese spot name).
    ota_map = [{'ota_spot_id': 706176810, 'sightId': '14407', 'sightName': '石燕湖'}
        , {'ota_spot_id': 1915618311, 'sightId': '187730', 'sightName': '石牛寨'}
        , {'ota_spot_id': 2877753081, 'sightId': '469141', 'sightName': '益阳嘉年华'}
        , {'ota_spot_id': 2554926827, 'sightId': '470541', 'sightName': '花田溪谷'}
        , {'ota_spot_id': 225118749, 'sightId': '461232', 'sightName': '东浒寨'}
        , {'ota_spot_id': 3821817759, 'sightId': '11829', 'sightName': '马仁奇峰'}
        , {'ota_spot_id': 420237024, 'sightId': '39499', 'sightName': '大茅山'}
        , {'ota_spot_id': 4123349957, 'sightId': '35473', 'sightName': '九龙江'}
        , {'ota_spot_id': 2333288470, 'sightId': '196586', 'sightName': '侠天下'}
        , {'ota_spot_id': 3333064220, 'sightId': '461903', 'sightName': '三翁花园'}
    ]
    name = 'qunar_price'
    allowed_domains = ['piao.qunar.com']
    login_url = 'http://piao.qunar.com/ticket/detail/getTickets.json'
    # Disable TLS certificate verification for the whole process.
    # NOTE(review): process-wide side effect executed at class-definition time -- confirm intended.
    import ssl
    ssl._create_default_https_context = ssl._create_unverified_context

    def start_requests(self):
        """Clear today's Qunar price data (ota_id 10006), then request each spot's ticket groups."""
        price.OPrice.objects(ota_id=10006).delete()
        price.OPriceCalendar.objects(ota_id=10006, create_at=time.strftime("%Y-%m-%d", time.localtime())).delete()
        print('start_request')
        for value in self.ota_map:
            # print(value['sightId'], "*" * 20)
            yield scrapy.FormRequest(self.login_url
                , formdata={'sightId': value['sightId'], 'from': 'detail'}
                , meta={'ota_spot_id': value['ota_spot_id'], 'sight_name': value['sightName']}
                , callback=self.after_login)

    def after_login(self, response):
        """Parse the ticket groups, build product records and yield one OPrice per group."""
        print('-' * 20)
        result = json.loads(response.body)
        if 'data' in result and 'groups' in result['data']:
            for k1, v1 in enumerate(result['data']['groups']):  # one group per sight
                ota_product = []
                for k2, v2 in enumerate(v1):  # one entry per ticket type
                    tickets = []
                    typeId = str(v2['typeId'])
                    ota_spot_name = response.meta['sight_name']
                    typeKey = ota_spot_name + v2['ticketZoneName']
                    ticketZoneName = v2['typeName']
                    total_count = v2['totalCount']  # number of ticket offers in this type
                    total_price = 0  # accumulated offer prices (original comment wrongly said "ticket count")
                    normal_price = v2['qunarPrice']
                    if 'tickets' in v2:
                        print(v2['qunarPrice'])
                        for k3, v3 in enumerate(v2['tickets']):
                            tickets_list = {'price_id': str(v3['priceId'])
                                , 'title': v3['title']
                                , 'seller_nick': v3['supplierName']
                                , 'price': v3['qunarPrice']
                                , 'cash_back': v3['cashBack']
                                , 'cut_price': v3['cutPrice']
                                , 'sale_num': 0
                                , 'url': 'http://touch.piao.qunar.com/touch/detail_' + str(response.meta[
                                    'ota_spot_id']) + '.html?st=a3clM0QlRTclOUYlQjMlRTclODclOTUlRTYlQjklOTYlMjZpZCUzRDE0NDA3JTI2dHlwZSUzRDAlMjZpZHglM0QxJTI2cXQlM0RuYW1lJTI2YXBrJTNEMiUyNnNjJTNEV1dXJTI2YWJ0cmFjZSUzRGJ3ZCU0MCVFNiU5QyVBQyVFNSU5QyVCMCUyNnVyJTNEJUU5JTk1JUJGJUU2JUIyJTk5JTI2bHIlM0QlRTklOTUlQkYlRTYlQjIlOTklMjZmdCUzRCU3QiU3RA%3D%3D#from=mpl_search_suggest'
                            }
                            tickets.append(tickets_list)
                            total_price = total_price + v3['qunarPrice']
                            # print(v3['title'])  # priceId qunarPrice cashBack cutPrice supplierId supplierName
                    ota_product_list = {'type_id': typeId, 'type_key': typeKey, 'type_name': ticketZoneName,
                                        'normal_price': normal_price,
                                        'tickets': tickets}
                    ota_product.append(ota_product_list)
                # Average offer price -- computed from the *last* ticket type in the group.
                # NOTE(review): raises ZeroDivisionError when total_count is 0, and the
                # calendar below stores only the last type's id/key/name -- confirm intended.
                pre_price = round(total_price / total_count, 2)
                print(pre_price, "+" * 20)
                # print(ota_product)
                # Save the daily price-calendar snapshot.
                price_calendar = price.OPriceCalendar()
                price_calendar.ota_id = OTA.OtaCode.QUNAR.value.id
                price_calendar.ota_spot_id = response.meta['ota_spot_id']
                price_calendar.ota_spot_name = response.meta['sight_name']
                price_calendar.pre_price = pre_price
                price_calendar.type_id = typeId
                price_calendar.type_key = typeKey
                price_calendar.type_name = ticketZoneName
                price_calendar.create_at = time.strftime("%Y-%m-%d", time.localtime())
                o_price = price.OPrice()
                o_price.ota_id = OTA.OtaCode.QUNAR.value.id
                o_price.ota_spot_id = response.meta['ota_spot_id']
                o_price.ota_spot_name = ota_spot_name
                o_price.ota_product = ota_product  # typeId typeName qunarPrice
                price_calendar.save(force_insert=False, validate=False, clean=True)
                yield o_price
| 15,322 | 5,458 |
#!/usr/bin/env python3
import argparse
import os
import sys
import random
import math
from tqdm import tqdm
from collections import Counter
import operator
import numpy as np
FLAGS = None  # argparse namespace, assigned in __main__
next_random = 1  # LCG state shared by subsampling_bool (word2vec-style PRNG)


def subsampling_bool(freq, sampling):
    """Return the word2vec-style subsampling decision value for one word.

    freq -- relative frequency of the word (count / corpus size)
    sampling -- subsampling factor (word2vec's `sample` parameter)

    Advances the module-level LCG state and returns
    uniform_draw - (sqrt(sampling/freq) + sampling/freq); callers clamp
    negative results to 0 (i.e. "never remove").
    """
    global next_random
    # Linear congruential generator with word2vec's constants.
    # NOTE(review): word2vec masks only when extracting 16 bits
    # ((next_random >> 16) & 0xFFFF); masking the *state* to 16 bits gives the
    # generator a very short period -- confirm this is intended before changing.
    next_random = (next_random * 25214903917 + 11) & 0xFFFF
    # Removal threshold; computed once (the original built it twice).
    keep_term = math.sqrt(sampling / freq) + sampling / freq
    return next_random / 65536.0 - keep_term
def subsample(label_dir, label_list, sampling, min_count):
    """Build the per-word removal-probability table used for subsampling.

    Counts word occurrences across all files in *label_list* (first
    comma-separated field of each line), writes the counts sorted by
    frequency to ``<label_dir>/../word_count``, and returns a dict mapping
    each word to its removal probability: 1.0 for words rarer than
    *min_count*, otherwise the clamped subsampling_bool() value.
    """
    counts = Counter()
    total = 0
    for fname in label_list:
        with open(os.path.join(label_dir, fname), 'r') as handle:
            for row in handle:
                counts[row.split(',')[0]] += 1
                total += 1
    ranked = sorted(counts.items(), key=operator.itemgetter(1), reverse=True)
    # Dump the raw frequency table next to the label directory.
    with open(os.path.join(label_dir, '../word_count'), 'w') as fout:
        for token, n_occ in ranked:
            fout.write(token + ', ' + str(n_occ) + '\n')
    probs = {}
    for token, n_occ in ranked:
        if n_occ < min_count:
            probs[token] = 1.
        else:
            # Clamp negative values to 0 ("never remove").
            probs[token] = max(subsampling_bool(n_occ / total, sampling), 0.)
    return probs
def main():
    """Subsample aligned example/label/utterance files word2vec-style.

    For each aligned triple of files, a line survives with probability
    1 - prob(label); survivors are copied into the subsampled_* directories
    and the per-word surviving counts are dumped at the end.
    """
    example_list = os.listdir(FLAGS.example_dir)
    label_list = os.listdir(FLAGS.label_dir)
    utter_list = os.listdir(FLAGS.utter_dir)
    num_file = len(example_list)  # NOTE(review): unused
    subsampling_dic = subsample(FLAGS.label_dir, label_list, FLAGS.sampling, FLAGS.min_count)
    subsampled_words = Counter()
    # NOTE(review): relies on os.listdir returning matching files in the same
    # relative order across the three directories -- ordering is arbitrary;
    # confirm filenames actually align.
    for u_file, e_file, l_file in tqdm(zip(utter_list, example_list, label_list)):
        count = 0
        with open(os.path.join(FLAGS.example_dir, e_file), 'r') as f_e:
            with open(os.path.join(FLAGS.label_dir, l_file), 'r') as f_l:
                with open(os.path.join(FLAGS.utter_dir, u_file), 'r') as f_u:
                    with open(os.path.join(FLAGS.subsampled_example_dir, e_file), 'w') as f_out_e:
                        with open(os.path.join(FLAGS.subsampled_label_dir, l_file), 'w') as f_out_l:
                            with open(os.path.join(FLAGS.subsampled_utter_dir, u_file), 'w') as f_out_u:
                                for u, e, l in zip(f_u, f_e, f_l):
                                    count += 1
                                    # First comma field of each line is the token id.
                                    label = l.split(',')[0]
                                    utter = u.split(',')[0]
                                    spk = utter.split('-')[0]  # speaker prefix; only used in the commented-out write
                                    # Context labels: second comma field, space-separated.
                                    context_labels = l[:-1].split(',')[1].split()
                                    write_bool = True
                                    # Drop the line outright if any context word is below
                                    # min_count (its removal prob was pinned to 1.0).
                                    for c_l in context_labels:
                                        if subsampling_dic[c_l] == 1.:
                                            write_bool = False
                                            break
                                    if write_bool == False:
                                        continue
                                    prob = subsampling_dic[label]
                                    # Keep the line with probability 1 - prob.
                                    if np.random.choice([True, False], p=[1-prob, prob]):
                                        try:
                                            f_out_e.write(e[:-1]+'\n')
                                            f_out_l.write(l[:-1]+'\n')
                                            # f_out_u.write(spk+'\n')
                                            f_out_u.write(u[:-1]+'\n')
                                            subsampled_words[label] += 1
                                        except:
                                            # NOTE(review): bare except hides real I/O errors;
                                            # only diagnostics are printed.
                                            print (l)
                                            print (count)
    sorted_words = sorted(subsampled_words.items(), key=operator.itemgetter(1), reverse=True)
    # Dump surviving counts next to the label directory.
    with open(os.path.join(FLAGS.label_dir, '../subsampled_word_count'), 'w') as fout:
        for word in sorted_words:
            fout.write(word[0] + ', ' + str(word[1]) + '\n')
if __name__ == '__main__':
    # NOTE(review): description looks copied from a tfrecords-conversion script;
    # this tool subsamples example/label/utterance files.
    parser = argparse.ArgumentParser(description=
                                     'transform text format features into tfrecords')
    parser.add_argument(
        'example_dir',
        metavar='<example dir>',
        type=str,
        help='example dir'
    )
    parser.add_argument(
        'label_dir',
        metavar='<label dir>',
        type=str,
        help='label dir'
    )
    parser.add_argument(
        'utter_dir',
        metavar='<utter dir>',
        type=str,
        help='utter dir'
    )
    parser.add_argument(
        'subsampled_example_dir',
        metavar='<subsampled example dir>',
        type=str,
        help='subsampled_example_dir'
    )
    parser.add_argument(
        'subsampled_label_dir',
        metavar='<subsampled label dir>',
        type=str,
        help='subsampled_label_dir'
    )
    parser.add_argument(
        'subsampled_utter_dir',
        metavar='<subsampled utter dir>',
        type=str,
        help='subsampled_utter_dir'
    )
    parser.add_argument(
        'sampling',
        metavar='<subsampling factor>',
        type=float,
        help='subsampling factor'
    )
    parser.add_argument(
        'min_count',
        metavar='<min count>',
        type=int,
        help='min count'
    )
    parser.add_argument(
        '--feats_dim',
        metavar='<feats-dim>',
        type=int,
        default=256,
        help='feature dimension'
    )
    # NOTE(review): type=bool is an argparse pitfall -- bool("False") is True, so
    # any non-empty value parses as True. Consider action='store_true' instead.
    parser.add_argument(
        '--norm_var',
        metavar='<True|False>',
        type=bool,
        default=False,
        help='Normalize Variance of each sentence'
    )
    parser.add_argument(
        '--norm_mean',
        metavar='<True|False>',
        type=bool,
        default=False,
        help='Normalize mean of each sentence'
    )
    FLAGS = parser.parse_args()
    main()
| 5,941 | 1,874 |
import pandas as pd

# Load the Hofstede indices, keyed by country.
data = pd.read_csv("..\\data\\Hofstede Insights - Manual 2021-05-13.csv", delimiter=",", index_col="country")

# Stringify the four Hofstede dimensions so they can be concatenated below.
for _column in ("pdi", "idv", "mas", "uai"):
    data[_column] = data[_column].astype(str)

# Generate one C# dictionary-initializer entry per country, of the form:
# Dictionary<string, (int Pdi, int Idv, int Mas, int Uai)>
_entries = []
for country, row in data.iterrows():
    _entries.append("{ \"" + country.lower() + "\", (" + row["pdi"] + ", " + row["idv"] + ", " + row["mas"] + ", " + row["uai"] + ") },\n")
result = "".join(_entries)

# Print the result so the generated C# code can be copied from the console.
print(result)
# pylint: disable="import-error"
from command_line_creator import CommandLineCreator
from current_cmd_b import CurrentCMD_B
class CmdCreatorB(CommandLineCreator):
    """Concrete command-line creator producing the B-variant command."""

    def create_cmd(self):
        """Return a new CurrentCMD_B bound to this creator's output."""
        return CurrentCMD_B(self.output)
from utils import _conv2d, _bn, _block
import tensorflow as tf
def WideResNet(x, dropout, phase, layers, kval, scope, n_classes = 10): # Wide residual network
# 1 conv + 3 convblocks*(3 conv layers *1 group for each block + 2 conv layers*(N-1) groups for each block [total 1+N-1 = N groups]) = layers
# 3*2*(N-1) = layers - 1 - 3*3
# N = (layers -10)/6 + 1
# So N = (layers-4)/6
N = (layers - 4) / 6
# o = _conv2d(x, [3, 3, 3, 16], 1, scope)
# in_shape = x.get_shape()
print x.get_shape()
o = _conv2d(x, [3, 3, 1, 16], 1, scope)
print o.get_shape()
o = _bn(o, phase)
print o.get_shape()
o = _block(o, N, kval, 16, 16, 1, dropout, phase, scope)
print o.get_shape()
o = _block(o, N, kval, 16 * kval, 32, 2, dropout, phase, scope)
print o.get_shape()
o = _block(o, N, kval, 32 * kval, 64, 2, dropout, phase, scope)
print o.get_shape()
pooled = tf.nn.avg_pool(o, ksize=[1, 7, 7, 1], strides=[1, 1, 1, 1], padding='VALID')
print o.get_shape()
# Initialize weights and biases for fully connected layers
with tf.variable_scope(scope + "regularize", reuse=False):
wd = tf.Variable(tf.truncated_normal([1 * 1 * 64 * kval, 64 * kval], stddev=5e-2))
wout = tf.Variable(tf.truncated_normal([64 * kval, n_classes]))
bd1 = tf.Variable(tf.constant(0.1, shape=[64 * kval]))
bout = tf.Variable(tf.constant(0.1, shape=[n_classes]))
# Fully connected layer
# Reshape pooling layer output to fit fully connected layer input
fc = tf.reshape(pooled, [-1, wd.get_shape().as_list()[0]])
fc = tf.add(tf.matmul(fc, wd), bd1)
fc = tf.nn.elu(fc)
# Output, class prediction
out = tf.add(tf.matmul(fc, wout), bout)
return out | 1,744 | 740 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 06 00:27:21 2016
@author: maheshwa
"""
from __future__ import print_function
import requests
import time
import binascii
import hashlib
import json
# Authentication
class AdobeAnalytics:
def __init__(self, user_name, shared_secret, endpoint='', debug=False):
    """
    Entry point for making authenticated API calls to the Adobe Report API's

    user_name -- WSSE user name of the form "login:company"
    shared_secret -- shared secret used to build the password digest
    endpoint -- optional explicit API endpoint; when empty, the proper endpoint
                is discovered via Company.GetEndpoint
    debug -- when True, request payloads are echoed to stdout
    """
    self.__user_name = user_name
    self.__shared_secret = shared_secret
    # Company is the part after the colon in "login:company".
    self.__company = self.__user_name.split(":")[1]
    self.__debug = debug
    # If user doesn't specify their own endpoint, call API to get proper endpoint.
    # The default URL must be assigned *before* GetEndpoint below, because
    # GetEndpoint itself issues a request through self.__api_url.
    self.__api_url = 'https://api.omniture.com/admin/1.4/rest/'
    if endpoint != '':
        self.__api_url = endpoint
    else:
        self.__api_url = self.GetEndpoint(company=self.__company)
def __buildheader(self):
"""
Returns required header for authenticating API calls. This is an internal method to be used
by other public method calls.
"""
nonce = str(time.time())
base64nonce = binascii.b2a_base64(binascii.a2b_qp(nonce)).decode('utf-8')
created_date = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
sha_object = hashlib.sha1((nonce + created_date + '%s' % (self.__shared_secret)).encode('utf-8'))
password_64 = binascii.b2a_base64(sha_object.digest()).decode('utf-8')
X_str = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
'%s:%s' % (self.__user_name, self.__company), password_64.strip(), base64nonce.strip(), created_date)
return {'X-WSSE': X_str}
def __callapi(self, endpoint, verb="POST", **kwargs):
"""
Calls the Adobe Analytics API at a given endpoint and variable arguments
"""
# Automatically convert an rsid_list string type to list as required by API
if "rsid_list" in kwargs and isinstance(kwargs["rsid_list"], str):
kwargs["rsid_list"] = [kwargs["rsid_list"]]
header = self.__buildheader()
if verb == "GET":
req = requests.get('%s?method=%s' % (self.__api_url, endpoint), params=json.dumps(kwargs), headers=header)
if self.__debug:
print(json.dumps(kwargs))
else:
req = requests.post('%s?method=%s' % (self.__api_url, endpoint), data=json.dumps(kwargs), headers=header)
if self.__debug:
print(json.dumps(kwargs))
return req.json()
def GetActivation(self, rsid_list):
    """Return the activation status for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetActivation'
    return self.__callapi(method, rsid_list=rsid_list)

def GetAxleStartDate(self, rsid_list):
    """Return the SiteCatalyst-14-to-axle (v15) migration date per report suite."""
    method = 'ReportSuite.GetAxleStartDate'
    return self.__callapi(method, rsid_list=rsid_list)

def GetBaseCurrency(self, rsid_list):
    """Return the supported currency codes for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetBaseCurrency'
    return self.__callapi(method, rsid_list=rsid_list)

def GetBaseURL(self, rsid_list):
    """Return the base URL assigned to each report suite in *rsid_list*."""
    method = 'ReportSuite.GetBaseURL'
    return self.__callapi(method, rsid_list=rsid_list)
def GetBookmarks(self, folder_limit=None, folder_offset=None):
    """
    Retrieves a list of bookmarks for the authenticated user.
    Keyword arguments:
    folder_limit -- (optional) Limit the retrieval to the specified number of bookmarks.
    folder_offset -- (optional) Start the bookmark retrieval at the specified offset.
    """
    report_description = {}
    if folder_limit:
        report_description["folder_limit"] = folder_limit
    if folder_offset:
        # Bug fix: this previously assigned folder_limit, so the offset was ignored.
        report_description["folder_offset"] = folder_offset
    return self.__callapi('Bookmark.GetBookmarks', report_description=report_description)
def GetCalculatedMetrics(self, rsid_list):
    """Return the calculated metrics assigned to each report suite in *rsid_list*."""
    method = 'ReportSuite.GetCalculatedMetrics'
    return self.__callapi(method, rsid_list=rsid_list)

def GetClassifications(self, rsid_list, element_list=[]):
    """Return the classifications for *element_list* on each report suite in *rsid_list*."""
    method = 'ReportSuite.GetClassifications'
    return self.__callapi(method, rsid_list=rsid_list, element_list=element_list)

def GetCustomCalendar(self, rsid_list):
    """Return the custom calendar for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetCustomCalendar'
    return self.__callapi(method, rsid_list=rsid_list)

def GetDashboards(self, ):
    """Return the dashboards available to the authenticated user."""
    return self.__callapi('Bookmark.GetDashboards')

def GetDataWarehouseDisplay(self, rsid_list):
    """Return whether data warehouse is enabled for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetDataWarehouseDisplay'
    return self.__callapi(method, rsid_list=rsid_list)

def GetDefaultPage(self, rsid_list):
    """Return the default page for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetDefaultPage'
    return self.__callapi(method, rsid_list=rsid_list)

def GetDiscoverEnabled(self, rsid_list):
    """Return whether ad hoc analysis (Discover) is enabled per report suite."""
    method = 'ReportSuite.GetDiscoverEnabled'
    return self.__callapi(method, rsid_list=rsid_list)

def GetEcommerce(self, rsid_list):
    """Return the commerce level for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetEcommerce'
    return self.__callapi(method, rsid_list=rsid_list)

def GetElements(self, rsid_list, elements=[], metrics=[]):
    """Return the valid elements per report suite, honoring any existing selections.

    rsid_list -- iterable of report suite ids
    elements -- existing elements to combine with an additional element
    metrics -- existing metrics to combine with an additional element
    """
    return {suite_id: self.__callapi('Report.GetElements', reportSuiteID=suite_id,
                                     existingElements=elements, existingMetrics=metrics)
            for suite_id in rsid_list}

def GetEndpoint(self, company):
    """Look up the correct API endpoint for *company* via Company.GetEndpoint."""
    return self.__callapi('Company.GetEndpoint', "GET", company=company)

def GetEvars(self, rsid_list):
    """Return the commerce variables (eVars) for each report suite in *rsid_list*."""
    method = 'ReportSuite.GetEvars'
    return self.__callapi(method, rsid_list=rsid_list)

def GetFeed(self, feed_id):
    """Return the data-feed detail for the feed identified by *feed_id*."""
    return self.__callapi('DataFeed.GetFeed', feed_id=feed_id)
def GetFeeds(self, rsid_list, start_time="", end_time="", status=[]):
"""
Get Data Feed Detail for a Report Suite(s)
Keyword arguments:
rsid_list -- Report suite id (or list of report suite ids)
start_time -- Beginning of time period you want to check
end_time -- End of time period you want to check
status -- Character vector/list of statuses to filter by
Example:
feeds2 = GetFeeds("zwitchdev", "2014-12-02 05:00:00", "2014-12-03 05:00:00")
"""
return self.__callapi('DataFeed.GetFeeds', rsid_list=rsid_list, status=status, start_time="", end_time="")
def GetFunctions(self, ):
    """Return the calculated-metric functions available to the user."""
    return self.__callapi('CalculatedMetrics.GetFunctions')

def GetGeoSegmentation(self, rsid_list):
    """Return the geography segmentation settings per report suite."""
    method = 'ReportSuite.GetGeoSegmentation'
    return self.__callapi(method, rsid_list=rsid_list)

def GetGroups(self, ):
    """Return the permission groups for the company."""
    return self.__callapi('Permissions.GetGroups')

def GetInternalURLFilters(self, rsid_list):
    """Return the internal URL filters per report suite."""
    method = 'ReportSuite.GetInternalURLFilters'
    return self.__callapi(method, rsid_list=rsid_list)

def GetIPAddressExclusions(self, rsid_list):
    """Return the IP addresses excluded from tracking per report suite."""
    method = 'ReportSuite.GetIPAddressExclusions'
    return self.__callapi(method, rsid_list=rsid_list)

def GetIPObfuscation(self, rsid_list):
    """Return the IP address obfuscation setting per report suite."""
    method = 'ReportSuite.GetIPObfuscation'
    return self.__callapi(method, rsid_list=rsid_list)

def GetKeyVisitors(self, rsid_list):
    """Return the list of key visitors per report suite."""
    method = 'ReportSuite.GetKeyVisitors'
    return self.__callapi(method, rsid_list=rsid_list)

def GetListVariables(self, rsid_list):
    """Return the list variables per report suite."""
    method = 'ReportSuite.GetListVariables'
    return self.__callapi(method, rsid_list=rsid_list)

def GetLocalization(self, rsid_list):
    """Return the localization (multi-byte character) settings per report suite."""
    method = 'ReportSuite.GetLocalization'
    return self.__callapi(method, rsid_list=rsid_list)

def GetLogin(self, ):
    """Return the authenticated user's login details."""
    return self.__callapi('Permissions.GetLogin')

def GetLogins(self, ):
    """Return all logins for the company."""
    return self.__callapi('Permissions.GetLogins')

def GetMarketingChannelCosts(self, rsid_list):
    """Return the currently defined Marketing Channel costs per report suite."""
    method = 'ReportSuite.GetMarketingChannelCosts'
    return self.__callapi(method, rsid_list=rsid_list)

def GetMarketingChannelExpiration(self, rsid_list):
    """Return the Marketing Channel expiration dates per report suite."""
    method = 'ReportSuite.GetMarketingChannelExpiration'
    return self.__callapi(method, rsid_list=rsid_list)

def GetMarketingChannelRules(self, rsid_list):
    """Return the Marketing Channel rules per report suite."""
    method = 'ReportSuite.GetMarketingChannelRules'
    return self.__callapi(method, rsid_list=rsid_list)

def GetMarketingChannels(self, rsid_list):
    """Return the defined Marketing Channels per report suite."""
    method = 'ReportSuite.GetMarketingChannels'
    return self.__callapi(method, rsid_list=rsid_list)

def GetMetrics(self, rsid_list, elements=[], metrics=[]):
    """Return the valid metrics per report suite, honoring any existing selections.

    rsid_list -- iterable of report suite ids
    elements -- existing elements to combine with an additional metric
    metrics -- existing metrics to combine with an additional metric
    """
    return {suite_id: self.__callapi('Report.GetMetrics', reportSuiteID=suite_id,
                                     existingElements=elements, existingMetrics=metrics)
            for suite_id in rsid_list}

def GetMobileAppReporting(self, rsid_list):
    """Return the Mobile Application Tracking settings per report suite."""
    method = 'ReportSuite.GetMobileAppReporting'
    return self.__callapi(method, rsid_list=rsid_list)

def GetPaidSearchDetection(self, rsid_list):
    """Return the paid search detection settings per report suite."""
    method = 'ReportSuite.GetPaidSearchDetection'
    return self.__callapi(method, rsid_list=rsid_list)

def GetPermanentTraffic(self, rsid_list):
    """Return the permanent traffic settings per report suite."""
    method = 'ReportSuite.GetPermanentTraffic'
    return self.__callapi(method, rsid_list=rsid_list)
def GetProcessingStatus(self, rsid_list):
    """
    Returns processing status for the given report suites.
    Keyword arguments:
    rsid_list -- Report suites to evaluate
    """
    # Bug fix: this wrapper previously called ReportSuite.GetPermanentTraffic
    # (copy-paste from the method above), returning the wrong data.
    return self.__callapi('ReportSuite.GetProcessingStatus', rsid_list=rsid_list)
def GetPrivacySettings(self, rsid_list):
    """Return the privacy settings (activation date) per report suite."""
    method = 'ReportSuite.GetPrivacySettings'
    return self.__callapi(method, rsid_list=rsid_list)

def GetProps(self, rsid_list):
    """Return the props (traffic variables) per report suite."""
    method = 'ReportSuite.GetProps'
    return self.__callapi(method, rsid_list=rsid_list)

def GetQueue(self, ):
    """Return the queue of queued/running reports."""
    return self.__callapi('Report.GetQueue')
def GetRealTimeReport(self, rsid_list, metrics=None, elements=None,
                      date_granularity=5, date_from="1 hour ago",
                      date_to="now", sort_algorithm="mostpopular",
                      floor_sensitivity=.25, first_rank_period=0,
                      algorithm_argument="linear", everything_else=True,
                      selected=None):
    """
    Function to access the Adobe Analytics Real-Time API v1.4.
    This API provides the ability for reporting up to the most recent minute.
    This API is best used at 15-30 second intervals (or longer).

    keyword arguments:
    rsid_list -- Report Suite
    metrics -- Report metric
    elements -- Report breakdowns
    date_granularity -- Report Granularity. Defaults to 5 minutes
    date_from -- Report starting time. Defaults to "1 hour ago"
    date_to -- Report end time. Defaults to "now"
    sort_algorithm -- Sorting algorithm. Defaults to "mostpopular"
    floor_sensitivity -- Floor sensitivity. Defaults to .25
    first_rank_period -- First Ranking Period. Defaults to 0
    algorithm_argument -- Ranking algorithm. Defaults to "linear"
    everything_else -- Provide counts for elements not returned as 'top'
    selected -- Selected items for a given element (only works for a single element)
    """
    # Fix 1: mutable default arguments ([]) are shared across calls; use
    # None sentinels instead.
    metrics = [] if metrics is None else metrics
    elements = [] if elements is None else elements
    selected = [] if selected is None else selected
    # Fix 2: the original passed elements=[] to the API call, silently
    # discarding the caller's breakdowns.
    # NOTE(review): the remaining keyword arguments are documented but were
    # never forwarded in the original implementation; that behaviour is
    # preserved pending confirmation of the __callapi/API contract.
    return self.__callapi('Report.Run', rsid_list=rsid_list,
                          metrics=metrics, elements=elements)
def GetRealTimeSettings(self, rsid_list):
    """Return the metrics configured to provide real-time data.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetRealTimeSettings',
                          rsid_list=rsid_list)
def GetReportDescription(self, bookmark):
    """Fetch the report description for a specific bookmark.

    bookmark -- bookmark ID
    """
    return self.__callapi('Bookmark.GetReportDescription',
                          bookmark_id=bookmark)
def GetReportSuites(self):
    """Return every report suite available to the user's company."""
    return self.__callapi('Company.GetReportSuites')
def GetScheduledSpike(self, rsid_list):
    """Return the scheduled traffic increase settings for the given suites.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetScheduledSpike',
                          rsid_list=rsid_list)
def GetSegments(self, rsid_list):
    """Return the segments available in one or more report suites.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetSegments',
                          rsid_list=rsid_list)
def GetSiteTitle(self, rsid_list):
    """Return the site title (friendly name) of each given report suite.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetSiteTitle',
                          rsid_list=rsid_list)
def GetEvents(self, rsid_list):
    """Return the success events of each given report suite.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetEvents',
                          rsid_list=rsid_list)
def GetTemplate(self, rsid_list):
    """Return the creation template of each given report suite.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetTemplate',
                          rsid_list=rsid_list)
def GetTimeZone(self, rsid_list):
    """Return the time-zone setting of each given report suite.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetTimeZone',
                          rsid_list=rsid_list)
def GetTrackingServer(self, rsid):
    """
    Retrieves the tracking server information for the specified report
    suite.  (The previous docstring was copied from another method and
    described the wrong call and parameter name.)

    Keyword arguments:
    rsid -- Report suite to evaluate
    """
    return self.__callapi('Company.GetTrackingServer', rsid=rsid)
def GetTransactionEnabled(self, rsid_list):
    """Return whether transaction-id storage is enabled per report suite.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetTransactionEnabled',
                          rsid_list=rsid_list)
def GetUniqueVisitorVariable(self, rsid_list):
    """Return the unique-visitor variable setting of the given suites.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetUniqueVisitorVariable',
                          rsid_list=rsid_list)
def GetVersionAccess(self):
    """Return the product versions the company has access to."""
    return self.__callapi('Company.GetVersionAccess')
def GetVideoSettings(self, rsid_list):
    """Return the video measurement settings of the given report suites.

    rsid_list -- report suites to evaluate
    """
    return self.__callapi('ReportSuite.GetVideoSettings',
                          rsid_list=rsid_list)
def CancelReport(self, report_id):
    """Cancel a queued report.

    report_id -- id of the report to cancel
    """
    payload = '{"reportID": %s}' % report_id
    return self.__callapi('Report.Cancel', js=payload)
def ValidateReport(self, report_description, interval_seconds=0, max_attempts=1):
    """
    Checks if a report description is valid.

    Keyword arguments:
    report_description -- json of the report
    interval_seconds -- NOTE(review): accepted but never used by this
                        method; presumably intended for polling -- confirm
    max_attempts -- NOTE(review): accepted but never used by this method
    """
    return self.__callapi('Report.Validate', report_description=report_description)
| 21,700 | 6,046 |
import logging
import functools
import mock
from testify import TestCase, setup
from testify import class_setup, class_teardown
from testify import teardown
import time
from tron.utils import timeutils
log = logging.getLogger(__name__)
# TODO: remove when replaced with tron.eventloop
class MockReactorTestCase(TestCase):
    """Patch the reactor to a MockReactor.

    Subclasses must set ``module_to_mock`` to the module whose ``reactor``
    attribute should be replaced with a Turtle stub for each test.
    """
    # Override this in subclasses
    module_to_mock = None

    @class_setup
    def class_setup_patched_reactor(self):
        # Remember the real reactor so it can be restored at teardown.
        msg = "%s must set a module_to_mock field" % self.__class__
        assert self.module_to_mock, msg
        self.old_reactor = getattr(self.module_to_mock, 'reactor')

    @class_teardown
    def teardown_patched_reactor(self):
        # Restore the real reactor once the whole class has run.
        setattr(self.module_to_mock, 'reactor', self.old_reactor)

    @setup
    def setup_mock_reactor(self):
        # A fresh Turtle stub replaces the reactor before every test.
        self.reactor = Turtle()
        setattr(self.module_to_mock, 'reactor', self.reactor)
# TODO: remove
class MockTimeTestCase(TestCase):
    """Freeze ``timeutils.current_time`` to a fixed ``now`` for each test.

    Subclasses must set ``now`` (a datetime) on the class.
    """
    now = None

    @setup
    def setup_current_time(self):
        assert self.now, "%s must set a now field" % self.__class__
        # Swap in a lambda returning the frozen time; keep the original.
        self.old_current_time = timeutils.current_time
        timeutils.current_time = lambda: self.now

    @teardown
    def teardown_current_time(self):
        timeutils.current_time = self.old_current_time
        # Reset 'now' back to what was set on the class because some test may
        # have changed it
        self.now = self.__class__.now
def retry(max_tries=3, delay=0.1, exceptions=(KeyError, IndexError)):
    """A function decorator for re-trying an operation. Useful for MongoDB
    which is only eventually consistent.

    max_tries -- total number of attempts before giving up
    delay -- seconds to sleep between attempts
    exceptions -- exception types that trigger a retry

    Raises the last caught exception once all attempts are exhausted.
    """
    def wrapper(f):
        @functools.wraps(f)
        def wrap(*args, **kwargs):
            last_exc = None
            # Fix: use range (xrange is Python-2-only).
            for _ in range(max_tries):
                try:
                    return f(*args, **kwargs)
                except exceptions as exc:
                    last_exc = exc
                    time.sleep(delay)
            # Fix: a bare `raise` here has no active exception in Python 3
            # (the except block has exited), raising RuntimeError instead of
            # the real failure; re-raise the last caught exception.
            raise last_exc
        return wrap
    return wrapper
# TODO: remove when replaced with mock
class Turtle(object):
    """A permissive Mock-like stub.

    Unknown attributes are fabricated on demand as child Turtles; every
    call is recorded in ``calls`` and returns a fresh Turtle appended to
    ``returns``.
    """

    def __init__(self, *args, **kwargs):
        self.__dict__.update(kwargs)
        self.calls = []
        self.returns = []

    def __getattr__(self, name):
        # Fabricate, memoize and hand back a child stub.
        child = type(self)()
        self.__dict__[name] = child
        return child

    def __call__(self, *args, **kwargs):
        self.calls.append((args, kwargs))
        result = type(self)()
        self.returns.append(result)
        return result
def autospec_method(method, *args, **kwargs):
    """Create an autospec for a bound instance method and install it on the
    method's instance in place of the original.

    method -- a bound method; extra args/kwargs go to mock.create_autospec
    """
    mocked_method = mock.create_autospec(method, *args, **kwargs)
    # Fix: `im_self` is Python-2-only; `__self__` works on bound methods in
    # both Python 2.6+ and Python 3.
    setattr(method.__self__, method.__name__, mocked_method)
| 2,750 | 855 |
from classes.Display import Display
from classes.FinalScoreboard import FinalScoreboard
from classes.Sounds import Sounds
from classes.TargetArea import TargetArea
from classes.Target import Target
from classes.Text import Text
from classes.Timer import Timer
from time import time
import pygame
class App(object):
    """
    Main application class for the aim trainer: holds all configuration
    constants, sets up the pygame window/sounds, and runs the game loop.
    """
    # Window layout and global gameplay settings.
    BORDER = 10
    DISPLAY_COLOR = (100,100,100)
    DISPLAY_GEOMETRY = [700,500]
    DISPLAY_TITLE = "Aim Trainer"
    FRAMES_PER_SECOND = 60
    LIVES = 5
    MISSING_SHOTS_DECREASES_LIFE = False
    # Top scoreboard strip.
    SCOREBOARD_AREA = 50
    SCOREBOARD_COLOR = (255,255,255)
    SCOREBOARD_FONT = ('Comic Sans MS', 21)
    SCOREBOARD_FORMAT = "Hits: %i Accuracy: %.1f%% FPS: %i Targets: %.2f/s Lives: %i"
    SCOREBOARD_LOCATION = [BORDER+1,10]
    SOUNDS_BUFFER = 64
    # Target spawning, geometry and animation.
    TARGET_ADD_TIME = 0.2
    TARGET_AREA_COLORS = [(128,128,128),(148,148,148)]
    TARGET_BORDER = 0
    TARGET_AREA_GEOMETRY = [0+BORDER,SCOREBOARD_AREA+BORDER,DISPLAY_GEOMETRY[0]-BORDER,DISPLAY_GEOMETRY[1]-BORDER]
    TARGET_COLORS = [(255,0,0),(255,255,255)]
    TARGET_LIMIT_PER_SECOND = None
    TARGET_RADIUS = 40
    TARGETS_PER_SECOND = 1.8
    TARGET_SPEED = 0.4
    # End-of-game scoreboard panel.
    FINAL_SCOREBOARD_BACKGROUND_COLOR = (255,255,255)
    FINAL_SCOREBOARD_BORDER = 5
    FINAL_SCOREBOARD_BORDER_COLOR = (139,69,19)
    FINAL_SCOREBOARD_FONT = ("Arial",40)
    FINAL_SCOREBOARD_GEOMETRY = [TARGET_AREA_GEOMETRY[0]+50,TARGET_AREA_GEOMETRY[1]+50,TARGET_AREA_GEOMETRY[2]-50,TARGET_AREA_GEOMETRY[3]-50]
    FINAL_SCOREBOARD_TEXT_COLOR = (80,80,80)

    def __init__(self):
        """Build the window, sounds, scoreboards and helper objects."""
        # NOTE(review): Sounds is constructed before pygame.init() --
        # presumably so the mixer buffer size takes effect; confirm.
        self.sounds = Sounds(self.SOUNDS_BUFFER)
        pygame.init()
        self.display = Display(
            *self.DISPLAY_GEOMETRY,
            self.DISPLAY_TITLE,
            self.DISPLAY_COLOR
        )
        self.__surface = self.display.getSurface()
        self.finalScoreboard = FinalScoreboard(
            self.__surface,
            *self.FINAL_SCOREBOARD_GEOMETRY,
            self.FINAL_SCOREBOARD_FONT,
            self.FINAL_SCOREBOARD_BORDER,
            self.FINAL_SCOREBOARD_BORDER_COLOR,
            self.FINAL_SCOREBOARD_TEXT_COLOR,
            self.FINAL_SCOREBOARD_BACKGROUND_COLOR,
            self.TARGET_COLORS
        )
        self.scoreboardText = Text(
            self.__surface,
            *self.SCOREBOARD_LOCATION,
            text_font=self.SCOREBOARD_FONT,
            text_color=self.SCOREBOARD_COLOR
        )
        self.targetArea = TargetArea(
            self.__surface,
            *self.TARGET_AREA_GEOMETRY,
            self.TARGET_AREA_COLORS
        )
        self.__timer = Timer()
        self.__clock = pygame.time.Clock()

    def captureEvents(self):
        """
        Method for capturing events and taking action based on them.
        """
        for event in pygame.event.get():
            # Window close button ends the program.
            if event.type == pygame.QUIT:
                self.__stop = True
                break
            # Keyboard input.
            if event.type == pygame.KEYDOWN:
                # "Esc" quits the program.
                if event.key == pygame.K_ESCAPE:
                    self.__stop = True
                    break
                # "Enter"/"Space" starts a new session when the user is on
                # the game-over screen.
                elif event.key in [pygame.K_RETURN,pygame.K_SPACE]:
                    if not self.__start:
                        self.__start = True
            # Mouse button 1 fires a shot.
            if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
                # During a session play the shooting sound; otherwise play
                # the out-of-ammo sound and ignore the click.
                if self.__start:
                    self.sounds.playSound(self.sounds.shooting_sound)
                else:
                    self.sounds.playSound(self.sounds.without_ammunition_sound)
                    continue
                # Check whether the shot hit any target.
                for target in self.__targets.copy():
                    # Shot position (x,y) relative to this target.
                    hit = target.checkHit()
                    # On a hit: count it, remove the target, and stop
                    # processing the remaining events this call.
                    if hit:
                        self.sounds.playSound(self.sounds.metal_hit_sound)
                        self.__shots.append(hit)
                        self.__targets.remove(target)
                        self.__hits += 1
                        return
                # No target was hit: count a miss and, if the option for
                # losing a life on missed shots is enabled, take a life.
                if self.MISSING_SHOTS_DECREASES_LIFE:
                    self.__lives -= 1
                self.__failures += 1

    def createTarget(self):
        """
        Method to create a target within the screen.
        """
        target = Target(
            surface = self.__surface,
            area_geometry = self.TARGET_AREA_GEOMETRY,
            radius=self.TARGET_RADIUS,
            target_colors=self.TARGET_COLORS
        )
        self.__targets.append(target)

    def gameOver(self):
        """
        Method for creating an endgame screen.
        """
        self.__start = False
        # Capture the finished session's stats for the final scoreboard.
        # NOTE(review): the local name 'time' shadows the imported time()
        # function inside this method.
        hits = self.__hits
        accuracy = FinalScoreboard.getAccuracy(self.__hits+self.__failures,self.__hits)
        targets_per_second = self.__target_per_second
        time = self.__timer.getTime()
        shots = self.__shots.copy()
        # Draw the game-over screen until the user either quits or starts
        # a new session.
        while not self.__stop and not self.__start:
            self.captureEvents()
            self.display.drawDisplay()
            self.targetArea.drawArea()
            # The scoreboard strip shows how to start a new session.
            self.scoreboardText.setText('GAME OVER: Click "Enter" or "Space" to continue.')
            self.scoreboardText.drawText()
            self.finalScoreboard.drawFinalScoreboard(hits,accuracy,targets_per_second,time,shots)
            self.__clock.tick(self.FRAMES_PER_SECOND)
            pygame.display.flip()
        # Quit if the user asked to exit; otherwise start a new session.
        if self.__stop:
            pygame.quit()
        else: self.run()

    def run(self):
        """
        Method to start a new session.
        """
        # Reset all per-session state.
        self.__failures = 0
        self.__hits = 0
        self.__stop = False
        self.__targets = []
        self.__shots = []
        self.__lives = self.LIVES
        self.__target_per_second = self.TARGETS_PER_SECOND
        self.__start = True
        # Font for the in-game scoreboard strip.
        self.scoreboardText.setFont(self.SCOREBOARD_FONT)
        # Start the session stopwatch.
        self.__timer.start()
        last_time_to_create_target = time()
        last_time_to_add_tps = time()
        # Main loop: run while the user has lives left and has not quit.
        while not self.__stop and self.__lives > 0:
            self.captureEvents()
            # Spawn a new target according to the targets-per-second rate.
            if time() - last_time_to_create_target >= 1/self.__target_per_second:
                self.createTarget()
                last_time_to_create_target = time()
            # Periodically ramp up the spawn rate (up to the optional cap).
            if time() - last_time_to_add_tps >= self.TARGET_ADD_TIME:
                if not self.TARGET_LIMIT_PER_SECOND or self.TARGET_LIMIT_PER_SECOND > self.__target_per_second:
                    self.__target_per_second += 1/self.__target_per_second/100
                last_time_to_add_tps = time()
            self.update()
        # If the loop ended because of a quit event, shut down; otherwise
        # the session is over (no lives left) -- show the game-over screen.
        if self.__stop:
            pygame.quit()
        else:
            self.gameOver()

    def setScore(self):
        """
        Method for inserting updated information in the scoreboard.
        """
        hits = self.__hits
        accuracy = FinalScoreboard.getAccuracy(self.__hits+self.__failures,self.__hits)
        fps = self.__clock.get_fps()
        targets_per_second = self.__target_per_second
        self.scoreboardText.setText(self.SCOREBOARD_FORMAT%(hits,accuracy,fps,targets_per_second,self.__lives))

    def targetAnimation(self):
        """
        Method for generating target animation.
        """
        targets = self.__targets.copy()
        targets.reverse()
        for target in targets:
            try:
                # Grow the target; once it cannot grow any further it
                # starts shrinking instead.
                if target.increase(self.TARGET_SPEED) == -1:
                    target.decreases(self.TARGET_SPEED)
                target.drawTarget(border=self.TARGET_BORDER)
            # A target that shrank below the limit is lost: play the
            # target-lost sound, remove it and take a life.
            except ValueError:
                self.sounds.playSound(self.sounds.target_loss_sound)
                self.__targets.remove(target)
                self.__lives -= 1

    def update(self):
        """
        Method for updating the graphics part of the program.
        """
        self.setScore()
        self.display.drawDisplay()
        self.scoreboardText.drawText()
        self.targetArea.drawArea()
        self.targetAnimation()
        self.__clock.tick(self.FRAMES_PER_SECOND)
        pygame.display.flip()
| 10,397 | 3,450 |
"""Provide test fixtures"""
import logging
import os
import pytest
import vcd
@pytest.fixture
def dummy_vcd_file(tmpdir):
    """Create a VCD file with deterministic counter data and return its path.

    (The data is deterministic, not random, despite the old docstring.)
    """
    filename = os.path.sep.join([str(tmpdir), 'test.vcd'])
    with open(filename, 'w+') as fptr:
        with vcd.VCDWriter(fptr, timescale=(10, 'ns'), date='today') as writer:
            # NOTE(review): counter_var is immediately rebound below, so the
            # 'dummyvar' registration never receives any change records.
            counter_var = writer.register_var('', 'dummyvar', 'integer',
                                              size=8)
            counter3_var = writer.register_var('a', 'dummyvara', 'integer')
            counter_var = writer.register_var('a.b.c', 'counter', 'integer',
                                              size=8)
            for i in range(1000, 300000, 300):
                # NOTE(review): this assignment is dead -- the enumerate
                # below overwrites timestamp on the first iteration.
                timestamp = 0
                for timestamp, value in enumerate(range(10, 200, 2)):
                    writer.change(counter_var, i + timestamp, value)
                    writer.change(counter3_var, i + timestamp, i % 42)
    return filename
# pylint: disable=too-many-locals
@pytest.fixture
def src_merge_file(tmpdir):
    """Create two source VCD files plus the expected merged result.

    Returns a (src1, src2, dest) tuple of file paths.  Every change written
    to src1/src2 is mirrored into dest under a scope named after the source
    file, so dest is the reference output for a merge of src1 and src2.
    """
    src1 = os.path.sep.join([str(tmpdir), 'src1.vcd'])
    src2 = os.path.sep.join([str(tmpdir), 'src2.vcd'])
    dest = os.path.sep.join([str(tmpdir), 'test_merged.vcd'])
    with open(src1, 'w+') as ptr1, open(
            src2, 'w+') as ptr2, open(dest, 'w+') as dest_fp:
        with vcd.VCDWriter(
            dest_fp, timescale=(10, 'ns'), date='today'
        ) as dest_wr, vcd.VCDWriter(
            ptr1, timescale=(10, 'ns'), date='today'
        ) as src1_wr, vcd.VCDWriter(ptr2, timescale=(10, 'ns'),
                                    date='today') as src2_wr:
            # Merged-file variables live under scopes derived from the
            # source file names (path minus the '.vcd' extension).
            counter_merge = dest_wr.register_var(src1[:-4], 'foobar',
                                                 'integer', size=32)
            bartwo_merge = dest_wr.register_var(src1[:-4] + '.a', 'bar_two',
                                                'reg', size=16)
            counter8_merge = dest_wr.register_var(src1[:-4] + '.a', 'counter',
                                                  'integer', size=8)
            lcounter_merge = dest_wr.register_var(src2[:-4], 'foobar2',
                                                  'integer', size=32)
            logging.info('%s.a', src1[:-4])
            # Source-file variables mirror the merged ones.
            counter_var1 = src1_wr.register_var('', 'foobar', 'integer',
                                                size=32)
            bartwo_var1 = src1_wr.register_var('a', 'bar_two', 'reg', size=16)
            counter8_var1 = src1_wr.register_var('a', 'counter', 'integer',
                                                 size=8)
            lcounter_var2 = src2_wr.register_var('', 'foobar2', 'integer',
                                                 size=32)
            timestamp = 1
            for i in range(1000, 10_000, 100):
                # Identical changes go to src1 and dest.
                src1_wr.change(counter_var1, timestamp, i)
                src1_wr.change(bartwo_var1, timestamp, i % 2)
                src1_wr.change(counter8_var1, timestamp, i % 256)
                dest_wr.change(counter_merge, timestamp, i)
                dest_wr.change(bartwo_merge, timestamp, i % 2)
                dest_wr.change(counter8_merge, timestamp, i % 256)
                # src2 only starts contributing from timestamp 20 onward.
                if timestamp >= 20:
                    src2_wr.change(lcounter_var2, timestamp, i - 20)
                    dest_wr.change(lcounter_merge, timestamp, i - 20)
                timestamp += 1
    return (src1, src2, dest)
| 3,501 | 1,130 |
import discord, os
from discord.ext import commands
from discord.ext.commands import has_permissions, bot_has_permissions
from dotenv import load_dotenv
# Bot instance using '-' as the command prefix; the built-in help and
# reload commands are removed so the custom ones below take their place.
client = commands.Bot(command_prefix='-')
client.remove_command('help')
client.remove_command('reload')
# Loads a cog
@client.command()
@has_permissions(administrator=True)
@commands.is_owner()
async def load(ctx, extension):
    """Load the cogs.<extension> cog (owner/admin only).

    Deletes the invoking message, then confirms with a self-deleting reply.
    """
    await ctx.channel.purge(limit=1)
    client.load_extension(f'cogs.{extension}')
    await ctx.send(f"Loaded {extension} cog", delete_after=5)
# Unloads a cog
@client.command()
@has_permissions(administrator=True)
@commands.is_owner()
async def unload(ctx, extension):
    """Unload the cogs.<extension> cog (owner/admin only).

    Deletes the invoking message, then confirms with a self-deleting reply.
    """
    await ctx.channel.purge(limit=1)
    client.unload_extension(f'cogs.{extension}')
    await ctx.send(f"Unloaded {extension} cog", delete_after=5)
# Reloads a cog
@client.command(aliases=['reload'])
@has_permissions(administrator=True)
@commands.is_owner()
async def _reload(ctx, extension):
    """Reload the cogs.<extension> cog by unloading then loading it.

    Invoked as '-reload'; deletes the invoking message first.
    """
    await ctx.channel.purge(limit=1)
    client.unload_extension(f'cogs.{extension}')
    client.load_extension(f'cogs.{extension}')
    await ctx.send(f"Reloaded {extension} cog", delete_after=5)
load_dotenv()
# Grabs cogs from the directory named by the COGS environment variable and
# loads every .py module found there, then starts the bot.
for fname in os.listdir(os.getenv('COGS')):
    if fname.endswith('.py'):
        client.load_extension(f'cogs.{fname[:-3]}')
    else:
        # Fix: report the actual entry name -- the old message applied the
        # '.py'-stripping slice to names that do not end in '.py' (e.g.
        # '__pycache__' was reported as '__pycach').
        print(f'Unable to load {fname}')
client.run(os.getenv('TOKEN'))
import sys
from setuptools import setup
from setuptools.extension import Extension
### unit tests for this package
import topicmodel_tests
### Set include dirs for numpy: prefer numpy's own get_include(); fall back
### to a path guessed from site-packages when numpy is not importable yet.
try:
    import numpy
except ImportError:
    # NOTE(review): numpy_already_installed is set but never read below --
    # possibly left over from earlier logic; confirm before removing.
    numpy_already_installed = False
    from distutils.sysconfig import get_python_lib
    include_numpy_dir = get_python_lib()+"/numpy/core/include"
else:
    numpy_already_installed = True
    include_numpy_dir = numpy.get_include()
### Cython - rebuild the .c from the .pyx file if there, or if not, just use the .c
try:
    from Cython.Distutils import build_ext
    ## from Cython.Build import cythonize
except ImportError:
    use_cython = False
else:
    use_cython = True
cmdclass = { }
ext_modules = [ ]
# Build the sampler extension from the .pyx (with Cython available) or from
# the pre-generated .c file otherwise.
if use_cython:
    ext_modules += [
        Extension("topicmodels.samplers.samplers_lda",
                  ["topicmodels/samplers/samplers_lda.pyx"],
                  include_dirs=[
                      include_numpy_dir,
                  ],
                  )
    ]
    cmdclass.update({ 'build_ext': build_ext })
else:
    ext_modules += [
        Extension("topicmodels.samplers.samplers_lda",
                  ["topicmodels/samplers/samplers_lda.c"],
                  include_dirs=[
                      include_numpy_dir,
                  ],
                  )
    ]
setup(name = "topic-modelling-tools",
      version="0.6dev",
      author="Stephen Hansen",
      url="https://github.com/alan-turing-institute/topic-modelling-tools",
      author_email="stephen.hansen@economics.ox.ac.uk",
      ext_modules=ext_modules,
      packages=['topicmodels', 'topicmodel_tests', 'topicmodels.LDA', 'topicmodels.multimix','topicmodels.samplers'],
      package_data={'topicmodels': ['*.txt']},
      cmdclass=cmdclass,
      license="LICENSE",
      description = "Python library that performs Latent Dirichlet Allocation using Gibbs sampling.",
      long_description = open("README.md").read(),
      install_requires=[
          "numpy >= 1.13.3",
          "nltk >= 3.2.4",
          "pandas >= 0.20.3",
          "scipy >= 0.19.1",
          "Cython >= 0.20.1"
      ],
      test_suite = 'topicmodel_tests.my_test_suite'
      )
| 2,177 | 705 |
import datetime
from unittest import mock
import pytz
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from game.models import Ownership, Game, Player, RealEstate
from game.tests.factories.ownership import OwnershipFactory
from game.tests.factories.player import PlayerFactory
from game.tests.factories.realestate import RealEstateFactory
class OwnershipTest(TestCase):
    """Tests for the Ownership model: creation, validation, timestamping and
    cascade-delete behaviour relative to Game, Player and RealEstate."""

    def test_create_ownership(self):
        estate = RealEstateFactory()
        player = PlayerFactory(game=estate.game)
        my_ownership = Ownership(real_estate=estate, player=player)
        my_ownership.full_clean()
        my_ownership.save()
        self.assertTrue(my_ownership in Ownership.objects.all())
        self.assertEqual(my_ownership.real_estate, estate)
        self.assertEqual(my_ownership.player, player)

    def test_buy_timestamp(self):
        """buy_timestamp is set from timezone.now() at save time."""
        estate = RealEstateFactory()
        player = PlayerFactory(game=estate.game)
        mocked = datetime.datetime(2021, 3, 3, 0, 2, 3, tzinfo=pytz.utc)
        # Freeze django.utils.timezone.now so the stored timestamp is known.
        with mock.patch("django.utils.timezone.now", mock.Mock(return_value=mocked)):
            my_ownership = Ownership(real_estate=estate, player=player)
            my_ownership.save()
        self.assertEqual(my_ownership.buy_timestamp, mocked)

    def test_cannot_create_ownership_without_player(self):
        ownership = Ownership(real_estate=RealEstateFactory())
        with self.assertRaises(IntegrityError):
            ownership.save()

    def test_cannot_create_ownership_without_realestate(self):
        ownership = Ownership(player=PlayerFactory())
        with self.assertRaises(IntegrityError):
            ownership.save()

    def test_delete_ownership_does_not_delete_game_player_or_realestate(self):
        ownership = OwnershipFactory()
        player = ownership.player
        real_estate = ownership.real_estate
        game = ownership.player.game
        self.assertEqual(player.game, real_estate.game)
        ownership.delete()
        # Deleting the link must leave the linked rows intact.
        self.assertTrue(ownership not in Ownership.objects.all())
        self.assertTrue(game in Game.objects.all())
        self.assertTrue(player in Player.objects.all())
        self.assertTrue(real_estate in RealEstate.objects.all())

    def test_cannot_player_and_realestate_from_different_game(self):
        """full_clean rejects a player/estate pair from different games."""
        player = PlayerFactory()
        estate = RealEstateFactory()
        my_ownership = Ownership(player=player, real_estate=estate)
        with self.assertRaises(ValidationError):
            my_ownership.full_clean()

    def test_delete_player_deletes_ownership(self):
        ownership = OwnershipFactory()
        ownership.save()
        player = ownership.player
        player.delete()
        self.assertTrue(ownership not in Ownership.objects.all())

    def test_delete_realestate_deletes_ownership(self):
        ownership = OwnershipFactory()
        ownership.save()
        realestate = ownership.real_estate
        realestate.delete()
        self.assertTrue(ownership not in Ownership.objects.all())

    def test_delete_game_deletes_ownership(self):
        ownership = OwnershipFactory()
        ownership.save()
        game = ownership.player.game
        game.delete()
        self.assertTrue(ownership not in Ownership.objects.all())
| 3,311 | 979 |
# from constraint.node.SubstitutorVisitor import SubstitutorVisitor
from enum import Enum
class Node(object):
    """Base class for constraint expression-tree nodes."""

    def __init__(self):
        self.children = list()
        self.node_type = NodeType.NODE

    def get_vars(self, vars=None):
        """Recursively crawl the tree and collect every Variable name.

        vars -- optional set to accumulate into; a fresh set is created
        when omitted.  Returns the (possibly shared) set of names.

        Fix: the original used a mutable default argument (vars=set()),
        so the same set was shared across every call and names from one
        tree leaked into the results of later, unrelated calls.
        """
        if vars is None:
            vars = set()
        # stop: a Variable leaf contributes its name
        if self.node_type == NodeType.VARIABLE:
            vars.add(self.name)
        # recursion
        for child in self.children:
            child.get_vars(vars)
        return vars
# For the visitor class: isinstance checks break when this module is
# imported under two different paths, so nodes carry an explicit type tag
# that compares by value.
class NodeType(Enum):
    """Tag identifying the concrete kind of a Node."""
    NODE = 0
    LEQ = 1
    GEQ = 2
    LESS = 3
    GREATER = 4
    EQ = 5
    NEQ = 6
    IN = 7
    VARIABLE = 8
    CONSTANT = 9
    ADDITION = 10
    SUBTRACTION = 11
    MULTIPLICATION = 12
    EXPONENTIAL = 13

    def __eq__(self, other):
        # Value-based comparison so equivalent members from a re-imported
        # copy of this enum still compare equal.
        try:
            return self.value == other.value
        except AttributeError:
            # Fix: comparing against a non-enum (e.g. an int) used to raise
            # AttributeError; defer to the default protocol instead.
            return NotImplemented

    # Fix: defining __eq__ implicitly removed the inherited __hash__,
    # making members unhashable (unusable as dict keys / in sets).
    # Restore a hash consistent with the value-based equality above.
    def __hash__(self):
        return hash(self.value)
class LEQ(Node):
    """Binary node for the '<=' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.LEQ
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} <= {self.children[1]})'
class GEQ(Node):
    """Binary node for the '>=' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.GEQ
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} >= {self.children[1]})'
class Less(Node):
    """Binary node for the '<' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.LESS
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} < {self.children[1]})'
class Greater(Node):
    """Binary node for the '>' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.GREATER
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} > {self.children[1]})'
class EQ(Node):
    """Binary node for the '=' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.EQ
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} = {self.children[1]})'
class NEQ(Node):
    """Binary node for the '!=' relation."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.NEQ
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} != {self.children[1]})'
class In(Node):
    """Ternary node: op lies in the closed interval [op_low, op_up]."""

    def __init__(self, op, op_low, op_up):
        Node.__init__(self)
        self.node_type = NodeType.IN
        self.children += [op, op_low, op_up]

    def __str__(self):
        return f'({self.children[0]}) IN [ {self.children[1]} , {self.children[2]} ]'
class Variable(Node):
    """Leaf node referencing a named variable."""

    def __init__(self, name):
        Node.__init__(self)
        self.name = name
        self.node_type = NodeType.VARIABLE

    def __str__(self):
        return self.name
class Constant(Node):
    """Leaf node holding a literal value."""

    def __init__(self, value):
        Node.__init__(self)
        self.value = value
        self.node_type = NodeType.CONSTANT

    def __str__(self):
        return f'{self.value}'
class Addition(Node):
    """Binary node for the '+' operator."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.ADDITION
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} + {self.children[1]})'
class Subtraction(Node):
    """Binary node for the '-' operator."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.SUBTRACTION
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} - {self.children[1]})'
class Multiplication(Node):
    """Binary node for the '*' operator."""

    def __init__(self, op1, op2):
        Node.__init__(self)
        self.node_type = NodeType.MULTIPLICATION
        self.children += [op1, op2]

    def __str__(self):
        return f'({self.children[0]} * {self.children[1]})'
class Exponential(Node):
    """Unary node rendering as EXP(child)."""

    def __init__(self, op1):
        Node.__init__(self)
        self.node_type = NodeType.EXPONENTIAL
        self.children.append(op1)

    def __str__(self):
        # Fix: this node has exactly one child; the original indexed
        # self.children[1] and raised IndexError on every str() call.
        return '(' + 'EXP(' + str(self.children[0]) + ') ' + ')'
| 4,637 | 1,620 |
# Generated by Django 3.1.7 on 2021-03-28 20:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Order.shipped_date nullable (it is only set once the order
    reaches the Done status)."""

    dependencies = [
        ('store', '0003_auto_20210328_2029'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='shipped_date',
            field=models.DateField(help_text='Date when order moved to Done status', null=True, verbose_name='shipped date'),
        ),
    ]
| 464 | 162 |
from .snippets import (
remove_repeating,
flare,
list_appending_long,
level_off,
)
import itertools
import copy
from core.models import RotatorAdminPanel
class PageLoad(object):
    """Load the basic page context.

    Returns as many languages as are installed under the LANGUAGES setting,
    as alternating ("lang_flag_<id>", flag_path) pairs matching the template
    pattern, and loads the basic page items from the model given as the
    first positional argument.
    """

    def __init__(self, *args):
        model, languages = args[0], args[1]
        # First (and only expected) row of the page-items model.
        self.items = list(model.objects.all())[0]
        # ("lang_flag_<id>", flag_path) pairs read off the items object.
        flag_fields = ["lang_flag_" + str(lang[0]) for lang in languages]
        flag_paths = [self.items.__dict__[field] for field in flag_fields]
        self.langs = zip(flag_fields, flag_paths)

    # Use this when the page supports selectable skins.
    # Defaults to skin 0 when no 'choice' is given.
    def page_dress(self, **kwargs):
        skin_model = kwargs["skins"]
        index = int(kwargs["choice"]) if "choice" in kwargs else 0
        self.skins = list(skin_model.objects.all())
        self.skin = self.skins[index]
        self.skinctx = {
            "skin": self.skin,
        }
        return self.skinctx

    # Builds the basic context, optionally extended with a skin context
    # ('skins'/'choice') and any extra mapping passed as 'context'.
    def lazy_context(self, **kwargs):
        self.context = {
            "items": self.items,
            "langs": self.langs,
        }
        if "skins" in kwargs:
            self.page_dress(**kwargs)
            self.context.update(self.skinctx)
        if "context" in kwargs:
            self.context.update(kwargs["context"])
        return self.context
# Convenience wrapper over a Django model manager used together with the
# django-modeltranslation add-on -- simply because it is easier this way...
class PageElement(object):
    """Expose a model's objects in several pre-fetched shapes."""

    def __init__(self, *args, **kwargs):
        self.x = args[0]
        self.listed = list(self.x.objects.all())   # objects as a plain list
        self.allelements = self.x.objects.all()    # full queryset
        self.elements = self.x.objects             # raw manager
        self.baseattrs = self.listed[0]            # first object in the list

    # Element lookup by id.
    def by_id(self, **kwargs):
        """Fetch one element by primary key using the supplied
        get_object_or_404-style callable (kwargs: G404, id)."""
        G404 = kwargs["G404"]
        x_id = kwargs["id"]
        return G404(self.x, pk=x_id)
# Loads the page with admin-panel style extras (menu and adverts).
class PortalLoad(PageLoad):
    """PageLoad specialised for the portal: adds the admin-defined menu and
    advert objects to the base context."""

    def __init__(self, *args):
        super().__init__(*args)
        menus, advert = args[2], args[3]
        self.adverts = advert.objects.all()
        self.adverts_listed = list(advert.objects.all())
        self.menu = list(menus.objects.all())[0]
        # Templates receive False instead of an empty queryset when no
        # adverts are configured.
        if not self.adverts_listed:
            self.adverts = False

    def page_dress(self, **kwargs):
        super().page_dress(**kwargs)

    def lazy_context(self, **kwargs):
        self.context = {
            "items": self.items,
            "langs": self.langs,
            "menu": self.menu,
            "adverts": self.adverts,
        }
        if "skins" in kwargs:
            self.page_dress(**kwargs)
            self.context.update(self.skinctx)
        if "context" in kwargs:
            self.context.update(kwargs["context"])
        return self.context
# Computes crop interactions in the crop planner (helper/"slave" class).
class PlannerRelationship(object):
    def __init__(self, *args, **kwargs):
        # top_tier -- highest step order in the plan; a/b -- the two crop
        # entries being compared (index 3 holds step coordinates, index 4
        # the crop object itself -- TODO confirm against callers).
        self.top_tier = kwargs['top_tier']
        self.a = kwargs['a']
        self.b = kwargs['b']
        # Relationships of crop a that are "about" crop b.
        self.ifdict = {
            "crop_to_crop": self.a[4].crop_relationships.filter(
                about_crop__id=self.b[4].id),
        }
        self.seasondict = {
            0: None,
            1: "Summer",
            2: "Winter",
        }

    def finishing(self, **kwargs):
        """Evaluate the current interaction ``self.i`` and, when it applies,
        append an (a + b + sign) entry to the given list."""
        interactiondict = {
            # Interactions by sub-step:
            0: [0, 0, True],   # same coordinates
            1: [0, 1, True],   # allelopathic / coordinate and successional
            2: [1, 1, True],   # successional
            # Interactions by step:
            3: [2, 2, False],  # during the whole second year
            4: [3, 3, False],  # during the whole third year
            5: [1, 2, False],  # during the first and second year
            6: [1, 1, False],  # during the whole following year
            7: [2, 3, False],  # during the second and third year
        }
        signdict = {1:False, 2:True}
        self.given_list = kwargs['given_list']
        season = self.seasondict[self.i.season_of_interaction]
        if season == "Summer" or season is None:
            if interactiondict[self.i.type_of_interaction][2]:
                # Sub-step granularity: compare sub-step coordinates.
                if (
                    self.a[3][1] == self.b[3][1] - interactiondict[self.i.type_of_interaction][0]
                    or self.a[3][1] == self.b[3][1] - interactiondict[self.i.type_of_interaction][1]
                ):
                    level_off(self.top_tier, self.a, self.b)
                    # Sign 0 means "no effect": nothing is recorded.
                    if self.i.interaction_sign != 0:
                        self.given_list.append(self.a + self.b + [signdict[self.i.interaction_sign]])
                    return self.given_list
            else:
                # Step granularity: compare step coordinates.
                if (
                    self.a[3][0] == self.b[3][0] - interactiondict[self.i.type_of_interaction][0]
                    or self.a[3][0] == self.b[3][0] - interactiondict[self.i.type_of_interaction][1]
                ):
                    if self.i.interaction_sign != 0:
                        self.given_list.append(self.a + self.b + [signdict[self.i.interaction_sign]])
                    return self.given_list

    def relationship(self, **kwargs):
        """Run ``finishing`` over every matching relationship record and
        return the accumulated list."""
        if self.ifdict[kwargs['relationship']].exists():
            # NOTE(review): the loop variable is deliberately bound to
            # self.i so finishing() can read the current record.
            for self.i in self.ifdict[kwargs['relationship']]:
                self.finishing(given_list=kwargs['given_list'])
        return self.given_list
# Works around errors from reusing the CropPlanner template in places
# where the plan contents do not need to be analysed.
class DummyCropPlanner(object):
    """Lightweight stand-in for CropPlanner: exposes the same context keys
    but performs no error/synergy analysis."""

    def __init__(self, *args, **kwargs):
        plan_id = kwargs['plan_id']
        self.pe_rp_id = args[0]
        # Steps belonging to the plan and their sub-steps.
        self.pe_rs = args[1].objects.filter(from_plan=plan_id)
        self.pe_rss = args[3].objects.filter(from_step__from_plan=plan_id)
        # No analysis here, so the error structure stays empty.
        self.error_family_crops = {"e_crops": [], "e_tabs": []}
        # Highest step order in the plan (IndexError on an empty plan,
        # matching the original behaviour).
        orders = sorted(step.order for step in list(self.pe_rs))
        self.top_tier = orders[-1]

    def basic_context(self, **kwargs):
        """Return the template context merged with kwargs['context']."""
        ctx = {
            "efcs": self.error_family_crops,
            "plan": self.pe_rp_id,
            "steps": self.pe_rs,
            "substeps": self.pe_rss,
            "top_tier": self.top_tier,
        }
        ctx.update(kwargs['context'])
        self.context = ctx
        return self.context

    def top_tier(self):
        # NOTE(review): shadowed by the instance attribute of the same name
        # set in __init__, so this method is unreachable on instances.
        return self.top_tier
# Analyses a crop rotation (plan) for errors and synergies.
class CropPlanner(object):
    """Builds the analysis context for one rotation plan.

    Positional args: (plan-page id, Step model, Crop model, SubStep model);
    kwargs: ``plan_id``. Relies on module-level helpers
    ``list_appending_long``, ``level_off`` and ``remove_repeating`` whose
    exact entry layout is assumed here -- confirm against their definitions.
    """
    def __init__(self, *args, **kwargs):
        plan_id = kwargs['plan_id']
        self.pe_rp_id = args[0]
        # Steps of the plan and all sub-steps across those steps.
        self.pe_rs = args[1].objects.filter(from_plan=plan_id)
        self.pe_rss = args[3].objects.filter(from_step__from_plan=plan_id)
        rss_object = args[3]
        listed_pe_rs = list(self.pe_rs)
        len_listed_pe_rs = len(listed_pe_rs)
        cooldown_list = []
        fabacae = []
        top_tier_list = []
        sub_index = 0
        sub_index_2 = 0
        # Running (sub_item, running-index) pairs for template display.
        self.substep_indices = []
        for item in listed_pe_rs:
            pe_rss_pack = args[3].objects.filter(from_step=item)
            rss_list = []
            for sub_item in pe_rss_pack:
                rss_list.append(sub_item)
                sub_index_2 += 1
                self.substep_indices.append((sub_item, sub_index_2))
            i4 = item.order
            top_tier_list.append(i4)
            # list_appending_long fills cooldown_list/fabacae in place and
            # returns the next sub_index -- semantics assumed; see helper.
            vars = [cooldown_list, item, fabacae, sub_index]
            sub_index = list_appending_long(rss_list, vars, rss_object)
        cooldown_list.sort()
        top_tier_list.sort()
        self.clw = False
        error_len_crops = []
        # Deep copy shifted by top_tier models the rotation wrapping around.
        cooldown_list1 = copy.deepcopy(cooldown_list)
        self.top_tier = top_tier_list[-1]
        for item in cooldown_list1:
            item[3][0] += self.top_tier
        cooldown_list2 = cooldown_list + cooldown_list1
        err_tab_list = []
        err_crop_list = []
        crop_interaction_list = []
        # Crops whose cooldown exceeds the plan length become a warning
        # queryset (self.clw); entry slot [0] = cooldown, [1] = crop id.
        for item in cooldown_list:
            if item[0] > len_listed_pe_rs:
                error_len_crops.append(item[1])
                self.clw = args[2].objects.filter(id__in=error_len_crops)
        if not self.clw:
            # Pairwise pass over original + wrapped entries.
            for a, b in itertools.permutations(cooldown_list2, 2):
                # Same crop (slot [2]): propagate the larger cooldown.
                if a[2] == b[2] and a[0] < b[0] and a[0]!=0 and b[0]!=0:
                    a[0]=b[0]
                # Same crop replanted within its cooldown window -> error.
                if a[2] == b[2] and a[3][0] - b[3][0] < a[0] and a[3][0] - b[3][0] > 0:
                    level_off(self.top_tier, a, b)
                    err_tab_list.append(a[3][0])
                    err_tab_list.append(b[3][0])
                    err_crop_list.append(a + b)
                    err_crop_list.append(b + a)
                # Independently collect crop-to-crop interactions.
                pr = PlannerRelationship(top_tier=self.top_tier, a=a, b=b)
                pr.relationship(
                    given_list=crop_interaction_list,
                    relationship="crop_to_crop")
        fabs = []
        tabs = []
        self.interactions = []
        # remove_repeating(dst, src) deduplicates src into dst (in place).
        remove_repeating(fabs, fabacae)
        remove_repeating(tabs, err_tab_list)
        remove_repeating(self.interactions, crop_interaction_list)
        # Share of Fabaceae (legumes); target band is 25-33% of slots.
        fabs_percent = float(len(fabs)) / float(self.top_tier * 3)
        fabs_rounded = round(fabs_percent, 3)
        self.fabs_error = False
        if fabs_rounded >= 0.25 and fabs_rounded <= 0.33:
            pass
        else:
            # Outside the band: report the actual percentage as a string.
            self.fabs_error = int(fabs_rounded * 100)
            self.fabs_error = str(self.fabs_error) + "%"
        self.error_family_crops = {
            "e_crops": err_crop_list,
            "e_tabs": tabs,
        }
    def basic_context(self, **kwargs):
        """Return the full analysis context merged with kwargs['context']."""
        self.context = {
            "subs_indices": self.substep_indices,
            "interactions": self.interactions,
            "f_error": self.fabs_error,
            "efcs": self.error_family_crops,
            "cr_len_warning": self.clw,
            "plan": self.pe_rp_id,
            "steps": self.pe_rs,
            "substeps": self.pe_rss,
            "top_tier": self.top_tier,
        }
        self.context.update(kwargs['context'])
        return self.context
    def top_tier(self):
        # NOTE(review): shadowed by the instance attribute of the same name
        # set in __init__, so this method is unreachable on instances.
        return self.top_tier
def count_sources_pages(main_source):
    """Group page ranges by source string and hand the result to flare().

    Collects (source, pages_from, pages_to) triples from the elements of
    *main_source*, deduplicates them, groups the page ranges under each
    distinct source string, and calls the external ``flare`` helper on the
    grouped result (presumably for rendering -- returns None).
    """
    sourcelist = []
    for source in PageElement(main_source).allelements:
        sourcelist.append([source.at_data_string, str(source.pages_from), str(source.pages_to)])
    sourcelist1 = []
    # remove_repeating(dst, src) deduplicates src into dst (in place).
    remove_repeating(sourcelist1, sourcelist)
    sourcelist2 = copy.deepcopy(sourcelist1)
    sourcelist3 = []
    # Inefficient (quadratic over sources) -- improve!
    for source in sourcelist1:
        for source_bis in sourcelist2:
            if source[0] == source_bis[0]:
                if any(source[0] in sl for sl in sourcelist3):
                    # Source already grouped: append this page range;
                    # "None" (stringified) marks an open-ended range.
                    for sublist in sourcelist3:
                        if source[0] in sublist:
                            if source[2] == "None":
                                sublist[1].append((source[1],))
                            else:
                                sublist[1].append((source[1], source[2]))
                else:
                    # First occurrence starts the group. NOTE(review): this
                    # branch always stores a 2-tuple even when pages_to is
                    # "None", unlike the branch above -- confirm intended.
                    sourcelist3.append([source[0], [(source[1], source[2])]])
    sourcelist4 = []
    # Deduplicate and sort each group's page ranges.
    for source in sourcelist3:
        newsource = []
        remove_repeating(newsource, source[1])
        newsource.sort()
        sourcelist4.append([source[0], newsource])
    flare(sourcelist4)
# For the first-time load onto the server: fall back to default cooldowns
# when the admin-panel record does not exist yet.
try:
    edit_delay_sec = PageElement(RotatorAdminPanel).baseattrs.evaluated_plan_cooldown
    lurk_delay_min = PageElement(RotatorAdminPanel).baseattrs.lurk_plan_cooldown
except Exception:  # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
    edit_delay_sec = 60
    lurk_delay_min = 15
| 12,327 | 4,311 |
import copy
import math
import random
import numpy as np
import torch
from game.Player import Player
class RLPlayer(Player):
    """Game player driven by a neural Q-learning agent.

    Builds a fixed-length observation vector from the game state and
    delegates action selection / learning to NeuralQLearningAgent.
    """
    def __init__(self, name, game_state, action_space, parameters={}):
        # NOTE(review): mutable default `parameters={}` -- it is only read
        # here, but `parameters=None` with a fallback would be safer.
        super().__init__(name)
        print(f"Player {name} parameters: {parameters}")
        self.action_space = action_space
        self.device = parameters["device"] if "device" in parameters else 'cpu'
        # CNN map extractor; currently unused in the observation (see the
        # commented-out block below) but still saved/loaded with the model.
        self.map_extracter = MapFeatureExtractor(164, 164, 256).to(self.device)
        # Probe observation length once to size the agent's network input.
        observation = self.game_state_to_observation(game_state)
        self.agent = NeuralQLearningAgent(len(observation), action_space, parameters=parameters)
    def action(self, game_state, learning=False):
        """Return the action (from self.action_space) chosen for the state."""
        observation = self.game_state_to_observation(game_state)
        action = self.agent.get_action(observation, learning)
        return self.action_space[action]
    def process_transition(self, game_state, action, reward, next_game_state, done):
        """Forward one transition to the agent, mapping the string action
        to its index: left->0, right->2, anything else->1."""
        observation = self.game_state_to_observation(game_state)
        next_observation = self.game_state_to_observation(next_game_state)
        if action == "left":
            action = 0
        elif action == "right":
            action = 2
        else:
            action = 1
        self.agent.process_transition(observation, action, reward, next_observation, done)
    def game_state_to_observation(self, game_state):
        """Build the observation vector: normalised (x, y, direction) plus
        13 obstacle-ray flags, as a float32 numpy array.

        Assumes a 500x500 board and direction in degrees (values are divided
        by 500/500/360) -- TODO confirm against the game implementation.
        """
        # player_features = ["x", "y", "direction", "speed", "turn_speed", "marker_size", "no_clip"]
        player_features = ["x", "y", "direction"]
        observation = []
        # self.player_index is presumably set by the Player base class /
        # game setup -- it is not assigned in this class; verify.
        my_player = game_state["players"][self.player_index]
        observation = observation + [float(my_player[feature]) for feature in player_features]
        observation[0] = observation[0] / 500
        observation[1] = observation[1] / 500
        observation[2] = observation[2] / 360
        # min_dist_to_border = np.min([my_player["x"], (500 - my_player["x"]), my_player["y"], (500 - my_player["y"])])
        #
        # observation.append(min_dist_to_border)
        x, y = my_player["x"], my_player["y"]
        radius = 60
        board = game_state["board"]
        features = []
        # Cast 13 rays over a 270-degree fan centred on the heading; each
        # ray samples 10 points out to `radius` and flags any obstacle hit.
        for angle in [math.radians(my_player["direction"] + (a - 135)) for a in np.linspace(0, 270, 13)]:
            coords = [round(x + radius * math.sin(angle)), round(y + radius * math.cos(angle))]
            interpolated = np.linspace((x, y), coords, 10)
            try:
                is_obstacle = any([board[round(p[0]), round(p[1])] > 0 for p in interpolated[1:]])
            except IndexError:
                # Off-board samples count as obstacles (walls).
                is_obstacle = True
            features.append(float(is_obstacle))
        observation += features
        # board = game_state["board"].astype(np.float32)
        # board[board > 0] = 255.0
        # board = resize(board, (164, 164))
        # board[0, :] = 255.0
        # board[-1, :] = 255.0
        # board[:, 0] = 255.0
        # board[:, -1] = 255.0
        #
        # board_tensor = torch.from_numpy(board)
        # board_tensor = board_tensor.view(1, 1, board_tensor.shape[0], board_tensor.shape[1]).to(self.device)
        # map_features = self.map_extracter.forward(board_tensor)
        # observation = observation + map_features.squeeze().tolist()
        return np.array(observation, dtype=np.float32)
    def save_model_weights(self, output_path):
        """Save Q-network and map-extractor weights to <output_path>Q / me."""
        torch.save(self.agent.q_dash.state_dict(), output_path + "Q")
        torch.save(self.map_extracter.state_dict(), output_path + "me")
    def load_model_weights(self, path, learning=False):
        """Load weights saved by save_model_weights; switch to eval mode
        unless further learning is intended."""
        self.agent.q_dash.load_state_dict(torch.load(path + "Q"))
        self.map_extracter.load_state_dict(torch.load(path + "me"))
        if not learning:
            self.agent.q_dash.eval()
            self.map_extracter.eval()
class MapFeatureExtractor(torch.nn.Module):
    """CNN that compresses a 1-channel occupancy map into a flat vector of
    ``output_features`` values.

    Four conv(5x5, pad 1) + ReLU + maxpool(2) stages; a 164x164 input
    shrinks to 64 x 8 x 8 = 4096 features before the final linear layer.
    """

    def __init__(self, map_width, map_height, output_features, hidden_count=128):
        super(MapFeatureExtractor, self).__init__()
        stages = []
        for in_ch, out_ch in ((1, 8), (8, 16), (16, 32), (32, 64)):
            stages.extend([
                torch.nn.Conv2d(in_ch, out_ch, kernel_size=5, stride=1, padding=1),
                torch.nn.ReLU(inplace=True),
                torch.nn.MaxPool2d(kernel_size=2, stride=2),
            ])
        # Built in the same order as the original literal Sequential, so
        # state_dict keys (cnn_layers.0 .. cnn_layers.11) are unchanged.
        self.cnn_layers = torch.nn.Sequential(*stages)
        self.linear_layers = torch.nn.Sequential(
            torch.nn.Linear(4096, output_features)
        )

    def forward(self, x):
        """Run the conv stack, flatten per sample, project to features."""
        features = self.cnn_layers(x)
        flat = features.view(features.size(0), -1)
        return self.linear_layers(flat)
class ActionApproximation(torch.nn.Module):
    """Three-layer MLP mapping a state observation to per-action Q-values."""

    def __init__(self, state_observations_count, action_count, hidden_count=512):
        super(ActionApproximation, self).__init__()
        # Attribute names kept so saved state_dicts stay loadable.
        self.ReLU = torch.nn.ReLU()
        self.dense0 = torch.nn.Linear(state_observations_count, hidden_count)
        self.dense1 = torch.nn.Linear(hidden_count, hidden_count)
        self.dense2 = torch.nn.Linear(hidden_count, action_count)

    def forward(self, x):
        """Cast input to float32, then dense -> ReLU -> dense -> ReLU -> dense."""
        hidden = self.ReLU(self.dense0(x.float()))
        hidden = self.ReLU(self.dense1(hidden))
        return self.dense2(hidden)
class Agent:
    """Abstract interface for a reinforcement-learning agent."""

    def __init__(self):
        pass

    def process_transition(self, observation, action, reward, next_observation, done):
        """Consume one (s, a, r, s', done) transition; subclasses override."""
        raise NotImplementedError()

    def get_action(self, observation, learning):
        """Choose an action for *observation*; subclasses override."""
        raise NotImplementedError()
class NeuralQLearningAgent(Agent):
    """Deep Q-learning agent with optional target-network freezing,
    double Q-learning and experience-replay batch learning.
    """
    def __init__(self, observation_len, action_space, parameters={}):
        # NOTE(review): mutable default `parameters={}` -- only read here,
        # but `parameters=None` with a fallback would be safer.
        super().__init__()
        # torch.manual_seed(42)
        learning_episodes = parameters["learning_episodes"] if "learning_episodes" in parameters else 200
        self.device = parameters["device"] if "device" in parameters else 'cpu'
        self.action_space = action_space
        # PARAMETERS (all overridable via the `parameters` dict)
        self.network_freezing = parameters["network_freezing"] if "network_freezing" in parameters else True
        self.double_q_learning = parameters["double_q_learning"] if "double_q_learning" in parameters else True
        self.batch_learning = parameters["batch_learning"] if "batch_learning" in parameters else True
        self.initial_epsilon = parameters["epsilon"] if "epsilon" in parameters else 0.7
        self.epsilon = self.initial_epsilon
        self.gamma = parameters["gamma"] if "gamma" in parameters else 0.99
        self.learning_rate = parameters["lr"] if "lr" in parameters else 0.001
        self.memory_size = parameters["memory_size"] if "memory_size" in parameters else 10000
        # Replay settings: wait for 1000 transitions, sample 128 per update.
        self.memory_start_learning = 1000
        self.batch_size = 128
        self.batch_refresh_interval = 1
        self.network_freezing_i = 3000
        # Online Q-network; q_dash2 is the frozen target copy, refreshed
        # every network_freezing_i steps in update_approximator().
        self.q_dash = ActionApproximation(observation_len, len(action_space)).to(self.device)
        if self.network_freezing or self.double_q_learning:
            self.q_dash2 = copy.deepcopy(self.q_dash).to(self.device)
        self.loss_function = torch.nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.q_dash.parameters(), lr=self.learning_rate)
        # NOTE(review): exploration_weights is never read in this class.
        self.exploration_weights = [1 / 3, 1 / 3, 1 / 3]
        # Ring-buffer replay memory (list + write index once full).
        self.memory = []
        self.memory_index = 0
        # Exponential epsilon decay towards ~2% over `learning_episodes`.
        self.epsilon_decay_parameter = math.log(
            0.02) / learning_episodes  # (learning_episodes - (learning_episodes // 4))
        # Bookkeeping for progress reporting.
        self.total_episode_reward = 0
        self.total_reward_memory = []
        self.max_episode_reward = 0
        self.episodes_finished = 0
        self.steps = 0
    def update_approximator(self, batch):
        """Run one gradient step of the (double/frozen/vanilla) DQN update
        on *batch*, an object-dtype array of transition rows."""
        observation, action, reward, next_observation, done = batch[:, 0], batch[:, 1], batch[:, 2], batch[:, 3], batch[
            :, 4]
        observation = torch.from_numpy(np.array(observation.tolist())).to(self.device)
        next_observation = torch.from_numpy(np.array(next_observation.tolist())).to(self.device)
        # Q(s, a) for the actions actually taken.
        y_pred = self.q_dash.forward(observation)
        action = torch.from_numpy(action[:, np.newaxis].astype(np.int64)).to(self.device)
        score = torch.gather(y_pred, 1, action)
        score = torch.squeeze(score, 1)
        if self.double_q_learning:
            # Double DQN: online net picks argmax, target net evaluates it.
            y_n = self.q_dash.forward(next_observation).to(self.device)
            action_n = torch.argmax(y_n, 1, keepdim=True)
            y_next = self.q_dash2.forward(next_observation).to(self.device)
        elif self.network_freezing:
            y_next = self.q_dash2.forward(next_observation).to(self.device)
        else:
            y_next = self.q_dash.forward(next_observation).to(self.device)
        # Terminal states contribute no future value.
        done = done.astype(np.bool_)
        y_next[done] = 0.0
        reward = torch.from_numpy(reward.astype(np.float32)).to(self.device)
        if self.double_q_learning:
            score_next = torch.gather(y_next, 1, action_n)
            score_next = torch.squeeze(score_next, 1)
            target = reward + (self.gamma * score_next)
        else:
            target = reward + (self.gamma * torch.max(y_next, 1).values)
        target = target.float()
        self.optimizer.zero_grad()
        loss = self.loss_function(score, target)
        loss.backward()
        self.optimizer.step()
        # Periodically sync the frozen target network with the online one.
        if (self.network_freezing or self.double_q_learning) and self.steps % self.network_freezing_i == 0:
            self.q_dash2.load_state_dict(self.q_dash.state_dict())
    def process_transition(self, observation, action, reward, next_observation, done):
        """Store one transition (replay buffer or single-element batch),
        update episode statistics / epsilon, and train when due."""
        self.steps += 1
        self.total_episode_reward += reward
        if done:
            self.episodes_finished += 1
            self.max_episode_reward = max(self.total_episode_reward, self.max_episode_reward)
            self.total_reward_memory.append(self.total_episode_reward)
            self.total_episode_reward = 0
            # Decay epsilon per finished episode down to a 0.05 floor.
            if self.epsilon > 0.05:
                self.epsilon = self.initial_epsilon * math.exp(self.episodes_finished * self.epsilon_decay_parameter)
            if self.episodes_finished % 50 == 0:
                print(f"Episode={self.episodes_finished}, epsilon={round(self.epsilon, 4)}, \
                    total_steps={self.steps}, max_reward={round(self.max_episode_reward, 4)}, steps_per_episode={round(self.steps / self.episodes_finished, 2)}")
        if self.batch_learning:
            el = (observation, action, reward, next_observation, done)
            if len(self.memory) < self.memory_size:
                self.memory.append(el)
                # Do not train until the buffer has enough samples.
                if len(self.memory) < self.memory_start_learning:
                    return
            else:
                # Buffer full: overwrite oldest entry (ring buffer).
                self.memory[self.memory_index] = el
                self.memory_index = (self.memory_index + 1) % self.memory_size
            if self.steps % self.batch_refresh_interval == 0:
                batch = np.array(random.sample(self.memory, self.batch_size), dtype=object)
            else:
                return
        else:
            # One element batch
            batch = np.array((observation, action, reward, next_observation, done), dtype=object)[np.newaxis, :]
        self.update_approximator(batch)
    def get_action(self, observation, learning):
        """Epsilon-greedy action index: random while learning with
        probability epsilon, otherwise argmax of the Q-network."""
        if learning and random.random() < self.epsilon:
            # NOTE(review): hard-codes three actions regardless of
            # len(self.action_space) -- confirm the action space is size 3.
            action = random.choices([0, 1, 2], k=1)[0]
            return action
        observation = torch.from_numpy(observation).to(self.device)
        y_pred = self.q_dash.forward(observation)
        action = torch.argmax(y_pred).item()
        return action
| 11,972 | 3,948 |
from functools import partial
from typing import Tuple
import covasim as cv
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
def get_current_infected_ratio():
    """Return the ratio of currently infected people in Germany.

    Figures hard-coded from:
    - infections: https://www.deutschland.de/de/topic/politik/corona-in-deutschland-zahlen-und-fakten
    - population: https://www.destatis.de/DE/Themen/Gesellschaft-Umwelt/Bevoelkerung/Bevoelkerungsstand/_inhalt.html
    """
    currently_infected = 651500
    total_population = 83100000
    return currently_infected / total_population
delta_variant = cv.variant('delta', days=0)  # delta is the dominant variant in germany
# Baseline covasim parameters shared by all scenarios.
baseline_pars = dict(
    start_day='2022-01-01',
    n_days=60,
    pop_type='hybrid',
    pop_size=10_000,
    # NOTE(review): hard-codes 10000 -- keep in sync with pop_size above.
    pop_infected=int(get_current_infected_ratio() * 10000),
    location='Germany',
    use_waning=True,  # use dynamically calculated immunity
    n_beds_hosp=80,  # https://tradingeconomics.com/germany/hospital-beds - 8 per 1000 people
    n_beds_icu=62,  # https://tradingeconomics.com/germany/icu-beds - 620 per 100.000 people
    variants=[delta_variant],
)
def run_simulations(sim: cv.Sim, n_runs: int, confidence_level: float, method: str = "t") -> cv.MultiSim:
    """Run *sim* ``n_runs`` times and collapse results to mean +/- bounds.

    Parameters
    ----------
    sim : cv.Sim
        Simulation to replicate.
    n_runs : int
        Number of independent runs.
    confidence_level : float
        Confidence level for the interval, e.g. 0.9.
    method : str
        "t" for a Student's t interval (default); anything else uses the
        normal distribution.

    Returns
    -------
    cv.MultiSim whose results have been reduced via ``msim.mean``.
    """
    msim = cv.MultiSim(sim)
    msim.run(n_runs=n_runs)
    # Pass the confidence level positionally: SciPy renamed the keyword
    # from `alpha` to `confidence` in 1.9 and removed `alpha` in 1.11,
    # so `alpha=` raises TypeError on current SciPy.
    if method == "t":  # use t-distribution
        bounds = st.t.interval(confidence_level, df=n_runs - 1)[1]
    else:  # use normal distribution
        bounds = st.norm.interval(confidence_level)[1]
    # Scale the quantile to the standard error of the mean.
    bounds = bounds / np.sqrt(n_runs)
    msim.mean(bounds=bounds)
    return msim
def run_base_and_intervention(base_sim: cv.Sim, intervention_sim: cv.Sim, n_runs: int = 100,
                              confidence_level: float = 0.9) -> cv.MultiSim:
    """Run the baseline and the intervention scenarios and bundle both
    already-averaged sims into one MultiSim for joint plotting."""
    averaged = [
        run_simulations(scenario, n_runs, confidence_level).base_sim
        for scenario in (base_sim, intervention_sim)
    ]
    return cv.MultiSim(averaged)
# calculate by hand for reference
def calculate_mean_and_confidence(msim: cv.MultiSim, result_key: str, method: str = "t",
                                  confidence_level: float = 0.9) -> Tuple[np.array, np.array, np.array]:
    """Per-timestep mean and confidence band of one result series.

    Parameters
    ----------
    msim : cv.MultiSim
        Multi-run simulation; each sim must expose results[result_key].
    result_key : str
        Name of the result series to aggregate.
    method : str
        "t" for a Student's t interval (default); otherwise normal.
    confidence_level : float
        Confidence level for the band, e.g. 0.9.

    Returns
    -------
    (mean, lower_band, upper_band) arrays over timesteps.
    """
    data = np.array([s.results[result_key] for s in msim.sims], dtype=float)
    data_mean = np.mean(data, axis=0)
    data_sem = st.sem(data, axis=0)
    # Positional confidence level: SciPy renamed `alpha` -> `confidence`
    # in 1.9 and removed `alpha` in 1.11, so the old keyword now raises.
    if method == "t":
        conf_intervals = st.t.interval(confidence_level, df=data.shape[0] - 1, loc=data_mean, scale=data_sem)
    else:
        conf_intervals = st.norm.interval(confidence_level, loc=data_mean, scale=data_sem)
    lower_band, upper_band = conf_intervals
    return data_mean, lower_band, upper_band
# plot by hand for reference
def plot_with_bands(base_msim: cv.MultiSim, intervention_msim: cv.MultiSim, result_key: str, ax=None,
                    colors_base=("b", "c"), colors_intervention=("r", "tab:orange"), show_dates=False):
    """Plot mean curves with confidence bands for baseline vs intervention.

    Each colour pair is (line colour, band colour). When show_dates is
    False, interventions are drawn as markers/lines via covasim's
    Intervention.plot_intervention instead of date formatting.
    Returns the matplotlib axis.
    """
    if ax is None:
        _, ax = plt.subplots()
    ax.set_title(result_key)
    if show_dates:
        x = base_msim.results['date']
    else:
        x = base_msim.results['t']
    for sim, c in ((base_msim, colors_base), (intervention_msim, colors_intervention)):
        data_mean, lower_band, upper_band = calculate_mean_and_confidence(sim, result_key)
        ax.fill_between(x, lower_band, upper_band, alpha=.75, linewidth=0, label=f"{sim.label} band", color=c[1])
        ax.plot(x, data_mean, label=sim.label, color=c[0])
    if show_dates:
        cv.date_formatter(sim=base_msim.base_sim, ax=ax)
    else:
        # show intervention as vertical line
        for intervention in intervention_msim.base_sim.get_interventions():
            intervention.plot_intervention(intervention_msim.base_sim, ax)
    ax.legend()
    return ax
def _inf_thresh(self: cv.Intervention, sim: cv.Sim, thresh: int):
    '''Dynamically toggle the intervention around an infection threshold.

    Activates the intervention when the number of currently infectious
    people exceeds *thresh*, deactivates it when the count falls back, and
    records every toggle day in ``self.plot_days``. Returns the latest
    [t_on, t_off] pair.
    See https://docs.idmod.org/projects/covasim/en/latest/tutorials/tut_interventions.html#Dynamic-triggering'''
    above_threshold = sim.people.infectious.sum() > thresh
    if above_threshold and not self.active:
        self.active = True
        self.t_on = sim.t
        self.plot_days.append(self.t_on)
    elif not above_threshold and self.active:
        self.active = False
        self.t_off = sim.t
        self.plot_days.append(self.t_off)
    return [self.t_on, self.t_off]
def inf_thresh_callback(thresh: int = 500):
    """Bind *thresh* into `_inf_thresh`, yielding a `(self, sim)` callable
    suitable for covasim's dynamic intervention triggering."""
    bound = partial(_inf_thresh, thresh=thresh)
    return bound
def init_intervention_for_inf_thresh(c: cv.Intervention):
    """Setup attributes for `inf_thresh_callback`.

    Sets the on/off timestamps to NaN, marks the intervention inactive,
    and starts an empty list of toggle days; returns *c* for chaining.
    """
    c.t_on, c.t_off = np.nan, np.nan
    c.active = False
    c.plot_days = []
    return c
| 4,761 | 1,713 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 16 11:45:09 2021
@author: Michael ODonnell
@purpose: scrape NBA draft picks by year
"""
# import needed libraries
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
# function to scrape a list of years of NBA Drafts
def scrape_draft_data(start_year = 2017, end_year = 2020, export = True):
    """Scrape NBA draft results from basketball-reference.com.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of draft years; argument order does not matter.
    export : bool
        When True, also write the combined table to a CSV file.

    Returns
    -------
    pandas.DataFrame with one row per draft pick plus `round` and `year`.
    """
    # turn inputs into a list of years (handles either argument order)
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty dataframe (duplicate labels mirror the site's
    # career-totals vs per-game column groups)
    final_df = pd.DataFrame(columns = ['Pk', 'Tm', 'Player', 'College', 'Yrs',
                                       'G', 'MP', 'PTS', 'TRB', 'AST','FG%',
                                       '3P%', 'FT%', 'MP', 'PTS', 'TRB', 'AST',
                                       'WS', 'WS/48', 'BPM', 'VORP', 'round',
                                       'year'])
    # scrape one year at a time
    for y in years:
        # define URL of draft class
        url = f'https://www.basketball-reference.com/draft/NBA_{y}.html'
        response = requests.get(url)
        # if response code != 200, print and stop scraping further years
        if response.status_code != 200:
            print("invalid url response code:", response.status_code)
            break
        soup = BeautifulSoup(response.content, features = 'lxml')
        # the second header row carries the real column names
        column_names = [th.getText() for th in soup.findAll('tr', limit=2)[1].findAll('th')]
        table_rows = soup.findAll('tr')
        draft_picks = [[td.getText() for td in row.findAll('td')]
                       for row in table_rows]
        # helper: find the row indices that separate draft rounds
        def find_draft_rounds(draft_picks:list):
            # empty rows in the table indicate a new draft round
            round_cutoffs = [index for index, value in enumerate(draft_picks[2:])
                             if value == []]
            # separators come in pairs; keep every second one
            round_cutoffs = round_cutoffs[::2]
            print(f"total rounds of the {y} draft:", len(round_cutoffs)+1)
            print(f"picks per round in {y} draft:", round_cutoffs[0])
            return round_cutoffs
        round_cutoffs = find_draft_rounds(draft_picks)
        # remove separator/short rows from draft_picks
        draft_picks = [e for e in draft_picks if len(e) > 10]
        draft_picks_df = pd.DataFrame(draft_picks, columns = column_names[1:])
        print(f"total draft picks in the {y} draft:", len(draft_picks_df["Pk"]))
        # create column for draft round and draft year
        draft_picks_df["round"] = 1
        draft_picks_df["year"] = y
        draft_picks_df["Pk"] = pd.to_numeric(draft_picks_df["Pk"])
        # assign correct draft round to each row
        for index, picks in enumerate(round_cutoffs):
            draft_picks_df.loc[(draft_picks_df.Pk > picks), "round"] = int(index)+2
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat;
        # the former bare `except:` silently hid that API break
        try:
            final_df = pd.concat([final_df, draft_picks_df])
            print(f"draft year {y} added to final dataframe")
        except Exception:
            print(f"error with draft year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(2)
    # rename positional columns to unambiguous names (duplicate labels such
    # as 'MP'/'PTS' are collapsed by the dict, matching original behaviour)
    final_df = final_df.rename(columns = {final_df.columns[0]: "Pick",
                                          final_df.columns[1]: "Team",
                                          final_df.columns[4]: "Years",
                                          final_df.columns[5]: "Career_Games",
                                          final_df.columns[8]: "Career_Rb",
                                          final_df.columns[9]: "Career_Ast",
                                          final_df.columns[13]: "MPG",
                                          final_df.columns[14]: "PPG",
                                          final_df.columns[15]: "RbsPG",
                                          final_df.columns[16]: "AstPG",
                                          final_df.columns[7]: "Career_Pts",
                                          final_df.columns[6]: "Career_Minutes"})
    # export and return the dataframe
    if export:
        export_name = f"nba_draft_data_{start_year}_to_{end_year}.csv"
        final_df.to_csv(export_name, index = False)
    return final_df
# function to scrape a list of years for NBA player total stats
def scrape_player_total_stats(start_year = 2017, end_year = 2020,
                              export = True, sleep_time = 2):
    """Scrape per-player season-total stats from basketball-reference.com.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of seasons; argument order does not matter.
    export : bool
        When True, write the combined table to a CSV file.
    sleep_time : float
        Seconds to pause between requests.

    Returns
    -------
    pandas.DataFrame with one row per player per season (mid-season trade
    duplicates dropped, first row kept) plus a `year` column.
    """
    # turn inputs into a list of years
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty final dataframe to collect each season into
    player_total_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
                                                 'GS', 'MP', 'FG', 'FGA', 'FG%', '3P',
                                                 '3PA', '3P%', '2P', '2PA', '2P%',
                                                 'eFG%', 'FT', 'FTA', 'FT%', 'ORB',
                                                 'DRB', 'TRB', 'AST', 'STL', 'BLK',
                                                 'TOV', 'PF', 'PTS', 'year'])
    # loop through each year in the list
    for y in years:
        totals_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_totals.html'
        totals_response = requests.get(totals_url)
        print(f"totals year {y} url response code:", totals_response.status_code)
        soup = BeautifulSoup(totals_response.content, features = 'lxml')
        # grab table column names and rows
        column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
        table_rows = soup.findAll('tr')
        player_stats = [[td.getText() for td in row.findAll('td')]
                        for row in table_rows]
        # drop header/separator rows
        player_stats = [e for e in player_stats if len(e) > 10]
        player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
        player_stats_df["year"] = y
        print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
        # traded players appear once per team; keep the first row only
        non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
                                                        keep = 'first')
        # quick pause before scraping next year
        time.sleep(sleep_time)
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat;
        # the former bare `except:` silently hid that API break
        try:
            player_total_stats = pd.concat([player_total_stats, non_dup_stats])
            print(f"{y} total player stats data added to dataset")
            print("length of total dataframe:", len(player_total_stats['Player']))
        except Exception:
            print(f"error with year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(sleep_time*.5)
    # export and return the dataframe
    if export:
        export_name = f"player_totals_{start_year}_to_{end_year}.csv"
        player_total_stats.to_csv(export_name, index = False)
    return player_total_stats
# function to scrape a list of years for NBA player per-game stats
def scrape_player_per_game_stats(start_year = 2018, end_year = 2021,
                                 export = True, sleep_time = 2):
    """Scrape per-player per-game stats from basketball-reference.com.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of seasons; argument order does not matter.
    export : bool
        When True, write the combined table to a CSV file.
    sleep_time : float
        Seconds to pause between requests.

    Returns
    -------
    pandas.DataFrame with one row per player per season (mid-season trade
    duplicates dropped, first row kept) plus a `year` column.
    """
    # turn inputs into a list of years
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty final dataframe to collect each season into
    player_per_game_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
                                                    'GS', 'MP', 'FG', 'FGA', 'FG%', '3P',
                                                    '3PA', '3P%', '2P', '2PA', '2P%',
                                                    'eFG%', 'FT', 'FTA', 'FT%', 'ORB',
                                                    'DRB', 'TRB', 'AST', 'STL', 'BLK',
                                                    'TOV', 'PF', 'PTS', 'year'])
    # loop through each year in the list
    for y in years:
        per_game_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_per_game.html'
        per_game_response = requests.get(per_game_url)
        print(f"per game stats year {y} url response code:", per_game_response.status_code)
        soup = BeautifulSoup(per_game_response.content, features = 'lxml')
        # grab table column names and rows
        column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
        table_rows = soup.findAll('tr')
        player_stats = [[td.getText() for td in row.findAll('td')]
                        for row in table_rows]
        # drop header/separator rows
        player_stats = [e for e in player_stats if len(e) > 10]
        player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
        player_stats_df["year"] = y
        print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
        # traded players appear once per team; keep the first row only
        non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
                                                        keep = 'first')
        # quick pause before scraping next year
        time.sleep(sleep_time)
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat;
        # the former bare `except:` silently hid that API break
        try:
            player_per_game_stats = pd.concat([player_per_game_stats, non_dup_stats])
            print(f"{y} player per game stats data added to dataset")
            print("length of total dataframe:", len(player_per_game_stats['Player']))
        except Exception:
            print(f"error with year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(sleep_time)
    # export and return the dataframe
    if export:
        export_name = f"player_per_game_{start_year}_to_{end_year}.csv"
        player_per_game_stats.to_csv(export_name, index = False)
    return player_per_game_stats
# function to scrape a list of years for NBA player advanced stats
def scrape_player_advanced_stats(start_year = 2019, end_year = 2021,
                                 export = True, sleep_time = 2):
    """Scrape per-player advanced stats from basketball-reference.com.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of seasons; argument order does not matter.
    export : bool
        When True, write the combined table to a CSV file.
    sleep_time : float
        Seconds to pause between requests.

    Returns
    -------
    pandas.DataFrame with one row per player per season (mid-season trade
    duplicates dropped, first row kept) plus a `year` column.
    """
    # turn inputs into a list of years
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty final dataframe to collect each season into
    player_advanced_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm', 'G',
                                                    'MP', 'PER', 'TS%', '3PAr', 'FTr',
                                                    'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%',
                                                    'BLK%', 'TOV%', 'USG%', 'OWS', 'DWS',
                                                    'WS', 'WS/48', 'OBPM', 'DBPM', 'BPM',
                                                    'VORP', 'year'])
    # loop through each year in the list
    for y in years:
        advanced_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_advanced.html'
        # keep the URL and the response in separate names (the original
        # shadowed `advanced_url` with the response object)
        advanced_response = requests.get(advanced_url)
        print(f"advanced stats year {y} url response code:", advanced_response.status_code)
        soup = BeautifulSoup(advanced_response.content, features = 'lxml')
        # grab table column names and rows
        column_names = [th.getText() for th in soup.findAll('tr', limit=2)[0].findAll('th')]
        table_rows = soup.findAll('tr')
        player_stats = [[td.getText() for td in row.findAll('td')]
                        for row in table_rows]
        # drop header/separator rows
        player_stats = [e for e in player_stats if len(e) > 10]
        player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
        # drop the site's empty spacer column
        player_stats_df = player_stats_df.drop(player_stats_df.columns[18],
                                               axis = 1)
        player_stats_df["year"] = y
        print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
        # traded players appear once per team; keep the first row only
        non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
                                                        keep = 'first')
        # quick pause before scraping next year
        time.sleep(sleep_time)
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat;
        # the former bare `except:` silently hid that API break
        try:
            player_advanced_stats = pd.concat([player_advanced_stats, non_dup_stats],
                                              sort=False)
            print(f"{y} advanced player stats data added to dataset")
            print("length of total dataframe:", len(player_advanced_stats['Player']))
        except Exception:
            print(f"error with year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(sleep_time)
    # export and return the dataframe
    if export:
        export_name = f"player_advanced_{start_year}_to_{end_year}.csv"
        player_advanced_stats.to_csv(export_name, index = False)
    return player_advanced_stats
# function to scrape a list of years for NBA player shooting stats
def scrape_player_shooting_stats(start_year = 2019, end_year = 2021,
                                 export = True, sleep_time = 2):
    """Scrape per-player shooting-distance stats from basketball-reference.com.

    Parameters
    ----------
    start_year, end_year : int
        Inclusive range of seasons; argument order does not matter.
    export : bool
        When True, write the combined table to a CSV file.
    sleep_time : float
        Seconds to pause between requests.

    Returns
    -------
    pandas.DataFrame with one row per player per season (mid-season trade
    duplicates dropped, first row kept) plus a `year` column.
    """
    # turn inputs into a list of years
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty final dataframe to collect each season into
    player_shooting_stats = pd.DataFrame(columns = ['Player', 'Pos', 'Age', 'Tm',
                                                    'G', 'MP', 'FG%', 'Avg_Distance',
                                                    '3P_FGassisted%', '3-10_FG%',
                                                    '10-16_FG%', '16-3P_FG%', '3P_FG%',
                                                    'Dunk_attempt%', '3P_FGassisted%',
                                                    '3-10_FG%', '10-16_FG%', '16-3P_FG%',
                                                    '3P_FG%', 'Dunk_attempt%',
                                                    '3P_FGassisted%', 'Dunk_attempt%',
                                                    'Dunk_attempts', 'Heave_makes',
                                                    'Corener3_3P_attempt%', 'Corner3_FG%',
                                                    'Heave_attempts', 'Heave_makes',
                                                    'year'])
    # loop through each year in the list
    for y in years:
        shooting_url = f'https://www.basketball-reference.com/leagues/NBA_{y}_shooting.html'
        # keep the URL and the response in separate names (the original
        # shadowed `shooting_url` with the response object)
        shooting_response = requests.get(shooting_url)
        print(f"shooting stats year {y} url response code:", shooting_response.status_code)
        soup = BeautifulSoup(shooting_response.content, features = 'lxml')
        # second header row carries the real column names
        column_names = [th.getText() for th in soup.findAll('tr', limit=2)[1].findAll('th')]
        table_rows = soup.findAll('tr')
        player_stats = [[td.getText() for td in row.findAll('td')]
                        for row in table_rows]
        # drop header/separator rows
        player_stats = [e for e in player_stats if len(e) > 10]
        player_stats_df = pd.DataFrame(player_stats, columns = column_names[1:])
        # drop the site's empty spacer column
        player_stats_df = player_stats_df.drop(player_stats_df.columns[8],
                                               axis = 1)
        # rename positional columns to descriptive names (a stray dead
        # `9: '0-3_attempt%'` integer key from the original was removed)
        column_mapping = {player_stats_df.columns[7] : 'Avg_Distance',
                          player_stats_df.columns[8] : '2P_attempt%',
                          player_stats_df.columns[9] : '0-3_attempt%',
                          player_stats_df.columns[10] : '3-10_attempt%',
                          player_stats_df.columns[11] : '10-16_attempt%',
                          player_stats_df.columns[12] : '16-3P_attempt%',
                          player_stats_df.columns[13] : '3P_attempt%',
                          player_stats_df.columns[14] : '2P_FG%',
                          player_stats_df.columns[15] : '3-10_FG%',
                          player_stats_df.columns[16] : '10-16_FG%',
                          player_stats_df.columns[17] : '16-3P_FG%',
                          player_stats_df.columns[18] : '3P_FG%',
                          player_stats_df.columns[19] : '2P_FGassisted%',
                          player_stats_df.columns[20] : '3P_FGassisted%',
                          player_stats_df.columns[21] : 'Dunk_attempt%',
                          player_stats_df.columns[22] : 'Dunk_attempts',
                          player_stats_df.columns[24] : 'Corener3_3P_attempt%',
                          player_stats_df.columns[25] : 'Corner3_FG%',
                          player_stats_df.columns[26] : 'Heave_attempts',
                          player_stats_df.columns[27] : 'Heave_makes'}
        player_stats_df = player_stats_df.rename(columns = column_mapping)
        player_stats_df["year"] = y
        print(len(player_stats_df['Player']), f"in the {y} season added to dataframe")
        # traded players appear once per team; keep the first row only
        non_dup_stats = player_stats_df.drop_duplicates(subset = 'Player',
                                                        keep = 'first')
        # quick pause before scraping next year
        time.sleep(sleep_time)
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat;
        # the former bare `except:` silently hid that API break
        try:
            player_shooting_stats = pd.concat([player_shooting_stats, non_dup_stats],
                                              sort=False)
            print(f"{y} player shooting stats data added to dataset")
            print("length of total dataframe:", len(player_shooting_stats['Player']))
        except Exception:
            print(f"error with year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(sleep_time)
    # export and return the dataframe
    if export:
        export_name = f"player_shooting_{start_year}_to_{end_year}.csv"
        player_shooting_stats.to_csv(export_name, index = False)
    return player_shooting_stats
# function to scrape All Stars by year
def scrape_all_stars(export = True):
    """Scrape the Wikipedia list of NBA All-Stars into a DataFrame.

    Parameters:
        export: when True, also write the result to 'nba_all_stars.csv'.

    Returns:
        pandas.DataFrame with one row per All-Star plus hall_of_fame /
        active_player / hof_eligible indicator columns.
    """
    # grab wikipedia URL of all-stars
    url = 'https://en.wikipedia.org/wiki/List_of_NBA_All-Stars'
    # create bs4 object using requests and bs4
    response = requests.get(url)
    print("all-stars url response code:", response.status_code)
    soup = BeautifulSoup(response.text, 'html.parser')
    # grab full table (index 1 is the player table on this page)
    nba_table = soup.findAll('table')[1]
    # turn table to dataframe
    all_stars_df = pd.read_html(str(nba_table))
    all_stars_df = pd.DataFrame(all_stars_df[0])
    # add hall of fame denomination to dataframe; Wikipedia marks
    # '*' = Hall of Famer, '^' = active player, '†' = not yet HOF-eligible
    for idx, row in all_stars_df.iterrows():
        if '*' in row["Player"]:
            all_stars_df.loc[idx, "hall_of_fame"] = 1
            all_stars_df.loc[idx, "active_player"] = 0
            all_stars_df.loc[idx, "hof_eligible"] = 1
        elif '^' in row["Player"]:
            all_stars_df.loc[idx, "hall_of_fame"] = 0
            all_stars_df.loc[idx, "active_player"] = 1
            all_stars_df.loc[idx, "hof_eligible"] = 0
        elif '†' in row["Player"]:
            all_stars_df.loc[idx, "hall_of_fame"] = 0
            all_stars_df.loc[idx, "active_player"] = 0
            all_stars_df.loc[idx, "hof_eligible"] = 0
        else:
            all_stars_df.loc[idx, "hall_of_fame"] = 0
            all_stars_df.loc[idx, "active_player"] = 0
            all_stars_df.loc[idx, "hof_eligible"] = 1
    # remove marker characters from the Player column.
    # regex=False is required: '*' and '^' are regex metacharacters and
    # a regex replace would either error or match incorrectly.
    for c in "*^†":
        all_stars_df["Player"] = all_stars_df["Player"].str.replace(c, '', regex=False)
    # delete extra columns (keyword form; positional axis was removed in pandas 2.0)
    all_stars_df = all_stars_df.drop(columns='Reference')
    # rename columns
    all_stars_df = all_stars_df.rename(columns =
                                       {all_stars_df.columns[1]: "Selections",
                                        all_stars_df.columns[2]: "Years"})
    # export and return the dataframe
    if export == True:
        export_name = "nba_all_stars.csv"
        all_stars_df.to_csv(export_name, index = False)
    return all_stars_df
# function to scrape a list of years for NBA PLayer shooting stats
def scrape_player_salaries(start_year = 2015, end_year = 2016,
                           export = True, sleep_time = 2):
    """Scrape hoopshype.com player salaries for a range of seasons.

    Parameters:
        start_year: first season start year (inclusive).
        end_year: last season start year (inclusive).
        export: when True, also write the result to a CSV file.
        sleep_time: seconds to pause between requests (politeness delay).

    Returns:
        pandas.DataFrame with Player, Salary, Rank and Year columns.
    """
    # turn inputs into a list of years (descending when end_year > start_year,
    # preserving the original traversal order)
    if end_year > start_year:
        years = list(range(end_year, start_year-1,-1))
    elif end_year < start_year:
        years = list(range(end_year, start_year+1))
    else:
        years = [start_year]
    # create empty final dataframe to append to in for loop
    player_contracts = pd.DataFrame(columns = ['Player', 'Salary', 'Rank', 'Year'])
    # loop through each year in the list
    for y in years:
        # grab URLs for year y (seasons are labelled e.g. 2015-2016)
        y1 = y+1
        contracts_url = f'https://hoopshype.com/salaries/players/{y}-{y1}/'
        # create bs4 object using requests and bs4
        response = requests.get(contracts_url)
        print(f"contracts year {y} url response code:", response.status_code)
        html = response.text
        soup = BeautifulSoup(html, features = 'html.parser')
        # grab table cells once; player names are every 4th cell starting
        # at index 5, salaries every 4th cell starting at index 6
        salary_table = soup.find('table')
        cells = salary_table.find_all("td")
        length = len(cells)
        players = [cells[i].text.strip() for i in range(5, length, 4)]
        salaries = [cells[i].text.strip() for i in range(6, length, 4)]
        # turn rows into dataframe
        salary_df = pd.DataFrame({"Player" : players,
                                  "Salary" : salaries,
                                  "Rank" : list(range(1, len(salaries)+1))})
        # add year to dataframe
        salary_df["Year"] = y
        print(len(salary_df['Player']), f"in the {y} season added to dataframe")
        # quick pause before scraping next year
        time.sleep(sleep_time)
        try:
            # pd.concat replaces DataFrame.append (removed in pandas 2.0)
            player_contracts = pd.concat([player_contracts, salary_df], sort=False)
            print(f"{y} player contracts added to dataset")
            print("length of total dataframe:", len(player_contracts['Player']))
        except Exception:
            print(f"error with year {y}, data not collected")
        # sleep for short duration before moving onto next year
        print('='*5, f"end of year {y}", '='*5)
        time.sleep(sleep_time)
    # export and return the dataframe
    if export == True:
        export_name = f"player_contracts_{start_year}_to_{end_year}" + ".csv"
        player_contracts.to_csv(export_name, index = False)
    return player_contracts
import urlparse,urllib2,re
import datetime,logging
from marcbots import MARCImportBot
from pymarc import Field
class AlexanderStreetPressBaseBot(MARCImportBot):
    """
    `AlexanderStreetPressBaseBot` encapsulates the basic
    MARC record changes used by child classes.
    """

    def __init__(self,marc_file,**kwargs):
        """
        Creates instance of `AlexanderStreetPressBaseBot`

        Parameters:
        - `marc_file`: Alexander Street Press MARC records
        - `asp_code`: Alexander Street Press Code, default is asp
        """
        MARCImportBot.__init__(self,marc_file)
        # dict.get with a default replaces the deprecated has_key idiom
        # (removed in Python 3) with identical semantics.
        self.asp_code = kwargs.get('asp_code', 'asp')
        # Lazily populated by getResolvedURL on the first record processed.
        self.resolved_baseurl = None

    def getResolvedURL(self,
                       marc_record):
        """
        Extracts the base resolved URL from the record's first 856 field
        by following its redirect once, and caches it for later records.

        Parameters:
        - `marc_record`: MARC record, required
        """
        field856 = marc_record.get_fields('856')[0]
        raw_url = field856.get_subfields('u')[0]
        redirect = urllib2.urlopen(raw_url)
        redirect_url = urlparse.urlparse(redirect.geturl())
        # Keep only the query-parameter name; record ids are appended later.
        query_prefix = redirect_url.query.split("=")[0]
        self.resolved_baseurl = "%s://%s%s?%s" % (redirect_url.scheme,
                                                  redirect_url.netloc,
                                                  redirect_url.path,
                                                  query_prefix)

    def remove440(self,marc_record):
        """
        Method removes 440 Series Statement field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        return self.__remove_field__(marc_record=marc_record,
                                     tag='440')

    def remove490(self,marc_record):
        """
        Method removes 490 Series Statement field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        return self.__remove_field__(marc_record=marc_record,
                                     tag='490')

    def remove830(self,marc_record):
        """
        Method removes MARC 830 field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        return self.__remove_field__(marc_record=marc_record,
                                     tag='830')

    def validate506(self,marc_record):
        """
        Replaces any existing 506 fields with a single access-restriction
        note field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        marc_record = self.__remove_field__(marc_record=marc_record,
                                            tag='506')
        new506 = Field(tag='506',
                       indicators=[' ',' '],
                       subfields=['a','Access limited to subscribers.'])
        marc_record.add_field(new506)
        return marc_record

    def validate533(self,marc_record):
        """
        Removes subfield n from each 533 field if present.

        Parameters:
        - `marc_record`: MARC record, required
        """
        all533fields = marc_record.get_fields('533')
        for field in all533fields:
            # Remove, edit, and re-add so the record stays consistent.
            marc_record.remove_field(field)
            field.delete_subfield('n')
            marc_record.add_field(field)
        return marc_record

    def validate710(self,marc_record):
        """
        Adds MARC 710 field, Corporate Heading, for Alexander Street Press.

        Parameters:
        - `marc_record`: MARC Record, required
        """
        new710field = Field(tag='710',
                            indicators=['2',' '],
                            subfields=['a','Alexander Street Press.'])
        marc_record.add_field(new710field)
        return marc_record

    def validate730(self,
                    marc_record,
                    uniform_title):
        """
        Adds MARC 730 field, Added entry: uniform title.

        Parameters:
        - `marc_record`: MARC record, required
        - `uniform_title`: Uniform title, required
        """
        new730field = Field(tag='730',
                            indicators=['0',' '],
                            subfields=['a',uniform_title])
        marc_record.add_field(new730field)
        return marc_record

    def validateURLs(self,
                     marc_record,
                     proxy_location,
                     public_note=None):
        """
        Rewrites each 856 URL against the cached resolved base URL plus the
        record id taken from the original query string, then delegates to
        processURLs for proxying.

        Parameters:
        - `marc_record`: MARC record, required
        - `proxy_location`: Proxy location, required
        - `public_note`: Optional public note passed through to processURLs
        """
        all856s = marc_record.get_fields('856')
        for field856 in all856s:
            raw_url = urlparse.urlparse(field856.get_subfields('u')[0])
            # Record id is the second ';'-separated token of the query.
            record_id = raw_url.query.split(";")[1]
            new_url = "%s=%s" % (self.resolved_baseurl,record_id)
            field856.delete_subfield('u')
            field856.add_subfield('u',new_url)
        if public_note:
            return self.processURLs(marc_record=marc_record,
                                    proxy_location=proxy_location,
                                    public_note=public_note)
        return self.processURLs(marc_record=marc_record,
                                proxy_location=proxy_location)
class AlexanderStreetPressMusicBot(AlexanderStreetPressBaseBot):
    """
    The `AlexanderStreetPressMusicBot` reads MARC records for
    Alexander Street Press music databases including American
    Song, Jazz Music Library, among others and modifies to
    CC standards.
    """

    # Maps display name -> ASP database code and campus proxy host.
    DATABASES = {'American song':{'code':'amso',
                                  'proxy':'0-amso.alexanderstreet.com.tiger.coloradocollege.edu'},
                 'Classical music library':{'code':'clmu',
                                            'proxy':'0-clmu.alexanderstreet.com.tiger.coloradocollege.edu'},
                 'Contemporary world music':{'code':'womu',
                                             'proxy':'0-womu.alexanderstreet.com.tiger.coloradocollege.edu'},
                 'Jazz music library':{'code':'jazz',
                                       'proxy':'0-jazz.alexanderstreet.com.tiger.coloradocollege.edu'},
                 'Smithsonian global sounds for libraries':{'code':'glmu',
                                                            'proxy':'0-glmu.alexanderstreet.com.tiger.coloradocollege.edu'}}

    def __init__(self,**kwargs):
        """
        Creates instance of `AlexanderStreetPressMusicBot`

        Parameters:
        - `marc_file`: MARC file, required
        - `type_of`: ASP music database code (e.g. 'jazz'), required
        """
        self.type_of = kwargs.get('type_of')
        # Reverse lookup: database code -> display name.
        # dict.items() replaces the Python-2-only iteritems().
        self.code_dict = {}
        for k,v in self.DATABASES.items():
            self.code_dict[v['code']] = k
        if self.type_of is not None:
            # 'in' replaces the deprecated has_key idiom.
            if self.type_of not in self.code_dict:
                raise ValueError('Unknown database: %s' % self.type_of)
        AlexanderStreetPressBaseBot.__init__(self,
                                             marc_file=kwargs.get('marc_file'),
                                             asp_code=self.type_of)

    def getResolvedURL(self,
                       marc_record):
        """
        Overrides parent method, ASP music databases resolve to a different
        URL pattern (host + /View/) than other ASP databases.

        Parameters:
        - `marc_record`: MARC record, required
        """
        field856 = marc_record.get_fields('856')[0]
        raw_url = field856.get_subfields('u')[0]
        redirect = urllib2.urlopen(raw_url)
        redirect_url = urlparse.urlparse(redirect.geturl())
        self.resolved_baseurl = 'http://%s/View/' % redirect_url.netloc.lower()

    def processRecord(self,
                      marc_record):
        """
        Processes a single MARC record for Alexander Street Press Music
        databases, applying the full CC clean-up sequence.

        Parameters:
        - `marc_record`: MARC record, required
        """
        if not self.resolved_baseurl:
            self.getResolvedURL(marc_record)
        marc_record = self.validate006(marc_record)
        marc_record = self.validate007(marc_record)
        marc_record = self.remove020(marc_record)
        marc_record = self.validate245(marc_record)
        marc_record = self.validate300(marc_record)
        marc_record = self.remove440(marc_record)
        marc_record = self.remove490(marc_record)
        marc_record = self.validate506(marc_record)
        marc_record = self.validate533(marc_record)
        marc_record = self.validate710(marc_record)
        marc_record = self.validate730(marc_record,
                                       '%s.' % self.type_of)
        marc_record = self.remove830(marc_record)
        marc_record = self.validateURLs(marc_record)
        return marc_record

    def remove020(self,marc_record):
        """
        Removes MARC 020 ISBN field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        return self.__remove_field__(marc_record=marc_record,
                                     tag='020')

    def validate006(self,marc_record):
        """
        Replaces any 006 fields with the CC standard value for sound format.

        Parameters:
        - `marc_record`: MARC record, required
        """
        all006fields = marc_record.get_fields('006')
        for field in all006fields:
            marc_record.remove_field(field)
        new006 = Field(tag='006',
                       indicators=[' ',' '],
                       data=r'm        h        ')
        marc_record.add_field(new006)
        return marc_record

    def validate007(self,marc_record):
        """
        Normalizes 007 fields that describe a remote electronic resource
        ('cr' prefix) to the CC standard value.

        :param marc_record: MARC record, required
        :rtype marc_record:
        """
        all007s = marc_record.get_fields('007')
        for field007 in all007s:
            if field007.data.startswith('cr'):
                field007.data = r'cr           u'
        return marc_record

    def validate300(self,marc_record):
        """
        Replaces MARC 300 field with subfield a set to 'Streaming audio'.

        Parameters:
        - `marc_record`: MARC Record, required
        """
        marc_record = self.__remove_field__(marc_record=marc_record,
                                            tag='300')
        new300 = Field(tag='300',
                       indicators=[' ',' '],
                       subfields=['a','Streaming audio'])
        marc_record.add_field(new300)
        return marc_record

    def validateURLs(self,marc_record):
        """
        Validates 856 fields specifically for the various Alexander
        Street Press music databases, rewriting each URL against the
        resolved /View/ base and the record id from the query string.

        Parameters:
        - `marc_record`: MARC record, required
        """
        proxy_location = self.DATABASES[self.code_dict[self.type_of]]['proxy']
        all856s = marc_record.get_fields('856')
        for field856 in all856s:
            raw_url = urlparse.urlparse(field856.get_subfields('u')[0])
            record_id = raw_url.query.split(";")[1]
            new_url = "%s%s" % (self.resolved_baseurl,record_id)
            field856.delete_subfield('u')
            field856.add_subfield('u',new_url)
        return self.processURLs(marc_record=marc_record,
                                proxy_location=proxy_location,
                                public_note='Listen online')
class BlackDramaBot(AlexanderStreetPressBaseBot):
    """
    Processes MARC records from the 2nd Edition of the Alexander Street
    Press Black Drama and supplements database.
    """

    def __init__(self,**kwargs):
        """
        Creates instance of `BlackDramaBot`

        Parameters:
        - `marc_file`: MARC file, required
        """
        AlexanderStreetPressBaseBot.__init__(self,
                                             marc_file=kwargs.get('marc_file'),
                                             asp_code='aspbd2')

    def processRecord(self,
                      marc_record):
        """
        Runs a single record through the Black Drama 2nd Edition clean-up
        sequence and returns the modified record.

        Parameters:
        - `marc_record`: MARC record, required
        """
        if not self.resolved_baseurl:
            self.getResolvedURL(marc_record)
        # Single-argument fix-ups, applied in the required order.
        for fix in (self.validate006, self.validate007, self.validate245,
                    self.validate250, self.remove440, self.remove490,
                    self.validate506, self.validate533, self.validate710):
            marc_record = fix(marc_record)
        marc_record = self.validate730(marc_record,
                                       'Black drama.')
        marc_record = self.remove830(marc_record)
        return self.validateURLs(marc_record,
                                 '0-solomon.bld2.alexanderstreet.com.tiger.coloradocollege.edu')

    def validate250(self,marc_record):
        """
        Adds an edition statement in a new 250 field.

        Parameters:
        - `marc_record`: MARC record, required
        """
        edition_field = Field(tag='250',
                              indicators=[' ',' '],
                              subfields=['a','2nd ed.'])
        marc_record.add_field(edition_field)
        return marc_record
class WomenSocialMovementsBot(AlexanderStreetPressBaseBot):
    """
    Processes MARC records from the Alexander Street Press Women and
    Social Movements database.
    """

    def __init__(self,**kwargs):
        """
        Creates instance of `WomenSocialMovementsBot`

        Parameters:
        - `marc_file`: MARC file
        """
        AlexanderStreetPressBaseBot.__init__(self,
                                             marc_file=kwargs.get('marc_file'),
                                             asp_code='aspw')

    def processRecord(self,
                      marc_record):
        """
        Runs a single record through the Women and Social Movements
        clean-up sequence and returns the modified record.

        Parameters:
        - `marc_record`: MARC record
        """
        if not self.resolved_baseurl:
            self.getResolvedURL(marc_record)
        # Single-argument fix-ups first; URL rewriting and added entries
        # follow in the required order.
        for fix in (self.validate007, self.validate245, self.remove440,
                    self.remove490, self.validate506, self.validate533):
            marc_record = fix(marc_record)
        marc_record = self.validateURLs(marc_record,
                                        "0-asp6new.alexanderstreet.com.tiger.coloradocollege.edu")
        marc_record = self.validate710(marc_record)
        marc_record = self.validate730(marc_record,
                                       "Women and social movements in the United States 1600-2000: Scholar's edition.")
        return self.remove830(marc_record)

    def validate001(self,marc_record):
        """
        Drops the first 001 control field, following Prospector best
        practices.

        Parameters:
        - `marc_record`: MARC record, required
        """
        first001 = marc_record.get_fields('001')[0]
        marc_record.remove_field(first001)
        return marc_record
class GarlandEWMOBot(AlexanderStreetPressBaseBot):
    """
    Processes the MARC record for the Alexander Street Press Garland
    Encyclopedia of Music World Online electronic resource.
    """

    def __init__(self,**kwargs):
        """
        Creates instance of `GarlandEWMOBot`

        Parameters:
        - `marc_file`: MARC file
        """
        AlexanderStreetPressBaseBot.__init__(self,
                                             marc_file=kwargs.get('marc_file'),
                                             asp_code='aspglnd')

    def getResolvedURL(self,
                       marc_record):
        """
        Overrides the parent: this resource resolves to a host + /View/
        URL pattern rather than the query-string pattern other ASP
        databases use.

        Parameters:
        - `marc_record`: MARC record, required
        """
        first856 = marc_record.get_fields('856')[0]
        source_url = first856.get_subfields('u')[0]
        landing = urllib2.urlopen(source_url)
        parsed = urlparse.urlparse(landing.geturl())
        self.resolved_baseurl = 'http://%s/View/' % parsed.netloc.lower()

    def processRecord(self,
                      marc_record):
        """
        Runs a single record through the Garland Encyclopedia clean-up
        sequence and returns the modified record.

        Parameters:
        - `marc_record`: MARC record
        """
        if not self.resolved_baseurl:
            self.getResolvedURL(marc_record)
        for fix in (self.validate007, self.validate245, self.remove440,
                    self.remove490, self.validate506, self.validate533,
                    self.validateURLs, self.validate710):
            marc_record = fix(marc_record)
        marc_record = self.validate730(marc_record,
                                       "The Garland Encyclopedia of World Music Online")
        return self.remove830(marc_record)

    def validateURLs(self,marc_record):
        """
        Rewrites each 856 URL against the resolved /View/ base plus the
        record id from the query string, then routes through the campus
        proxy via processURLs.

        Parameters:
        - `marc_record`: MARC record, required
        """
        proxy_location = "0-glnd.alexanderstreet.com.tiger.coloradocollege.edu"
        for field856 in marc_record.get_fields('856'):
            original = urlparse.urlparse(field856.get_subfields('u')[0])
            record_id = original.query.split(";")[1]
            field856.delete_subfield('u')
            field856.add_subfield('u',"%s%s" % (self.resolved_baseurl,record_id))
        return self.processURLs(marc_record=marc_record,
                                proxy_location=proxy_location)
| 19,651 | 6,013 |
class TestGetter:
    """Interface for producing the next test.

    Useful because callers may either evolve a next test or play a normal
    game of chess with the user.  (abc.ABC was considered but felt like
    overkill for a single-method interface.)
    """

    def getNextTest(self, opponents, previousTest):
        """Subclasses must override this; the base class has no strategy."""
        raise NotImplementedError
from sqlalchemy import mapper, util, Query, exceptions
import types
def monkeypatch_query_method(ctx, class_, name):
    """Attach Query method *name* to *class_* as a classmethod proxy.

    The generated classmethod builds a Query bound to the context's
    current session at call time and forwards all arguments to it.
    """
    def do(cls, *args, **kwargs):
        bound_query = Query(class_, session=ctx.current)
        return getattr(bound_query, name)(*args, **kwargs)
    setattr(class_, name, classmethod(do))
def monkeypatch_objectstore_method(ctx, class_, name):
    """Attach session method *name* to *class_* as an instance method.

    The wrapper resolves the context's current session at call time and
    forwards the instance to it.  'flush' is special-cased because the
    session's flush expects a list of objects, not a single instance.
    """
    def do(self, *args, **kwargs):
        session = ctx.current
        target = [self] if name == "flush" else self
        return getattr(session, name)(target, *args, **kwargs)
    setattr(class_, name, do)
def assign_mapper(ctx, class_, *args, **kwargs):
    """Map *class_* with SQLAlchemy and monkeypatch ActiveRecord-style helpers.

    Pops the bot-specific 'validate' and 'extension' kwargs before passing
    the rest through to mapper().  Attaches query helpers (get, select, ...)
    as classmethods and objectstore helpers (flush, save, ...) as instance
    methods, then returns the created mapper.
    """
    # Must pop before mapper() sees kwargs; 'validate' is not a mapper option.
    validate = kwargs.pop('validate', False)
    # Only synthesize an __init__ when the class does not define one itself.
    if not isinstance(getattr(class_, '__init__'), types.MethodType):
        def __init__(self, **kwargs):
            for key, value in kwargs.items():
                if validate:
                    # Reject kwargs that do not correspond to mapped properties.
                    if not key in self.mapper.props:
                        raise exceptions.ArgumentError("Invalid __init__ argument: '%s'" % key)
                setattr(self, key, value)
        class_.__init__ = __init__
    # Merge any caller-supplied extension(s) with the context's extension.
    extension = kwargs.pop('extension', None)
    if extension is not None:
        extension = util.to_list(extension)
        extension.append(ctx.mapper_extension)
    else:
        extension = ctx.mapper_extension
    m = mapper(class_, extension=extension, *args, **kwargs)
    class_.mapper = m
    # `query` builds a Query bound to the context's *current* session each call.
    class_.query = classmethod(lambda cls: Query(class_, session=ctx.current))
    # Query-style helpers become classmethods delegating to a fresh Query.
    for name in ['get', 'select', 'select_by', 'selectfirst', 'selectfirst_by', 'selectone', 'get_by', 'join_to', 'join_via', 'count', 'count_by', 'options', 'instances']:
        monkeypatch_query_method(ctx, class_, name)
    # Session/objectstore helpers become instance methods on the class.
    for name in ['flush', 'delete', 'expire', 'refresh', 'expunge', 'merge', 'save', 'update', 'save_or_update']:
        monkeypatch_objectstore_method(ctx, class_, name)
    return m
| 1,915 | 575 |
# Event-name constants describing undo/redo stack state transitions.
# Emptiness signals (e.g. for enabling/disabling undo/redo UI actions):
UNDO_EMPTY = 'undostack-empty'
UNDO_NOT_EMPTY = 'undostack-not-empty'
REDO_EMPTY = 'redostack-empty'
REDO_NOT_EMPTY = 'redostack-not-empty'
# Content-change signals:
UNDO_CHANGED = 'undostack-changed'
REDO_CHANGED = 'redostack-changed'
| 210 | 100 |
import discord
from discord.ext import commands
from os import system
class Mod:
    """Moderation cog: hidden owner/utility commands plus delete logging."""

    def __init__(self, client):
        self.client = client

    # ------------------------------- COMMANDS ------------------------------- #
    # Commands need to have the @commands.command decorator
    @commands.command(hidden=True, pass_context=True)
    async def stop(self, ctx):
        """Delete the invoking message, then log the bot out."""
        channel = ctx.message.channel
        # The most recent message in the channel is the invoking command,
        # so the loop leaves it bound to `message` for deletion.
        async for message in self.client.logs_from(channel, 1):
            pass
        await self.client.delete_message(message)
        await self.client.logout()

    @commands.command(hidden=True)
    async def load(self, ext):
        """Load extension `ext`, reporting the outcome on stdout."""
        try:
            self.client.load_extension(ext)
            print(f'{ext} loaded successfully.')
        except Exception as e:
            print(f'{ext} cannot be loaded. [{e}]')

    @commands.command(hidden=True)
    async def unload(self, ext):
        """Unload extension `ext`, reporting the outcome on stdout."""
        # BUG FIX: the first parameter was misspelled 'selc', so the body's
        # references to self.client raised NameError whenever this ran.
        try:
            self.client.unload_extension(ext)
            print(f'{ext} unloaded successfully.')
        except Exception as e:
            print(f'{ext} cannot be unloaded. [{e}]')

    @commands.command(hidden=True, pass_context=True)
    async def clear(self, ctx, amount):
        """Clears an amount of messages"""
        channel = ctx.message.channel
        # +1 so the invoking command message is deleted as well.
        messages = []
        async for message in self.client.logs_from(channel, int(amount) + 1):
            messages.append(message)
        for message in messages:
            await self.client.delete_message(message)

    # -------------------------------- EVENTS -------------------------------- #
    # Events do not need a decorator in cogs
    async def on_message_delete(self, msg):
        """Log every deleted message to stdout."""
        author = msg.author
        content = msg.content
        channel = msg.channel
        print(f'{author} removed: "{content}" from {channel}')
def setup(client):
    """Standard discord.py extension entry point: register the Mod cog."""
    cog = Mod(client)
    client.add_cog(cog)
#
#
# @client.command(pass_context=True)
# async def clear_all(ctx):
# channel = ctx.message.channel
# count = 0
#
# while True:
# messages = []
# async for message in client.logs_from(channel, 100):
# messages.append(message)
# length = len(messages)
#
# if length == 1:
# await client.delete_message(messages[0])
# elif length > 100:
# print(f'ERROR: Messages had {length} messages which is over the 100 limit')
# messages = messages[:100]
# await client.delete_messages(messages)
#
# if length == 0:
# break
# else:
# print(length, 'messages being deleted')
# count += length
# print('Total messages deleted:', count)
#
#
# @client.command()
# async def spam(*args):
# amount = int(args[0])
# if len(args) > 1:
# time = float(args[1])
# else:
# time = 1
# for i in range(int(amount)):
# await client.say('Spam!')
# await asyncio.sleep(float(time))
#
#
# @client.command()
# async def list_ch():
# s = '\n'.join([ch for ch in all_channels])
# await client.say(s)
#
#
# @client.command()
# async def tell(*args):
# room = args[0]
# text = ' '.join(args[1:])
#
# await client.send_message(all_channels[room], text)
#
#
# @client.command()
# async def bot_game(*args):
# game = discord.Game(name=' '.join(args))
# await client.change_presence(game=game)
# class Fun:
# def __init__(self, client):
# self.client = client
#
# @commands.commands()
# async def
| 3,564 | 1,094 |
"""
Example showing use of the jsgf.ext DictationGrammar class for matching and
compiling rules that use regular JSGF expansions like Literal and Sequence as
well as Dictation expansions.
"""
from jsgf import PublicRule, Sequence
from jsgf.ext import Dictation, DictationGrammar
def main():
    """Demonstrate DictationGrammar matching on literal + dictation parts."""
    # Create a simple rule using a Dictation expansion.
    rule = PublicRule("Hello_X", Sequence("hello", Dictation()))

    # Create a new DictationGrammar using the simple rule.
    grammar = DictationGrammar([rule])

    # Print the compiled grammar
    print(grammar.compile())

    # Match against some speech strings.
    # find_matching_rules has an optional second parameter for advancing to
    # the next part of the rule, which is set to False here.
    matching = grammar.find_matching_rules("hello", False)
    print("Matching rule: %s" % matching[0])  # first part of rule

    # Go to the next part of the rule.
    matching[0].set_next()

    # Match the dictation part. This can be anything.
    matching = grammar.find_matching_rules("world")
    print("Matching rule: %s" % matching[0])

    # The entire match and the original rule's current_match value will both
    # be 'hello world'.  (BUG FIX: the second half of this comment used to be
    # a stray bare string literal on its own line.)
    print(matching[0].entire_match)
    print(rule.expansion.current_match)
# Run the demonstration only when executed as a script.
if __name__ == '__main__':
    main()
| 1,316 | 398 |
"""!
@brief Integration-tests for Hierarchical Sync (HSyncNet) algorithm.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
import matplotlib;
matplotlib.use('Agg');
from pyclustering.cluster.tests.hsyncnet_templates import HsyncnetTestTemplates;
from pyclustering.nnet import solve_type;
from pyclustering.samples.definitions import SIMPLE_SAMPLES;
from pyclustering.core.tests import remove_library;
class HsyncnetIntegrationTest(unittest.TestCase):
    """Integration tests exercising HSyncNet clustering through the C core.

    Each test delegates to HsyncnetTestTemplates with a sample, the expected
    number of clusters, expected cluster sizes, solver type, and flags for
    dynamic collection and C-core usage.  (Stray trailing semicolons were
    removed; they are not idiomatic Python.)
    """

    def testClusteringSampleSimple1WithoutCollectingByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, False, True)

    def testClusteringSampleSimple1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, True, True)

    def testClusteringOneAllocationSampleSimple1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 1, [10], solve_type.FAST, 5, 0.3, True, True)

    def testClusteringSampleSimple2ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3, [10, 5, 8], solve_type.FAST, 5, 0.2, True, True)

    def testClusteringOneAllocationSampleSimple2ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 1, [23], solve_type.FAST, 5, 0.2, True, True)

    def testClusteringOneDimensionDataSampleSimple7ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, 2, [10, 10], solve_type.FAST, 5, 0.3, True, True)

    def testClusteringTheSameData1ByCore(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE12, 3, [5, 5, 5], solve_type.FAST, 5, 0.3, True, True)

    def testDynamicLengthCollectingByCore(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, True, True)

    def testDynamicLengthWithoutCollectingByCore(self):
        HsyncnetTestTemplates.templateDynamicLength(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, None, 5, 0.3, False, True)

    def testProcessingWhenLibraryCoreRemoved(self):
        self.runRemovedLibraryCoreTest()

    @remove_library
    def runRemovedLibraryCoreTest(self):
        HsyncnetTestTemplates.templateClustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2, [5, 5], solve_type.FAST, 5, 0.3, False, True)
| 2,534 | 1,006 |
import common.assertions as assertions
def run(cfg):
    """Validate the admin password held in *cfg*.

    Thin wrapper around common.assertions.validateAdminPassword;
    presumably raises on a bad password — confirm in common.assertions.
    """
    assertions.validateAdminPassword(cfg)
# Read seven integers and split them into evens (lista[0]) and odds (lista[1]).
lista = [[], []]
valor = 0
for c in range(1, 8):
    valor = int(input(f'Digite o {c}o. valor: '))
    # valor % 2 is 0 for even and 1 for odd — exactly the sublist index.
    lista[valor % 2].append(valor)
print('-=' * 30)
print(f'Os valores pares digitados foram: {sorted(lista[0])}')
print(f'Os valores ímpres digitados foram: {sorted(lista[1])}')
# meu código (funcionou também):
# lista = [[], []]
# temp = []
# for c in range(1, 8):
# temp.append(int(input(f'Digite o {c}o valor: ')))
# if temp[c] % 2 == 0:
# lista[0].append(temp[c])
# else:
# lista[1].append(temp[c])
# print('-=' * 30)
# sorted(lista)
# print(f'Os valores pares digitados foram: {sorted(lista[0])}')
# print(f'Os valores ímpres digitados foram: {sorted(lista[1])}')
| 758 | 314 |
'''Top-k kendall-tau distance.
This module generalise kendall-tau as defined in [1].
It returns a score: 1 for identical (in the sense of top-k) lists and -1 if completely different.
Example:
    Simply call kendall_top_k with two same-length arrays of ratings (or also rankings), length of the top elements k (default is the maximum length possible), and p (default is 0.5, see [1]) as parameters:
import kendall
a = np.array([1,2,3,4,5])
b = np.array([5,4,3,2,1])
kendall.kendall_top_k(a,b,k=4)
Author: Alessandro Checco
https://github.com/AlessandroChecco
References
[1] Fagin, Ronald, Ravi Kumar, and D. Sivakumar. 'Comparing top k lists.' SIAM Journal on Discrete Mathematics 17.1 (2003): 134-160.
'''
# pylint: disable=E1101
# pylint incorrectly identifies some types as tuples
import math
import numpy as np
import scipy.stats as stats
import scipy.special as special
def kendall_top_k(a, b, k=None, kIsNonZero=False, p=0.5):
    '''
    kendall_top_k(np.array,np.array,k,p)
    This function generalises kendall-tau as defined in
    [1] Fagin, Ronald, Ravi Kumar, and D. Sivakumar. 'Comparing top k lists.' SIAM Journal on Discrete Mathematics 17.1 (2003): 134-160.
    It returns (score, k): score is 1 for identical (in the sense of top-k) lists and -1 if completely different.
    Example:
        Simply call it with two same-length arrays of ratings (or also rankings),
        length of the top elements k (default is the maximum length possible), and p (default is 0.5, see [1]) as parameters:
        $ a = np.array([1,2,3,4,5])
        $ b = np.array([5,4,3,2,1])
        $ kendall_top_k(a,b,k=4)
    If the kIsNonZero option is True, k is set to the amount of non-zero values in a or b, depending on which has least.
    Raises NameError (kept for backward compatibility with existing callers)
    when the two arrays have different lengths.
    '''
    a = np.array(a)
    b = np.array(b)
    # Fail fast before any k computation when lengths differ.
    if a.size != b.size:
        raise NameError('The two arrays need to have same lengths')
    if kIsNonZero:
        # Use the smaller non-zero count as the cutoff (computed once;
        # the original evaluated count_nonzero redundantly).
        k = min(np.count_nonzero(a), np.count_nonzero(b))
    elif k is None:
        k = a.size
    k = min(k, a.size)
    # Indices of the k largest entries of each array (unordered).
    a_top_k = np.argpartition(a, -k)[-k:]
    b_top_k = np.argpartition(b, -k)[-k:]
    common_items = np.intersect1d(a_top_k, b_top_k)
    only_in_a = np.setdiff1d(a_top_k, common_items)
    only_in_b = np.setdiff1d(b_top_k, common_items)
    # case 1: disagreement among items in both top-k lists, rescaled from
    # kendalltau's [-1, 1] to a [0, 1] penalty, weighted by |common|^2.
    kendall = (1 - (stats.kendalltau(a[common_items], b[common_items])[0] / 2 + 0.5)) * common_items.size**2
    if np.isnan(kendall):  # degenerate case with only one item (not defined by Kendall)
        kendall = 0
    # case 2 (& 3): penalties for items appearing in only one top-k list.
    test = 0
    for i in common_items:
        for j in only_in_a:
            if a[i] < a[j]:
                test += 1
        for j in only_in_b:
            if b[i] < b[j]:
                test += 1
    kendall += test
    # case 4: optimistic/pessimistic penalty (parameter p) for pairs found
    # in neither common set.
    kendall += 2 * p * special.binom(k - common_items.size, 2)
    # normalization over all considered pairs
    kendall /= (only_in_a.size + only_in_b.size + common_items.size)**2
    kendall = -2 * kendall + 1  # map to [-1, 1]: 1 identical, -1 opposite
    return (kendall, k)
| 3,216 | 1,222 |
import unittest
import numpy as np
import bayesnet as bn
class TestMatMul(unittest.TestCase):
    """Checks forward values and backpropagated gradients of `@` (matmul)."""

    def test_matmul(self):
        lhs = np.random.rand(10, 3)
        rhs = np.random.rand(3, 5)
        upstream = np.random.rand(10, 5)

        # Parameter on the left operand: dL/dlhs = upstream @ rhs.T
        lhs_param = bn.Parameter(lhs)
        out = lhs_param @ rhs
        self.assertTrue((out.value == lhs @ rhs).all())
        out.backward(upstream)
        self.assertTrue((lhs_param.grad == upstream @ rhs.T).all())

        # Parameter on the right operand: dL/drhs = lhs.T @ upstream
        rhs_param = bn.Parameter(rhs)
        out = lhs @ rhs_param
        self.assertTrue((out.value == lhs @ rhs).all())
        out.backward(upstream)
        self.assertTrue((rhs_param.grad == lhs.T @ upstream).all())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 619 | 242 |
class char:
    """Simple ability-score block with a 'raise the weakest stat to 14' rule."""

    def __init__(self):
        # Default ability scores.
        self.str = 15
        self.dex = 15
        self.con = 14
        self.wis = 15
        self.int = 15
        self.cha = 15

    def raise_stat(self):
        """Set the first lowest ability (str/dex/con/int/wis/cha order) to 14."""
        order = ("str", "dex", "con", "int", "wis", "cha")
        scores = [getattr(self, name) for name in order]
        # index() returns the first occurrence, matching the original's
        # break-on-first-minimum behaviour.
        weakest = order[scores.index(min(scores))]
        setattr(self, weakest, 14)
        print("Prints after for loop")

    def change_stat(self):
        """Reset charisma to its default of 15."""
        self.cha = 15
# Scratch/demo area: one live instance plus experiments left commented out.
newchar = char()
#newchar.raise_stat()
# print(newchar.cha)
# newchar.raise_stat()
# print(newchar.cha)
#
# class_type = None
# list(class_type)
# print(class_type, type(class_type))
#
# listthing = [0, 1, 2, 3, 4, 5]
#
# for i in listthing:
#     if i == 2:
#         listthing.append(1)
#     elif i == 1:
#         print("I FOUND A ONE! HOPEFULLY I'LL FIND ANOTHER")
#     elif i == 3:
#         listthing.remove(i)
#     print(listthing)
#
# import random
#
# featuredict = {1:"Amphibian",2:"Bird",3:"Fish",4:"Insect",5:"Mammal",6:"Reptile",7:"Spider",8:"Exotic"}
# print(random.choice(featuredict))
def returns_tuple():
    """Demonstrate that a bare comma-separated return yields a (5, 6, 7) tuple."""
    return 5, 6, 7
# Demonstrate tuple return and unpacking; output is identical to the
# original four separate print statements.
print("Function output", returns_tuple())
x, y, z = returns_tuple()
for _label, _value in (("x", x), ("y", y), ("z", z)):
    print(_label, "=", _value)
from kivy.app import App
from kivy.config import Config
# Fixed-size, non-resizable login window; must be configured before other
# kivy modules are imported.
Config.set('graphics', 'width', '500')
Config.set('graphics', 'height', '640')
Config.set('graphics', 'resizable', False)
Config.set('kivy','window_icon','data/friday/res/icon.ico')
#from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.properties import StringProperty, BooleanProperty
import h5py
import os
# Project root derived by stripping the last 27 characters of this file's
# absolute path. NOTE(review): fragile — breaks if the file is moved or
# renamed; confirm the expected directory layout before changing.
root_path = os.path.realpath(__file__)
root_path = root_path[:len(root_path)- 27]
from kivy.core.window import Window
# Module-level state shared between screens: the logged-in user dict and
# the in-memory copy of the users database.
g_user = None
dbg = None
class Manager(ScreenManager):
    """Root screen manager; its screens are declared in the companion .kv file."""
    pass
class Greet(Screen):
    """Greeting screen; layout is defined entirely in the companion .kv file."""
    pass
class Register(Screen):
    """Sign-up screen: validates the form and appends a new user row.

    A database row is [first, last, username, password, access_key,
    access_secret]; the whole table is rewritten to users.hdf5 on success
    and per-user like/dislike datasets are created.
    """
    first = StringProperty("")
    last = StringProperty("")
    user = StringProperty("")
    passwd = StringProperty("")
    passwd2 = StringProperty("")
    message = StringProperty("")

    def __init__(self, **kwargs):
        super(Register, self).__init__(**kwargs)
        self.database = dbg

    def _clear_fields(self):
        """Blank every input field (shared by all exit paths)."""
        self.first = ""
        self.last = ""
        self.user = ""
        self.passwd = ""
        self.passwd2 = ""

    def _fail(self, message):
        """Show a validation error and clear the form."""
        self.message = message
        self._clear_fields()

    def sign_up_click(self):
        """Validate the form; on success persist the new account to HDF5.

        The original repeated the five-line field-reset block six times;
        the duplication is factored into _fail()/_clear_fields().
        """
        global g_user
        global dbg
        self.database = dbg
        for entry in self.database:
            if entry[2] == self.user:
                self._fail("Username taken. Use something else.")
                return
        if self.passwd != self.passwd2:
            self._fail("Passwords don't match.")
            return
        if self.user == "":
            self._fail("Username can't be empty")
            return
        if self.first == "":
            self._fail("First name can't be empty")
            return
        if self.last == "":
            self._fail("Last name can't be empty")
            return
        if self.passwd == "":
            self._fail("Password can't be empty")
            return
        self.database.append([str(self.first), str(self.last), str(self.user), str(self.passwd), "0" * 100, ""])
        # HDF5 stores byte strings: encode before writing, decode after.
        for i in range(len(self.database)):
            self.database[i] = [attrib.encode("utf8") for attrib in self.database[i]]
        with h5py.File(str(root_path + "data/friday/users.hdf5"), "w") as users_file:
            users_file.create_dataset("users", data=self.database)
        liked = [[str(self.user).encode("utf8"), str("0" * 100).encode("utf8"), "".encode("utf8")]]
        with h5py.File(str(root_path + "data/friday/likes.hdf5"), "a") as file:
            file.create_dataset(str(self.user), data=liked)
        disliked = [[str(self.user).encode("utf8"), str("0" * 100).encode("utf8"), "".encode("utf8")]]
        with h5py.File(str(root_path + "data/friday/dislikes.hdf5"), "a") as file:
            file.create_dataset(str(self.user), data=disliked)
        for i in range(len(self.database)):
            self.database[i] = [attrib.decode("utf8") for attrib in self.database[i]]
        dbg = self.database
        self.message = "Account created!"
        self._clear_fields()
class Signin(Screen):
    """Sign-in screen: checks credentials against the in-memory database.

    On success it fills the module-level `g_user` dict and stops the app,
    which returns control to login().
    """
    user = StringProperty("")
    passwd = StringProperty("")
    message = StringProperty("")
    remember = BooleanProperty(True)
    def __init__(self, **kwargs):
        super(Signin, self).__init__(**kwargs)
        self.database = dbg
        self.remember = True
    def sign_in_click(self):
        global g_user
        global dbg
        self.database = dbg
        # Row layout (see Register): [first, last, username, password,
        # access_key, access_secret].
        for entry in self.database:
            if entry[2] == self.user and entry[3] == self.passwd:
                g_user["first_name"] = entry[0]
                g_user["last_name"] = entry[1]
                g_user["username"] = self.user
                g_user["logged_in"] = True if self.remember else False
                g_user["access_key"] = entry[4]
                g_user["access_secret"] = entry[5]
                # Stopping the app unblocks Login().run() inside login().
                App.get_running_app().stop()
                return
        self.message = "Incorrect username or password. Try again."
        self.user = ""
        self.passwd = ""
class Login(App):
    """Kivy application wrapper; the UI tree is the Manager screen manager."""
    def build(self):
        self.icon = 'data/friday/res/icon.ico'
        return Manager()
def login(user):
    """Run the login UI (blocking) and return the resulting user dict.

    Loads the users table from users.hdf5 into the module-level `dbg`,
    runs the Kivy app, and returns `g_user` as filled in by Signin.
    """
    global g_user
    g_user = user
    global dbg
    with h5py.File(str(root_path + "data/friday/users.hdf5"), "r") as users_file:
        database = users_file["users"]
        database = list(database)
    # HDF5 stores byte strings; decode each field for in-memory use.
    for i in range(len(database)):
        database[i] = [attrib.decode("utf8") for attrib in database[i]]
    dbg = database
    Login().run()
    #Window.close()
    return g_user
if __name__ == "__main__":
    # Standalone run: start with an empty user dict.
    login({})
| 4,519 | 1,844 |
# NOTE(review): relies on `plt`, `mens_rowing` and `mens_gymnastics` being
# defined earlier in the file (presumably matplotlib.pyplot and DataFrames
# with a "Height" column) — confirm against the full script.
fig, ax = plt.subplots()
# Add a bar for the rowing "Height" column mean/std
ax.bar("Rowing", mens_rowing["Height"].mean(), yerr=mens_rowing["Height"].std())
# Add a bar for the gymnastics "Height" column mean/std
ax.bar("Gymnastics", mens_gymnastics["Height"].mean(), yerr=mens_gymnastics["Height"].std())
# Label the y-axis
ax.set_ylabel("Height (cm)")
plt.show()
from __future__ import annotations
from itests.pages.base import BaseModal, BasePage
class RoleUserViewPage(BasePage):
    """Page object for the role-user view."""

    def click_disable_button(self) -> None:
        """Press the 'disable role user' button."""
        self.find_element_by_id("disable-role-user").click()

    def get_disable_modal(self) -> DisableRoleUserModal:
        """Wait for the disable-confirmation modal and wrap it."""
        modal_element = self.find_element_by_id("disableModal")
        self.wait_until_visible(modal_element)
        return DisableRoleUserModal(modal_element)
class DisableRoleUserModal(BaseModal):
    """Confirmation modal shown when disabling a role user; no extra behavior."""
    pass
| 503 | 152 |
import tensorflow as tf
import numpy as np
import parse
def lstm_cell(size_layer, state_is_tuple = True):
    """Build a single LSTM cell with `size_layer` units (TF 1.x API)."""
    return tf.nn.rnn_cell.LSTMCell(size_layer, state_is_tuple = state_is_tuple)
def generate(sess, sequence, noise, model, tag, length_sentence, text_vocab):
    """Prime the generator RNN on `tag`, then sample `length_sentence` tokens.

    Fixes the Python-2-only `xrange` (NameError on Python 3; `range` is
    equivalent on both) and factors the duplicated sample/append code
    into local helpers.
    """
    def _sample_batch(batch_fake, last_state):
        # First step has no RNN state yet: the feed raises and we fall back
        # to the noise-conditioned graph to obtain the initial state.
        try:
            return sess.run(
                [model.final_outputs_generator, model.last_state_generator],
                feed_dict = {model.initial_layer: last_state, model.fake_input: batch_fake})
        except Exception:
            return sess.run(
                [model.final_outputs, model.last_state],
                feed_dict = {model.noise: noise, model.fake_input: batch_fake})

    def _append_words(prob, sentence_generated):
        # Sample one vocabulary index per timestep from the softmax output.
        for i in range(prob.shape[1]):
            word = np.random.choice(range(len(text_vocab)), p = prob[0, i, :])
            sentence_generated.append(text_vocab[word])

    sentence_generated = []
    onehot = parse.embed_to_onehot(tag, text_vocab)
    upper_b = (len(tag) // sequence) * sequence
    last_state = None
    # Prime on full-length windows of the tag.
    for i in range(0, upper_b, sequence):
        tokens = onehot[i:i + sequence, :]
        batch_fake = np.zeros((1, sequence, len(text_vocab)))
        batch_fake[0, :, :] = tokens
        prob, last_state = _sample_batch(batch_fake, last_state)
        _append_words(prob, sentence_generated)
    # Prime on the trailing partial window, if any.
    if onehot[upper_b:, :].shape[0] > 0:
        tokens = onehot[upper_b:, :]
        batch_fake = np.zeros((1, tokens.shape[0], len(text_vocab)))
        batch_fake[0, :, :] = tokens
        prob, last_state = _sample_batch(batch_fake, last_state)
        _append_words(prob, sentence_generated)
    # Free-run: feed back the last `sequence` generated tokens each step.
    for _ in range(length_sentence):
        onehot = parse.embed_to_onehot(sentence_generated[-sequence:], text_vocab)
        batch_fake = np.zeros((1, onehot.shape[0], len(text_vocab)))
        batch_fake[0, :, :] = onehot
        prob, last_state = sess.run(
            [model.final_outputs_generator, model.last_state_generator],
            feed_dict = {model.initial_layer: last_state, model.fake_input: batch_fake})
        word = np.random.choice(range(len(text_vocab)), p = prob[0, -1, :])
        sentence_generated.append(text_vocab[word])
    return sentence_generated
def discriminator(X, num_layers, size_layer, dimension_input, reuse = False):
    """Stacked-LSTM discriminator: one unnormalized real/fake logit per row.

    `dimension_input` is unused but kept for interface compatibility.
    """
    with tf.variable_scope("discriminator", reuse = reuse):
        # `range` instead of the Python-2-only `xrange` (NameError on Python 3).
        rnn_cells = tf.nn.rnn_cell.MultiRNNCell([lstm_cell(size_layer) for _ in range(num_layers)])
        outputs, last_state = tf.nn.dynamic_rnn(rnn_cells, X, dtype = tf.float32)
        rnn_W = tf.Variable(tf.random_normal((size_layer, 1)))
        rnn_B = tf.Variable(tf.random_normal([1]))
        # Score only the last timestep's output.
        return tf.matmul(outputs[:,-1], rnn_W) + rnn_B
def generator_encode(X, num_layers, size_layer, len_noise, reuse = False):
    """Encode the noise sequence X into a flat RNN state for the generator.

    `len_noise` is unused but kept for interface compatibility.
    """
    with tf.variable_scope("generator_encode", reuse = reuse):
        # `range` instead of the Python-2-only `xrange` (NameError on Python 3).
        rnn_cells = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer, False) for _ in range(num_layers)],
            state_is_tuple = False)
        _, final_state = tf.nn.dynamic_rnn(rnn_cells, X, dtype = tf.float32)
        return final_state
def generator_sentence(X, hidden_layer, num_layers, size_layer, dimension_input, name_scope, reuse=False):
    """Decode token probabilities from X starting at state `hidden_layer`.

    Returns (softmax probabilities of shape [batch, time, vocab],
    last RNN state, flat pre-softmax logits).
    """
    with tf.variable_scope(name_scope, reuse = reuse):
        # `range` instead of the Python-2-only `xrange` (NameError on Python 3).
        rnn_cells = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(size_layer, False) for _ in range(num_layers)],
            state_is_tuple = False)
        outputs, last_state = tf.nn.dynamic_rnn(rnn_cells, X, initial_state = hidden_layer, dtype = tf.float32)
        seq_shape = tf.shape(outputs)
        rnn_W = tf.Variable(tf.random_normal((size_layer, dimension_input)))
        rnn_B = tf.Variable(tf.random_normal([dimension_input]))
        logits = tf.matmul(tf.reshape(outputs, [-1, size_layer]), rnn_W) + rnn_B
        return tf.reshape(tf.nn.softmax(logits), (seq_shape[0], seq_shape[1], dimension_input)), last_state, logits
class Model:
    """GAN over token sequences (TF 1.x graph mode), plus a second decoder
    trained to predict the generator's next token.

    `sequence_size` is accepted but unused in this constructor.
    """
    def __init__(self, num_layers, size_layer, dimension_input, len_noise, sequence_size, learning_rate):
        # Placeholders: noise that seeds the generator state, teacher-forced
        # fake input tokens, and real sentences for the discriminator.
        self.noise = tf.placeholder(tf.float32, [None, None, len_noise])
        self.fake_input = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.true_sentence = tf.placeholder(tf.float32, [None, None, dimension_input])
        self.initial_layer = generator_encode(self.noise, num_layers, size_layer, len_noise)
        self.final_outputs, self.last_state, _ = generator_sentence(self.fake_input, self.initial_layer, num_layers, size_layer, dimension_input, 'generator_sentence')
        fake_logits = discriminator(self.final_outputs, num_layers, size_layer, dimension_input)
        true_logits = discriminator(self.true_sentence, num_layers, size_layer, dimension_input, reuse = True)
        # Sigmoid cross-entropy GAN losses: D pushes real->1/fake->0,
        # G pushes fake->1.
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = true_logits, labels = tf.ones_like(true_logits)))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.zeros_like(fake_logits)))
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = fake_logits, labels = tf.ones_like(fake_logits)))
        self.d_loss = d_loss_real + d_loss_fake
        d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'discriminator')
        g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_encode') + tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_sentence')
        self.d_train_opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.d_loss, var_list = d_vars)
        self.g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(self.g_loss, var_list = g_vars)
        # NOTE(review): this second decoder lives in scope 'generator_sequence'
        # (not 'generator_sentence'), so its variables are trained only by
        # seq_opt below — confirm the distinct scope name is intentional.
        self.final_outputs_generator, self.last_state_generator, logits_generator = generator_sentence(self.final_outputs[:,:-1,:], self.last_state, num_layers, size_layer, dimension_input, 'generator_sequence')
        # Next-token targets: the generator's own outputs shifted by one step.
        y_batch_long = tf.reshape(self.final_outputs[:,1:,:], [-1, dimension_input])
        self.seq_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits_generator, labels = y_batch_long))
        seq_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope = 'generator_sequence')
        self.seq_opt = tf.train.RMSPropOptimizer(0.01, 0.9).minimize(self.seq_loss, var_list = seq_vars)
| 6,141 | 2,412 |
from mmr2web.models import *
import datetime
def get_payments_file(nok_per_usd=9.1412):
    """Write payments_mmrisk.csv for all selected Situations; return the
    total payment in USD.

    Default exchange rate taken from Norges Bank, Nov 22, 2019.
    The file is now managed by a `with` block so it is closed even if a
    database row raises mid-loop (the original leaked the handle).
    """
    total_payment = 0
    with open("payments_mmrisk.csv", "w") as payments_out:
        payments_out.write("amount,message\n")
        for s in Situation.objects.filter(selected=True):
            if s.choice_risk:
                # Dice outcome, converted NOK -> USD; draws are 1-based.
                amount = DICE[s.die.dienumber]['eyes'][s.draw-1] / nok_per_usd
                message = "In mmr2 - someone decided to throw a dice on your behalf."
                if amount==0:
                    # Zero payouts are bumped to one cent so a row is still paid.
                    amount=0.01
                    message = "In mmr - someone decided to throw a dice on your behalf and you were unlucky."
            else:
                amount = s.safe_amount / nok_per_usd
                message = "In mmr2 - someone decided for the safe amount on your behalf."
            payments_out.write("%3.2f,%s\n" % (amount, message))
            total_payment += amount
    return total_payment
# Runs at import time and writes payments_mmrisk.csv into the CWD.
get_payments_file()
| 987 | 327 |
# Test Event class
# Port-capability guards: print "SKIP" and exit when a required feature
# is missing, as expected by the MicroPython test runner.
try:
    import uasyncio as asyncio
except ImportError:
    print("SKIP")
    raise SystemExit
import micropython
try:
    micropython.schedule
except AttributeError:
    print("SKIP")
    raise SystemExit
try:
    # Unix port can't select/poll on user-defined types.
    import uselect as select
    poller = select.poll()
    poller.register(asyncio.ThreadSafeFlag())
except TypeError:
    print("SKIP")
    raise SystemExit
async def task(id, flag):
    # Block on the flag; prints bracket the wait so the expected-output
    # file captures the suspend/resume ordering.
    print("task", id)
    await flag.wait()
    print("task", id, "done")
def set_from_schedule(flag):
    # Sets the flag from scheduler (soft-IRQ) context via micropython.schedule.
    print("schedule")
    flag.set()
    print("schedule done")
async def main():
    # Exercises ThreadSafeFlag in three ways; the print order is compared
    # against an expected-output file, so do not reorder statements.
    flag = asyncio.ThreadSafeFlag()
    # Set the flag from within the loop.
    t = asyncio.create_task(task(1, flag))
    print("yield")
    await asyncio.sleep(0)
    print("set event")
    flag.set()
    print("yield")
    await asyncio.sleep(0)
    print("wait task")
    await t
    # Set the flag from scheduler context.
    print("----")
    t = asyncio.create_task(task(2, flag))
    print("yield")
    await asyncio.sleep(0)
    print("set event")
    micropython.schedule(set_from_schedule, flag)
    print("yield")
    await asyncio.sleep(0)
    print("wait task")
    await t
    # Flag already set.
    print("----")
    print("set event")
    flag.set()
    t = asyncio.create_task(task(3, flag))
    print("yield")
    await asyncio.sleep(0)
    print("wait task")
    await t
asyncio.run(main())
| 1,488 | 521 |
import unittest
from ortec.scientific.benchmarks.loadbuilding.instance.ThreeDitemkind import ThreeDitemkind
class TestItemkind(unittest.TestCase):
    """Validation tests for ThreeDitemkind: a valid baseline fixture is
    mutated one field at a time, after which IsValid() must fail."""
    def setUp(self):
        # Baseline item kind that IsValid() accepts.
        self.item_kind = ThreeDitemkind()
        self.item_kind.id = 1
        self.item_kind.quantity = 10
        self.item_kind.boundingBox = [10,10,10]
        self.item_kind.orientations = set(['LWH', 'WLH'])
    def test_itemkind_valid(self):
        self.assertTrue(self.item_kind.IsValid()[0])
    def test_itemkind_orientationstring(self):
        self.assertEqual(self.item_kind.GetOrientationString(), "LWH,WLH")
    def test_itemkind_invalid_id1(self):
        self.item_kind.id = None
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_id2(self):
        self.item_kind.id = "1"
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_boundingbox1(self):
        self.item_kind.boundingBox = None
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_boundingbox2(self):
        self.item_kind.boundingBox = "0,0,0"
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_boundingbox3(self):
        self.item_kind.boundingBox[0] = None
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_boundingbox4(self):
        self.item_kind.boundingBox[0] = "a"
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_boundingbox5(self):
        self.item_kind.boundingBox[0] = -1
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_quantity1(self):
        self.item_kind.quantity = None
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_quantity2(self):
        self.item_kind.quantity = "1"
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_quantity3(self):
        self.item_kind.quantity = -1
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_orientations1(self):
        self.item_kind.orientations = None
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_orientations2(self):
        self.item_kind.orientations = "LWH,HLW"
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_invalid_orientations3(self):
        self.item_kind.orientations = set(["LLL"])
        self.assertFalse(self.item_kind.IsValid()[0])
    def test_itemkind_typestring(self):
        self.assertEqual(self.item_kind.TypeString(), "itemkind")
    def test_itemkind_ne(self):
        # A freshly-constructed (unconfigured) instance must compare unequal.
        self.assertNotEqual(self.item_kind, ThreeDitemkind())
# NOTE(review): the suite is built at import time but never run here —
# presumably executed by an external runner; confirm.
TestCase = unittest.TestSuite()
TestCase.addTest(unittest.TestLoader().loadTestsFromTestCase(TestItemkind))
from .structs import GameNode
| 30 | 9 |
import socket
import ipaddress
def net_family(net):
    """Return the socket address family (AF_INET6 or AF_INET) of *net*."""
    is_v6 = isinstance(ipaddress.ip_network(net, strict=False),
                       ipaddress.IPv6Network)
    return socket.AF_INET6 if is_v6 else socket.AF_INET
def flag6(net):
    """Return the '-6' command-line flag for IPv6 networks, '' otherwise."""
    if net_family(net) == socket.AF_INET6:
        return '-6'
    return ''
| 292 | 99 |
#
# Copyright John Reid 2006
#
from _biopsy import *
def _hit_str( hit ):
    """Render a hit as 'binder,position,strand,p_binding'."""
    fields = (
        hit.binder,
        str( hit.location.position ),
        str( hit.location.positive_strand ),
        str( hit.p_binding ),
    )
    return ",".join( fields )
Hit.__str__ = _hit_str
def _location_start( location ):
    """Start coordinate of a hit location."""
    return location.position
HitLocation.start = _location_start
def _location_end( location ):
    """One-past-the-end coordinate of a hit location."""
    return location.position + location.length
HitLocation.end = _location_end
def _location_overlap( location1, location2 ):
    """Do two hits overlap?"""
    # Order the pair so the leftmost-starting location comes first, then
    # test whether it extends past the other's start.
    leftmost, other = (
        (location1, location2)
        if location1.position < location2.position
        else (location2, location1)
    )
    return leftmost.end() > other.position
HitLocation.overlap = _location_overlap
def _location_separation( location1, location2 ):
    """The separation between two locations"""
    # Non-negative when location1 starts at or after location2's end;
    # otherwise measure the gap on the other side.
    gap_after_two = location1.position - location2.end()
    if gap_after_two >= 0:
        return gap_after_two
    return location2.position - location1.end()
HitLocation.separation = _location_separation
def _hits_str( hits ):
    """One hit per line, using each hit's str() form."""
    return "\n".join( map( str, hits ) )
HitVec.__str__ = _hits_str
def get_char_for_hit( hit ):
    """Character/name used to represent this hit: its binder."""
    return hit.binder
def get_score_for_hit( hit ):
    """Score for a hit: its raw binding probability (log-score left disabled)."""
    # return math.log( hit.p_binding )
    return hit.p_binding
def get_max_p_binding_over_hits( hits ):
    """Takes a list of hits and returns a dictionary mapping binder names to max( p(binding) ) across all hits"""
    result = { }
    for hit in hits:
        # dict.has_key() was removed in Python 3; `in` is equivalent and
        # works on Python 2 as well.
        if hit.binder not in result or result[hit.binder] < hit.p_binding:
            result[hit.binder] = hit.p_binding
    return result
def find_pair_in_analysis(
    analysis,
    pair,
    max_separation = None,
    separation = None
):
    """Finds in which analyses a pair of TFs bind
    analysis: Analysis
    pair: A tuple ( binder1, binder2, orientation1, orientation2 )
    max_separation: If specified determines maximum separation
    separation: If specified determines exact separation (over-rides max_separation)
    Returns a dict mapping analysis keys to the hit pairs found there
    """
    result = { }
    for key in analysis.get_keys():
        matches = find_pair_in_hits(
            analysis.get_hits_for( key ), pair, max_separation, separation )
        if matches:
            result[ key ] = matches
    return result
def find_pair_in_hits(
    hits,
    pair,
    max_separation = None,
    separation = None
):
    """Finds the locations where a pair of TFs bind in a sequence of hits
    hits: The hits
    pair: A tuple ( binder1, binder2, orientation1, orientation2 )
    max_separation: If specified determines maximum separation
    separation: If specified determines exact separation (overrides max_separation)
    returns a sequence of pairs of hits that satisfy the criteria
    """
    ( binder1, binder2, orientation1, orientation2 ) = pair
    result = [ ]
    for h1 in hits:
        if binder1 != h1.binder: continue
        for h2 in hits:
            if binder2 != h2.binder: continue
            # Overlapping hits never count as a pair.
            if h1.location.overlap( h2.location ): continue
            distance = h1.location.separation( h2.location )
            if None != separation and separation != distance: continue
            if None != max_separation and max_separation < distance: continue
            if h1.location.position < h2.location.position:
                # h1 upstream of h2: strands must match the requested
                # orientations exactly.
                if (
                    h1.location.positive_strand != orientation1
                    or
                    h2.location.positive_strand != orientation2
                ): continue
            else:
                # h2 upstream of h1: the test flips to `==` so the pair
                # must appear with BOTH orientations inverted.
                # NOTE(review): `or` here requires both strands inverted
                # (continue fires when either matches) — confirm intended.
                if (
                    h1.location.positive_strand == orientation1
                    or
                    h2.location.positive_strand == orientation2
                ): continue
            result.append( ( h1, h2 ) )
    return result
def hit_over_threshold_predicate(threshold):
    "@return: A function that returns True if the hit is over the threshold given."
    # Closure over `threshold`; equivalent to the original nested def.
    return lambda hit: hit.p_binding >= threshold
def hits_above_threshold(hits, threshold):
    "@return: Those hits above the threshold."
    # List comprehension instead of filter(): under Python 3, filter()
    # returns a lazy iterator, silently breaking the documented list return.
    return [hit for hit in hits if hit.p_binding >= threshold]
| 4,370 | 1,268 |
# -*- coding: utf-8 -*-
import click
from github import Github
from github.GithubException import RateLimitExceededException
def main():
    # Console entry point: run the click group with a fresh context object.
    cli(obj={})
def get_repos(key, org, repo, url):
    """Resolve the repositories to operate on.

    Uses `url` as a custom API base (GitHub Enterprise) when given; scopes
    to organization `org` when set, otherwise the authenticated user; and
    narrows to the single repo `repo` when set.
    """
    client = Github(key, base_url=url) if url else Github(key)
    owner = client.get_organization(login=org) if org else client.get_user()
    return [owner.get_repo(repo)] if repo else owner.get_repos()
@click.group()
@click.option('--key', envvar='EPITHET_KEY', help="Github OAuth Token")
@click.option('--dryrun', is_flag=True, help="Don't actually change or create labels")
@click.option('--url', help="API URL - change if GitHub Enterprise")
@click.pass_context
def cli(ctx, key, dryrun, url):
    # Root command group: validates the API key and stashes the shared
    # options on the click context for the subcommands.
    if not key:
        click.echo("You must provide a GitHub API v3 key")
        return
    ctx.obj['dryrun'] = dryrun
    ctx.obj['url'] = url
    ctx.obj['key'] = key
@cli.command()
@click.option('--label', '-l', is_flag=True, help="List labels", default=False)
@click.option('--milestone', '-m', is_flag=True, help='List milestones', default=False)
@click.option('--org', '-o', help="Organization to get repos from")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.pass_context
def list(ctx, label, milestone, org, repo):
    # List labels and/or milestones on each selected repository.
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    # Loop variables renamed: the original reused `repo`/`label`/`milestone`
    # for both option flags and loop items.
    for repository in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo("\n * {}:\n".format(repository.name))
        if label:
            for repo_label in repository.get_labels():
                click.echo(" - {} ({})".format(repo_label.name, repo_label.color))
        if milestone:
            # Bug fix: the original format string " - {} ({})" had two
            # placeholders but was given only the title, raising IndexError.
            for repo_milestone in repository.get_milestones():
                click.echo(" - {}".format(repo_milestone.title))
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Add label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Add milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of new label")
@click.option('--color', '-c', help="Color of new label")
@click.pass_context
def add(ctx, label, milestone, org, repo, name, color):
    # Create a label and/or milestone on each selected repository.
    # Matching is case-insensitive; an existing label that differs only in
    # case or color is edited in place.
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for repository in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(repository.name))
        if label:
            click.echo("Adding a label with name: {} and color: {}".format(name, color))
            labels = {lb.name: lb for lb in repository.get_labels()}
            if name.lower() in [l.lower() for l in labels.keys()]:
                click.echo(
                    " - Found {} on {} (Dryrun: {})".format(
                        name, repository.name, ctx.obj['dryrun']
                    )
                )
                if name not in labels.keys():
                    # A label exists with different casing: rename it.
                    # Bug fix: the original performed this edit even when
                    # --dryrun was set and on archived repos, unlike every
                    # other mutating branch.
                    for labelname, existing in labels.items():
                        if labelname.lower() == name.lower() \
                                and not ctx.obj['dryrun'] and not repository.archived:
                            existing.edit(name=name, color=color)
                elif labels[name].color != color and not ctx.obj['dryrun'] \
                        and not repository.archived:
                    labels[name].edit(name=name, color=color)
            else:
                click.echo(
                    " - Creating {} on {} (Dryrun: {})".format(
                        name, repository.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun'] and not repository.archived:
                    repository.create_label(name=name, color=color)
        if milestone:
            click.echo("Adding a milestone with name: {}".format(name))
            milestones = {ms.title: ms for ms in repository.get_milestones()}
            if name.lower() in [m.lower() for m in milestones.keys()]:
                click.echo(
                    " - Found {} on {} (Dryrun: {})".format(
                        name, repository.name, ctx.obj['dryrun']
                    )
                )
            else:
                click.echo(
                    " - Creating {} on {} (Dryrun: {})".format(
                        name, repository.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun'] and not repository.archived:
                    repository.create_milestone(title=name)
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Delete label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Delete milestones', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of label or milestone to delete")
@click.pass_context
def delete(ctx, label, milestone, org, repo, name):
    # Delete a label and/or milestone by exact name on each selected repo.
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for repository in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(repository.name))
        if label:
            click.echo("Deleting label: {}".format(name))
            labels = {lb.name: lb for lb in repository.get_labels()}
            if name in labels:
                click.echo(
                    " - Found {} on {}, deleting (Dryrun: {})".format(
                        labels[name].name, repository.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun']:
                    labels[name].delete()
        if milestone:
            click.echo("Deleting milestone: {}".format(name))
            milestones = {ms.title: ms for ms in repository.get_milestones()}
            if name in milestones:
                click.echo(
                    " - Found {} on {}, deleting (Dryrun: {})".format(
                        milestones[name].title, repository.name, ctx.obj['dryrun']
                    )
                )
                if not ctx.obj['dryrun']:
                    milestones[name].delete()
@cli.command()
@click.option('--label', '-l', is_flag=True, help="Update label", default=False)
@click.option('--milestone', '-m', is_flag=True, help='Update milestone', default=False)
@click.option('--org', '-o', help="Organization")
@click.option('--repo', '-r', help="Optionally select a single repo")
@click.option('--name', '-n', help="Name of the existing label")
@click.option('--new-name', help="New name of the label")
@click.pass_context
def update(ctx, label, milestone, org, repo, name, new_name):
    # Rename a label (and report on a milestone) across selected repos.
    if not label and not milestone:
        click.echo("--label or --milestone required")
        return
    for repository in get_repos(ctx.obj['key'], org, repo, ctx.obj['url']):
        click.echo(" * Checking {}".format(repository.name))
        if label:
            click.echo("Updating label {}".format(name))
            labels = {lb.name: lb for lb in repository.get_labels()}
            if name in labels:
                # "upating" typo kept as-is: it is a runtime string.
                click.echo(
                    " - Found {} on {}, upating to {} (Dryrun: {})".format(
                        labels[name].name, repository.name, new_name, ctx.obj['dryrun']
                    )
                )
                if labels[name].name != new_name and not ctx.obj['dryrun']:
                    labels[name].edit(name=new_name, color=labels[name].color)
            else:
                click.echo("{} not found, did you mean 'add'?".format(name))
        if milestone:
            click.echo("Updating milestone with name: {}".format(name))
            milestones = {ms.title: ms for ms in repository.get_milestones()}
            if name in milestones:
                # Bug fix: milestone objects are keyed/displayed by .title
                # everywhere else in this file; `.name` raised AttributeError.
                click.echo(
                    " - Found {} on {}, upating to {} (Dryrun: {})".format(
                        milestones[name].title, repository.name, new_name, ctx.obj['dryrun']
                    )
                )
                # NOTE(review): unlike the label branch, no edit() is issued,
                # so the milestone is only reported, never renamed — confirm.
            else:
                click.echo("{} not found, did you mean 'add'?".format(name))
if __name__ == "__main__":
    # Bug fix: main() takes no parameters — the original `main(obj={})`
    # raised TypeError. The click context object is created inside main().
    main()
| 8,384 | 2,482 |
import numpy as np
def normalize(matrix, nh=1, nl=0):
    """Normalizes each column in a matrix by calculating its maximum
    and minimum values, the parameters nh and nl specify the final range
    of the normalized values"""
    col_min = matrix.min(0)
    # np.ptp(...) instead of matrix.ptp(0): the ndarray.ptp *method* was
    # removed in NumPy 2.0; the module-level function remains available.
    col_range = np.ptp(matrix, axis=0)
    # NOTE(review): a constant column makes col_range 0 and divides by
    # zero, exactly as the original did — callers must avoid that case.
    return (matrix - col_min) * ((nh - nl) / col_range) + nl
def one_hot_encoding(array):
    """Map each unique label in `array` to a one-hot vector.

    The i-th sorted unique label gets a float vector of zeros with a 1 at
    position i; returns {label: vector}.
    """
    labels = np.unique(array)
    identity = np.eye(labels.size)
    return {label: identity[i] for i, label in enumerate(labels)}
def encode(array, encoding):
    """Encode every element of `array` via the `encoding` dict."""
    return [encoding[element] for element in array]
def load_data_wrapper(name, input_cols, output_col, output_type="float", delimiter=None):
    """Wrapper to load the desired data in an easier way. It returns the normalized and encoded
    data, alongside with the size of the values in the inputs and outputs to initialize
    the neural network correctly"""
    # The file is read twice deliberately: inputs as floats, outputs with
    # their own dtype (e.g. strings for class labels).
    data_x = np.loadtxt(name, usecols=input_cols, delimiter=delimiter)
    data_x = normalize(data_x)
    data_y = np.loadtxt(name, usecols=output_col, delimiter=delimiter, dtype=output_type)
    encoding = one_hot_encoding(data_y)
    data_y = encode(data_y, encoding)
    # x_len will be the number of input neurons, and y_len the number of output neurons
    x_len = np.shape(data_x)[1]
    y_len = np.shape(data_y)[1]
    # Pair each sample as column vectors: [(x_len, 1), (y_len, 1)].
    data = [[np.reshape(x, (x_len, 1)), np.reshape(y, (y_len, 1))] for x, y in zip(data_x, data_y)]
    return data, x_len, y_len
| 1,876 | 588 |
from pyimagesearch.searcher import Searcher
from pyimagesearch.utils import *
import pytest
# Hard-coded Windows paths for the local CBIS test environment.
indexPath = "D:/APP/cbis/"
verbose = True
#test Search class
@pytest.fixture
def searcher():
    # Fresh Searcher per test, pointed at the local index.
    return Searcher(indexPath, verbose)
pred_file = "D://APP//cbis//tests//out//predictions_test.csv"
top_k = 20
def test_search_gun( searcher ):
    # Expect exactly one 'gun' detection above a 0.50 confidence threshold.
    threshold = 0.50
    image_list = searcher.search_gun(pred_file, top_k, threshold)
    assert len(image_list) == 1
    assert image_list[0][3] == 'gun'
def test_search_not_gun(searcher):
    # Single-term search: two 'wooden_spoon' hits at threshold 0.70.
    threshold = 0.70
    search_list = ['wooden_spoon']
    image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
    assert len(image_list) == 2
    assert image_list[0][3] == 'wooden_spoon'
def test_search_not_gun1(searcher):
    # At the stricter 0.80 threshold only the 'revolver' hit survives.
    threshold = 0.80
    search_list = ['wooden_spoon', 'revolver']
    image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
    assert len(image_list) == 1
    assert image_list[0][3] == 'revolver'
def test_search_not_gun2(searcher):
    # Same two-term search at 0.70: three hits, spoons first, revolver last.
    threshold = 0.70
    search_list = ['wooden_spoon', 'revolver']
    image_list = searcher.search_list(pred_file,search_list, top_k, threshold)
    assert len(image_list) == 3
    assert image_list[0][3] == 'wooden_spoon'
    assert image_list[2][3] == 'revolver'
| 1,304 | 486 |
"""
Provides functions for working with NTFS volumes
Author: Harel Segev
05/16/2020
"""
from construct import Struct, Padding, Computed, IfThenElse, BytesInteger, Const, Enum, Array, FlagsEnum, Switch, Tell
from construct import PaddedString, Pointer, Seek, Optional, StopIf, RepeatUntil, Padded
from construct import Int8ul, Int16ul, Int32ul, Int64ul, Int8sl
from dataruns import get_dataruns, NonResidentStream
from sys import exit as sys_exit
class EmptyNonResidentAttributeError(ValueError):
    """Raised when a non-resident attribute has an empty data-run list."""
    pass
# --- construct parsers for on-disk NTFS structures --------------------------
# Volume boot record.  Parsing stops early (Magic stays None) when the sector
# does not carry the b'NTFS' signature; callers check Magic for validity.
BOOT_SECTOR = Struct(
    "OffsetInImage" / Tell,
    Padding(3),
    "Magic" / Optional(Const(b'NTFS')),
    StopIf(lambda this: this.Magic is None),
    Padding(4),
    "BytsPerSec" / Int16ul,
    "SecPerClus" / Int8ul,
    "BytsPerClus" / Computed(lambda this: this.BytsPerSec * this.SecPerClus),
    Padding(34),
    "MftClusNumber" / Int64ul,
    Padding(8),
    # Positive value: clusters per record; negative: record size is 2**abs(v).
    "BytsOrClusPerRec" / Int8sl,
    "BytsPerRec" / IfThenElse(
        lambda this: this.BytsOrClusPerRec > 0,
        Computed(lambda this: this.BytsOrClusPerRec * this.BytsPerClus),
        Computed(lambda this: 2 ** abs(this.BytsOrClusPerRec)),
    ),
    Padding(3),
    # Same dual encoding for index buffers.
    "BytsOrClusPerIndx" / Int8sl,
    "BytsPerIndx" / IfThenElse(
        lambda this: this.BytsOrClusPerIndx > 0,
        Computed(lambda this: this.BytsOrClusPerIndx * this.BytsPerClus),
        Computed(lambda this: 2 ** abs(this.BytsOrClusPerIndx)),
    ),
    # An MFT chunk is the larger of one cluster and one file record.
    "BytsPerMftChunk" / IfThenElse(
        lambda this: this.BytsPerClus > this.BytsPerRec,
        Computed(lambda this: this.BytsPerClus),
        Computed(lambda this: this.BytsPerRec)
    ),
)
# 48-bit file record number + 16-bit sequence number.
FILE_REFERENCE = Struct(
    "FileRecordNumber" / BytesInteger(6, swapped=True, signed=False),
    "SequenceNumber" / Int16ul
)
# Header of one MFT file record.  Missing b'FILE' magic stops the parse early.
FILE_RECORD_HEADER = Struct(
    "OffsetInChunk" / Tell,
    "Magic" / Optional(Const(b'FILE')),
    StopIf(lambda this: this.Magic is None),
    "UpdateSequenceOffset" / Int16ul,
    "UpdateSequenceSize" / Int16ul,
    Padding(8),
    "SequenceNumber" / Int16ul,
    Padding(2),
    "FirstAttributeOffset" / Int16ul,
    "Flags" / FlagsEnum(Int16ul, IN_USE=1, DIRECTORY=2),
    Padding(8),
    "BaseRecordReference" / FILE_REFERENCE,
    # Jump to the update sequence (fixup) data within the record.
    Seek(lambda this: this.UpdateSequenceOffset + this.OffsetInChunk),
    "UpdateSequenceNumber" / Int16ul,
    "UpdateSequenceArray" / Array(lambda this: this.UpdateSequenceSize - 1, Int16ul)
)
# All record headers in one MFT chunk; each record is padded to a fixed size.
FILE_RECORD_HEADERS = Struct(
    "RecordHeaders" / Array(
        lambda this: this._.records_per_chunk,
        Padded(lambda this: this._.bytes_per_record, FILE_RECORD_HEADER)
    )
)
# One attribute header; 0xFFFFFFFF marks the end of the attribute list.
ATTRIBUTE_HEADER = Struct(
    "EndOfRecordSignature" / Optional(Const(b'\xFF\xFF\xFF\xFF')),
    StopIf(lambda this: this.EndOfRecordSignature is not None),
    "OffsetInChunk" / Tell,
    "Type" / Enum(Int32ul, FILE_NAME=0x30, INDEX_ALLOCATION=0xA0, DATA=0x80),
    "Length" / Int32ul,
    "Residence" / Enum(Int8ul, RESIDENT=0x00, NON_RESIDENT=0x01),
    "NameLength" / Int8ul,
    "NameOffset" / Int16ul,
    # Attribute name is stored elsewhere in the record; read it via Pointer.
    "AttributeName" / Pointer(lambda this: this.NameOffset + this.OffsetInChunk,
                              PaddedString(lambda this: 2 * this.NameLength, "utf16")),
    Padding(4),
    # Layout of the remainder differs for resident vs non-resident attributes.
    "Metadata" / Switch(
        lambda this: this.Residence,
        {
            "RESIDENT":
                Struct(
                    "AttributeLength" / Int32ul,
                    "AttributeOffset" / Int16ul,
                ),
            "NON_RESIDENT":
                Struct(
                    Padding(16),
                    "DataRunsOffset" / Int16ul,
                    Padding(6),
                    "AllocatedSize" / Int64ul,
                    "RealSize" / Int64ul,
                )
        }
    ),
    # Skip to the next attribute regardless of how much was parsed.
    Seek(lambda this: this.Length + this.OffsetInChunk)
)
# Attribute headers from a given offset up to (and including) the end marker.
ATTRIBUTE_HEADERS = Struct(
    Seek(lambda this: this._.offset),
    "AttributeHeaders" / RepeatUntil(lambda obj, lst, ctx: obj.EndOfRecordSignature is not None, ATTRIBUTE_HEADER)
)
# Body of a $FILE_NAME attribute.
FILENAME_ATTRIBUTE = Struct(
    "ParentDirectoryReference" / FILE_REFERENCE,
    Padding(56),
    "FilenameLengthInCharacters" / Int8ul,
    "FilenameNamespace" / Enum(Int8ul, POSIX=0, WIN32=1, DOS=2, WIN32_DOS=3),
    "FilenameInUnicode" / PaddedString(lambda this: this.FilenameLengthInCharacters * 2, "utf16")
)
def get_boot_sector(raw_image, partition_offset):
    """Seek to *partition_offset* in *raw_image* and parse the volume boot record."""
    raw_image.seek(partition_offset)
    vbr = BOOT_SECTOR.parse_stream(raw_image)
    return vbr
def panic_on_invalid_boot_sector(vbr):
    """Abort the program when *vbr* carries no valid NTFS signature."""
    if vbr["Magic"] is not None:
        return
    sys_exit("INDXRipper: error: invalid volume boot record")
def get_mft_offset(vbr):
    """Return the absolute image offset of the $MFT, in bytes."""
    cluster_offset = vbr["MftClusNumber"] * vbr["BytsPerClus"]
    return cluster_offset + vbr["OffsetInImage"]
def get_first_mft_chunk(vbr, raw_image):
    """Read the first MFT chunk from the image into a mutable buffer."""
    chunk_offset = get_mft_offset(vbr)
    raw_image.seek(chunk_offset)
    return bytearray(raw_image.read(vbr["BytsPerMftChunk"]))
def get_record_headers(mft_chunk, vbr):
    """Parse all file record headers contained in *mft_chunk*."""
    bytes_per_record = vbr["BytsPerRec"]
    parsed = FILE_RECORD_HEADERS.parse(
        mft_chunk,
        bytes_per_record=bytes_per_record,
        records_per_chunk=vbr["BytsPerMftChunk"] // bytes_per_record
    )
    return parsed["RecordHeaders"]
def is_valid_record_signature(record_header):
    """True when the record's 'FILE' magic was present during parsing."""
    magic = record_header["Magic"]
    return magic is not None
def apply_record_fixup(mft_chunk, record_header, vbr):
    """Verify and undo the update-sequence fixup of one record, in place.

    The last two bytes of each sector of the record hold the update sequence
    number (USN); the original bytes live in the header's update sequence
    array.  Returns False as soon as one sector's trailing USN mismatches
    (torn/corrupt record), otherwise restores the original bytes and
    returns True.  Mutates *mft_chunk*.
    """
    usn = record_header["UpdateSequenceNumber"]
    # Offset of the last two bytes of the record's first sector.
    first_fixup_offset = record_header["OffsetInChunk"] + vbr["BytsPerSec"] - 2
    end_of_record_offset = record_header["OffsetInChunk"] + vbr["BytsPerRec"]
    for i, usn_offset in enumerate(range(first_fixup_offset, end_of_record_offset, vbr["BytsPerSec"])):
        if Int16ul.parse(mft_chunk[usn_offset:usn_offset + 2]) != usn:
            return False
        # Restore the sector's original trailing bytes from the fixup array.
        mft_chunk[usn_offset:usn_offset + 2] = Int16ul.build(record_header["UpdateSequenceArray"][i])
    return True
def apply_fixup(mft_chunk, record_headers, vbr):
    """Run fixup verification on every record with a valid signature.

    The outcome is stored on each header under "IsValidFixup".
    """
    for header in record_headers:
        if not is_valid_record_signature(header):
            continue
        header["IsValidFixup"] = apply_record_fixup(mft_chunk, header, vbr)
def is_valid_fixup(record_header):
    """True when the record passed update-sequence (fixup) verification."""
    outcome = record_header["IsValidFixup"]
    return outcome
def is_used(record_header):
    """True when the record's IN_USE flag is set."""
    flags = record_header["Flags"]
    return flags["IN_USE"]
def is_directory(record_header):
    """True when the record's DIRECTORY flag is set."""
    flags = record_header["Flags"]
    return flags["DIRECTORY"]
def get_sequence_number(record_header):
    """Return the record's sequence number.

    For records not in use the stored value is decremented by one.
    """
    sequence = record_header["SequenceNumber"]
    return sequence if is_used(record_header) else sequence - 1
def is_base_record(record_header):
    """True when the record is a base record (no base-record reference)."""
    reference = record_header["BaseRecordReference"]
    return reference["FileRecordNumber"] == 0
def get_base_record_reference(record_header):
    """Return (file record number, sequence number) of the base record."""
    ref = record_header["BaseRecordReference"]
    return ref["FileRecordNumber"], ref["SequenceNumber"]
def get_attribute_headers(mft_chunk, record_header):
    """Parse the record's attribute headers, dropping the end-of-record marker."""
    start = record_header["FirstAttributeOffset"] + record_header["OffsetInChunk"]
    parsed = ATTRIBUTE_HEADERS.parse(mft_chunk, offset=start)
    return parsed["AttributeHeaders"][:-1]
def get_resident_attribute(mft_chunk, attribute_header):
    """Return the raw value bytes of a resident attribute."""
    metadata = attribute_header["Metadata"]
    start = attribute_header["OffsetInChunk"] + metadata["AttributeOffset"]
    return mft_chunk[start: start + metadata["AttributeLength"]]
def get_attribute_type(attribute_header):
    """Return the attribute's type label (e.g. "DATA", "FILE_NAME")."""
    attribute_type = attribute_header["Type"]
    return attribute_type
def get_attribute_name(attribute_header):
    """Return the attribute's name string (may be empty)."""
    name = attribute_header["AttributeName"]
    return name
def is_resident(attribute_header):
    """True when the attribute's value is stored inside the record (resident).

    Bug fix: ``Residence`` is parsed by construct's ``Enum``, which yields a
    string-like value.  The previous ``attribute_header["Residence"]["RESIDENT"]``
    indexed that string with a str key, which raises TypeError; compare
    against the enum label instead.
    """
    return attribute_header["Residence"] == "RESIDENT"
def get_attribute_header(attribute_headers, attribute_type):
    """Yield every header in *attribute_headers* whose type matches."""
    matching = (header for header in attribute_headers
                if header["Type"] == attribute_type)
    yield from matching
def parse_filename_attribute(filename_attribute):
    """Parse raw $FILE_NAME attribute bytes into a construct container."""
    return FILENAME_ATTRIBUTE.parse(filename_attribute)
def get_non_resident_attribute(vbr, raw_image, mft_chunk, attribute_header, is_allocated):
    """Build a NonResidentStream over the attribute's data runs.

    Raises EmptyNonResidentAttributeError when the attribute has no runs.
    """
    runs_offset = attribute_header["OffsetInChunk"] + attribute_header["Metadata"]["DataRunsOffset"]
    runs = get_dataruns(mft_chunk, runs_offset)
    if not runs:
        raise EmptyNonResidentAttributeError
    return NonResidentStream(vbr["BytsPerClus"], vbr["OffsetInImage"], raw_image, runs, is_allocated)
def panic_on_invalid_first_record(record_header):
    """Abort the program when the first $MFT file record is unusable.

    Idiom fix: the messages were f-strings with no placeholders; plain
    string literals are equivalent and clearer.
    """
    if not is_valid_record_signature(record_header):
        sys_exit("INDXRipper: error: invalid 'FILE' signature in first file record")
    if not is_valid_fixup(record_header):
        sys_exit("INDXRipper: error: fixup validation failed for first file record")
def get_mft_data_attribute(vbr, raw_image):
    """Locate the $MFT's $DATA attribute and return it as a non-resident stream."""
    panic_on_invalid_boot_sector(vbr)
    chunk = get_first_mft_chunk(vbr, raw_image)
    headers = get_record_headers(chunk, vbr)
    apply_fixup(chunk, headers, vbr)
    first_record = headers[0]
    panic_on_invalid_first_record(first_record)
    attribute_headers = get_attribute_headers(chunk, first_record)
    data_header = next(get_attribute_header(attribute_headers, "DATA"))
    return get_non_resident_attribute(vbr, raw_image, chunk, data_header, True)
def get_mft_chunks(vbr, mft_data_attribute_stream):
    """Yield successive MFT chunks from the stream until it is exhausted."""
    chunk_size = vbr["BytsPerMftChunk"]
    while True:
        chunk = mft_data_attribute_stream.read(chunk_size)
        if not chunk:
            break
        yield chunk
| 9,484 | 3,446 |
from ortools.linear_solver import pywraplp
from ortools.sat.python import cp_model
my_program = [
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "12"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "6"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "10"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "6"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "13"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "3"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-11"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "11"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "13"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "9"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-1"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "3"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "10"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "13"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "11"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "6"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "0"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "14"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "1"],
["add", "x", "10"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "10"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-5"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "12"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-16"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "10"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-7"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "11"],
["mul", "y", "x"],
["add", "z", "y"],
["inp", "w"],
["mul", "x", "0"],
["add", "x", "z"],
["mod", "x", "26"],
["div", "z", "26"],
["add", "x", "-11"],
["eql", "x", "w"],
["eql", "x", "0"],
["mul", "y", "0"],
["add", "y", "25"],
["mul", "y", "x"],
["add", "y", "1"],
["mul", "z", "y"],
["mul", "y", "0"],
["add", "y", "w"],
["add", "y", "15"],
["mul", "y", "x"],
["add", "z", "y"],
]
def main():
    """Emit MiniZinc constraints equivalent to the AoC 2021 day 24 ALU program.

    Reads the puzzle input from disk when available, otherwise falls back to
    the embedded ``my_program`` listing.  Every instruction becomes one
    ``constraint`` line over SSA-style variables ``v[i]``; each ``inp`` binds
    to a digit variable ``d[i // 18]`` (the listing repeats an 18-instruction
    block per input digit).
    """
    # Each register initially refers to itself; afterwards it tracks the name
    # of the last SSA variable written to it.
    registers = {r: r for r in "wxyz"}
    program = []
    try:
        with open("inputs/2021/24_") as f:
            for line in f:
                program.append(line.split())
    except OSError:  # was a bare except; only I/O failures are expected here
        program = my_program
    for i, inst in enumerate(program):
        if inst[0] == "inp":
            print(f"constraint v[{i}] = d[{i//18}];")
            registers[inst[1]] = f"v[{i}]"
            continue
        op1, op2 = inst[1:]
        # Second operand is either a register (emit its SSA name) or a literal.
        op2 = registers[op2] if op2 in registers else int(op2)
        if inst[0] == "add":
            print(f"constraint v[{i}] = {registers[op1]} + {op2};")
        elif inst[0] == "mul":
            print(f"constraint v[{i}] = {registers[op1]} * {op2};")
        elif inst[0] == "mod":
            print(f"constraint v[{i}] = {registers[op1]} mod {op2};")
        elif inst[0] == "div":
            print(f"constraint v[{i}] = {registers[op1]} div {op2};")
        elif inst[0] == "eql":
            print(
                f"constraint v[{i}] = if {registers[op1]} == {op2} then 1 else 0 endif;"
            )
        else:
            assert False  # unknown ALU opcode
        registers[op1] = f"v[{i}]"
# Script entry point.
if __name__ == "__main__":
    main()
| 7,130 | 3,300 |
import xlsxwriter
from slugify import slugify
import os
def write_to_xlsx(filename, title="Worksheet", data=None):
    """Write *data* (an iterable of rows of cells) to an .xlsx file.

    The worksheet name is a slugified *title*, truncated to respect Excel's
    sheet-name length limit.  Missing parent directories are created.

    Bug fixes: ``os.path.dirname`` returns "" for a bare filename, and
    ``os.makedirs("")`` raises — guard on a non-empty directory.  Using
    ``exist_ok=True`` also removes the exists/makedirs race.  ``data=None``
    previously crashed when iterated; it now writes an empty sheet.
    """
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
    workbook = xlsxwriter.Workbook(filename)
    worksheet = workbook.add_worksheet(slugify(title)[:28])
    for row_index, row in enumerate(data or []):
        for col_index, cell in enumerate(row):
            worksheet.write(row_index, col_index, cell)
    workbook.close()
| 486 | 189 |
import numpy as np
def sisEcua(mat_A, mat_B):
    """Solve the normal-equation system A·C = Bᵀ and return the coefficients C.

    Uses ``numpy.linalg.solve`` instead of the original
    ``inv(A).dot(B.T)``: solving directly is faster and numerically more
    stable than forming the explicit inverse.
    """
    return np.linalg.solve(mat_A, mat_B.T)
def matrices(sm, smm, smy, smn, datos, cant_datos):
    """Assemble the normal-equation matrices for multiple linear regression.

    Builds the symmetric coefficient matrix A (sums, squared sums and
    cross-product sums of the predictors) and the right-hand side B, then
    delegates to sisEcua to solve for the coefficients.

    sm   -- sums of each predictor
    smm  -- sums of each squared predictor
    smy  -- sums of y and of each predictor*y (first entry is sum(y))
    smn  -- sums of pairwise predictor products, in combination order
    datos      -- number of predictors
    cant_datos -- number of observations
    """
    dimension = datos+1
    s = (dimension, dimension)
    mat_A = np.zeros(s)
    mat_B = np.matrix(smy)
    # counters driving the placement of the cross-product sums below
    n = len(smn)
    fin = datos-1
    con_master = fin-1
    ini = 0
    fil = 1
    col = 1
    # top-left cell: the number of observations
    mat_A[0][0] = cant_datos
    for i in range(0, datos):
        # diagonal: squared sums; first row/column: plain sums (A is symmetric)
        mat_A[i+1][i+1] = smm[i]
        mat_A[0][i+1] = sm[i]
        mat_A[i+1][0] = sm[i]
    # place the pairwise predictor-product sums symmetrically off-diagonal
    # NOTE(review): the ini/fin/con_master bookkeeping walks smn in
    # combination order; verify against a 3+ predictor case before changing.
    for i in range(1, datos):
        for j in range(ini, fin):
            mat_A[i][col+1] = smn[j]
            mat_A[col+1][i] = smn[j]
            col += 1
        fil += 1
        col = col-con_master
        ini = fin
        fin = fin+con_master
        con_master -= 1
    # to inspect the assembled matrices:
    # print(mat_A)
    # print(mat_B)
    return sisEcua(mat_A, mat_B)
def multilineal(var_dependiente, var_independiente, nombre_variables):
    """Fit y = a0 + a1*x1 + ... by least squares and print the coefficients.

    var_dependiente   -- list of observed y values
    var_independiente -- list of predictor value lists (one per variable)
    nombre_variables  -- display names for the predictors
    Prints the fitted coefficients and the final regression equation.
    """
    variables = len(nombre_variables)
    sis_ecuaciones = len(nombre_variables)+1
    cant_datos = len(var_dependiente)
    # auxiliary vectors
    var_al_cuadrado = []
    var_por_y = []
    var_multiplicadas = []
    # vectors of the sums
    suma_var_al_cuadrado = []
    suma_var = []
    suma_por_y = []
    suma_de_var_por_var = []
    # dependent variable
    y = np.array(var_dependiente)
    sum_y = np.sum(y)
    suma_por_y.append(sum_y)
    # pairwise products m*n, m*p and n*p
    k = 1
    # accumulate per-predictor sums
    for var_i in range(variables):
        m = np.array(var_independiente[var_i])
        y_por_m = y*m
        m_cuadrado = m*m
        # collect m**2 and m*y
        var_al_cuadrado.append(m_cuadrado)
        var_por_y.append(y_por_m)
        # their sums
        suma_mm = np.sum(m_cuadrado)
        suma_var_al_cuadrado.append(suma_mm)
        suma_m = np.sum(m)
        suma_var.append(suma_m)
        suma_my = np.sum(y_por_m)
        suma_por_y.append(suma_my)
        # products with every later predictor (combination order)
        for i in range(k, variables):
            n = np.array(var_independiente[i])
            multipl = m*n
            var_multiplicadas.append(multipl)
            # sum of each product
            suma_mn = np.sum(multipl)
            suma_de_var_por_var.append(suma_mn)
        k += 1
    """ #para visualizar las sumatorias
    print(var_al_cuadrado)
    print(var_por_y)
    print(var_multiplicadas)
    print(suma_var)
    print(suma_var_al_cuadrado)
    print(suma_por_y)
    print(suma_de_var_por_var) """
    resultado=matrices(suma_var, suma_var_al_cuadrado,
                       suma_por_y, suma_de_var_por_var, variables, cant_datos)
    # final results: print each coefficient and build the equation string
    ecuacion_final='y = '
    print('\n COEFICIENTES DEL AJUSTE LINEAL MULTIPLE\n')
    for i in range(0,variables+1):
        solucion=float(resultado[i])
        sol_redondeada="{0:.7f}".format(solucion)
        print(f' a{i} = {sol_redondeada} ')
        if i>0:
            ec=' + '+str(sol_redondeada)+'*'+str(nombre_variables[i-1])
        else:
            ec=str(sol_redondeada)
        ecuacion_final=ecuacion_final+ec
    print('\n La ecuacion de ajuste es:\n')
    print(f' {ecuacion_final}')
    print('\nNota: y = Var. Dependiente')
# datos de prueba
#set 1
""" agua = [27.5, 28, 28.8, 29.1, 30, 31, 32]
cal = [2, 3.5, 4.5, 2.5, 8.5, 10.5, 13.5]
puzo = [18, 16.5, 10.5, 2.5, 9, 4.5, 1.5]
dr = [5, 2, 3, 4, 1, 2, 3]
gh = [7, 2, 1, 1, 1, 6, 7]
puzos = [15, 15.5, 11.5, 5, 5, 3, 1]
variables_data = [cal, puzo]
variable = ['u', 'v']
variables_data = [cal, puzo, dr, gh, puzos]
variable = ['u', 'v', 'w', 'z', 's'] """
#set 2
""" u=[0.02,0.02,0.02,0.02,0.1,0.1,0.1,0.1,0.18,0.18,0.18,0.18]
v=[1000,1100,1200,1300,1000,1100,1200,1300,1000,1100,1200,1300]
fuv=[78.9,65.1,55.2,56.4,80.9,69.7,57.4,55.4,85.3,71.8,60.7,58.9]
variables_data = [u,v]
variable = ['u', 'v'] """
""" agua = [27.5, 28, 28.8, 29.1, 30, 31, 32]
cal = [2, 3.5, 4.5, 2.5, 8.5, 10.5, 13.5]
puzo = [18, 16.5, 10.5, 2.5, 9, 4.5, 1.5]
variables_data = [cal, puzo]
variable = ['u', 'v']
multilineal(agua, variables_data, variable) """
| 4,251 | 1,983 |
# To display anything on screen, use print().
print('Alesson', 'Sousa', sep='_')# sep= tells print what to place between the values
print('Alesson', 'Sousa', sep='_', end='\n')#end= sets what print appends at the end of the line
# Example: formatting a Brazilian CPF number
#123.456.789-00
print('123','456','789', sep='.', end='-')
print('00')
# Closures demo
def func():
    """Return the sum of two locals (an ordinary function — no closure)."""
    first = 1
    second = 2
    return first + second
def sum(a):  # NOTE: shadows the builtin sum(); name kept for compatibility
    """Return a closure that adds *a* to its argument."""
    def _add(b):
        return a + b
    return _add
# add   -> the function's name / a reference to the function
# add() -> an actual call of the function
num1 = func()
num2 = sum(2)
print(num2(5))
print(type(num1))
print(type(num2))
| 232 | 119 |
"""About tags inherited from standard mode
Attributes
BASE_GRAMMAR: The base grammar for python mode
tag_manager: The tag manager for python mode
"""
from pathlib import Path
from ...tags.manager import TagManager as TagManagerStandard
from ...tags.grammar import Grammar
from ...tags.tag import Tag as TagStandard
# Base grammar for python mode, loaded from the adjacent grammar file.
BASE_GRAMMAR = Grammar(Path(__file__).parent / 'grammar.lark')  # type: Grammar
class TagManager(TagManagerStandard):
    """Tag manager for tags in python mode"""
    # Singleton slot — presumably populated by the base class; TODO confirm.
    INSTANCE = None
    # Tag registry for this mode; class-level, so shared across instances.
    tags = {}
# pylint: disable=invalid-name
# Module-level singleton manager for python-mode tags.
tag_manager = TagManager()  # type: TagManager
class Tag(TagStandard, use_parser=True):
    """The base tag class for tags in python mode"""
    # use_parser=True is a class keyword consumed by TagStandard's subclass
    # hook — presumably enables grammar-based parsing; confirm upstream.
    BASE_GRAMMAR = BASE_GRAMMAR
| 728 | 218 |
# data_loader
# __init__.py
from data_loader.image_data_loader import ImageDataLoader
from data_loader.coco_data_loader import COCODataLoader
from data_loader.tf_example_loader import TFExampleLoader
| 201 | 61 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-02-25 11:38
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: article ordering/permission options, a new
    revision counter, and explicit on_delete/related_name on plugin links."""
    dependencies = [
        ('cms_articles', '0007_plugins'),
    ]
    operations = [
        # Order articles newest-first and add a custom publish permission.
        migrations.AlterModelOptions(
            name='article',
            options={'ordering': ('-order_date',), 'permissions': (('publish_article', 'Can publish article'),), 'verbose_name': 'article', 'verbose_name_plural': 'articles'},
        ),
        # Non-editable revision counter, defaulting to 0 for existing rows.
        migrations.AddField(
            model_name='article',
            name='revision_id',
            field=models.PositiveIntegerField(default=0, editable=False),
        ),
        # The three AlterField operations below pin explicit CASCADE deletes
        # and related_names on the CMSPlugin parent links.
        migrations.AlterField(
            model_name='articleplugin',
            name='cmsplugin_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_articles_articleplugin', serialize=False, to='cms.CMSPlugin'),
        ),
        migrations.AlterField(
            model_name='articlescategoryplugin',
            name='cmsplugin_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_articles_articlescategoryplugin', serialize=False, to='cms.CMSPlugin'),
        ),
        migrations.AlterField(
            model_name='articlesplugin',
            name='cmsplugin_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cms_articles_articlesplugin', serialize=False, to='cms.CMSPlugin'),
        ),
        # NOTE(review): node__site_id is hard-coded to 1 in this generated
        # limit_choices_to — verify that is intentional for multi-site setups.
        migrations.AlterField(
            model_name='articlesplugin',
            name='trees',
            field=models.ManyToManyField(blank=True, limit_choices_to={'application_urls': 'CMSArticlesApp', 'node__site_id': 1, 'publisher_is_draft': False}, related_name='_articlesplugin_trees_+', to='cms.Page', verbose_name='trees'),
        ),
    ]
| 2,131 | 663 |
from time import sleep
import pytest
import torch
from torchvision.ops import roi_align
class TestTorchvision:
    """Smoke tests for torchvision.ops.roi_align over a grid of sizes/devices."""
    # Each fixture is class-scoped and parametrized, so pytest runs the
    # cartesian product of all parameter combinations.
    @pytest.fixture(scope="class", params=[3, 17])
    def num_points(self, request):
        # number of ROI boxes per run
        return request.param
    @pytest.fixture(scope="class", params=[10, 64])
    def width(self, request):
        # ROI / output width in pixels
        return request.param
    @pytest.fixture(scope="class", params=[10, 64])
    def height(self, request):
        # ROI / output height in pixels
        return request.param
    # NOTE(review): the "cuda" parametrization fails on machines without a
    # GPU — consider skipping when torch.cuda.is_available() is False.
    @pytest.fixture(scope="class", params=["cpu", "cuda"])
    def device(self, request):
        return torch.device(request.param)
    @pytest.fixture(scope="class")
    def inputs(self, device: torch.device) -> torch.Tensor:
        # batch of 8 random 3x256x256 feature maps
        return torch.rand(size=(8, 3, 256, 256), device=device, dtype=torch.float32)
    @pytest.fixture(scope="class")
    def boxes(
        self, device: torch.device, width: int, height: int, num_points: int
    ) -> torch.Tensor:
        # roi_align box format: (batch_index, x1, y1, x2, y2) per row
        ids = torch.arange(end=num_points, device=device).unsqueeze(1)
        x1 = torch.randint(high=width // 2, size=(num_points, 1), device=device)
        y1 = torch.randint(high=height // 2, size=(num_points, 1), device=device)
        x2, y2 = x1 + width, y1 + height
        return torch.cat([ids, x1, y1, x2, y2], dim=1).to(torch.float32)
    @pytest.mark.parametrize("aligned", [True, False])
    def test_forward(
        self, inputs: torch.Tensor, boxes: torch.Tensor, width: int, height: int, aligned: bool
    ):
        # NOTE(review): purpose of the 1s sleep is unclear (pacing? profiling?)
        # — it adds a second to every parametrized run; confirm it is needed.
        sleep(1)
        # Smoke test only: asserts roi_align runs without raising.
        roi_align(input=inputs, boxes=boxes, output_size=(width, height), aligned=aligned)
| 1,561 | 560 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : Ardalan Naseri
# Created Date: Mon September 21 2020
# =============================================================================
"""The module is a VCF reader to parse input VCF file."""
import gzip
import random
def eff_split(string_input, array_vals, delimeter='\t'):
    """Split *string_input* on *delimeter*, writing fields into *array_vals* in place.

    Reuses the caller's buffer instead of allocating a list per call; slots
    past the last field are left untouched.

    Bug fix: the loop previously ran only while ``start_pos < len(s) - 1``,
    which silently dropped a final field of length one.  A delimiter at
    position 0 is still folded into the first field (pre-existing quirk,
    preserved).
    """
    counter = 0
    start_pos = 0
    while start_pos < len(string_input):
        end_pos = start_pos + 1
        while end_pos < len(string_input) and string_input[end_pos] != delimeter:
            end_pos += 1
        array_vals[counter] = string_input[start_pos:end_pos]
        start_pos = end_pos + 1
        counter = counter + 1
class VCFReader:
    """Streaming reader for gzip-compressed VCF files.

    ``set_samples`` consumes header lines up to ``#CHROM`` to learn the
    sample names; each ``read_next_site`` call then parses one data line,
    leaving per-sample alleles in ``self.vals`` and validity in
    ``self.valid``.  Multi-allelic sites, indels, sites without a GT field
    and sites with missing alleles are flagged invalid instead of parsed.
    """
    def __init__(self, vcf_input_compressed):
        # Bug fix: text mode ("rt") is required — gzip.open defaults to
        # binary, and the str operations below ('#CHROM' in line,
        # str.replace, ...) raise TypeError on bytes under Python 3.
        self.vcf_file = gzip.open(vcf_input_compressed, "rt")
        self.samples = []        # sample names taken from the #CHROM line
        self.done = False        # set once EOF is reached
        self.vals = []           # allele (0 or 1) per sample for current site
        self.genome_pos = []     # POS column of every accepted site so far
        self.valid = True        # False when the current site was rejected
        self.entries_started = False  # True once the #CHROM line was seen
        self.inter_vals = None   # reusable field buffer filled by eff_split
        self._line = None        # raw text of the last data line read
    def set_samples(self):
        """Advance past the header, collecting sample names from #CHROM."""
        done = False
        while not done:
            line = self.vcf_file.readline()
            if not line:
                done = True
                self.done = True
                continue
            if '#CHROM' in line:
                self.entries_started = True
                i = 9  # the 9 fixed VCF columns precede the sample columns
                _values = line.replace("\n", "").split()
                while i < len(_values):
                    self.samples.append(_values[i])
                    i += 1
                self.vals = [0] * len(self.samples)
                # one buffer slot per column: 9 fixed fields + one per sample
                self.inter_vals = ['0|1'] * (len(self.samples) + 9)
                done = True
    def read_next_site(self):
        """Parse the next data line; return False at EOF, True otherwise.

        When True is returned, check ``self.valid``: rejected sites leave
        ``self.vals`` untouched.
        """
        site_counter = 0
        line = self.vcf_file.readline().replace("\n", "")
        self._line = line
        self.valid = True
        if not line:
            self.done = True
            self.vcf_file.close()
            return False
        if self.entries_started:
            eff_split(line, self.inter_vals, '\t')
            _pos = self.inter_vals[1]
            alt = self.inter_vals[4]
            if len(alt.split(',')) > 1:
                # multi-allelic site: reject
                self.valid = False
                return True
            # scan the fields for the GT tag; i ends at the first sample column
            i = 2
            while i < len(self.inter_vals) and self.inter_vals[i] != 'GT':
                i += 1
            i += 1
            if i >= len(self.inter_vals):
                self.valid = False
                return True
            tags = self.inter_vals[7]
            if len(self.inter_vals[3]) > 1 or len(self.inter_vals[4]) > 1:
                # REF or ALT longer than one base (indel): reject
                self.valid = False
                return True
            i = 9
            site_values = ''
            j = 0
            while i < len(self.inter_vals):
                site_values = self.inter_vals[i].replace("\n", '').split("|")
                if site_values[0] == '.' or len(site_values) < 2 or (len(site_values) > 1 and site_values[1] == '.'):
                    # missing allele or unphased/incomplete genotype: reject
                    self.valid = False
                    return True
                al1 = int(site_values[0])
                al2 = int(site_values[1])
                if al1 == al2:
                    self.vals[j] = al1
                else:
                    # heterozygous: pick one of the two alleles at random
                    self.vals[j] = random.randint(0, 1)
                j = j + 1
                i += 1
            self.genome_pos.append(self.inter_vals[1])
            site_counter = site_counter + 1
        return True
| 3,550 | 1,078 |
"""
Layout definitions
"""
from libqtile import layout
from .settings import COLS
from libqtile.config import Match
# Border settings shared by the tiling layouts below.
_layout_common_settings = dict(
    border_focus=COLS['purple_4'],
    border_normal=COLS['dark_1'],
    single_border_width=0,
)
# Same settings, but no focus border.
# NOTE(review): not referenced in this module — confirm it is consumed
# elsewhere before removing.
_max_layout_settings = {
    **_layout_common_settings,
    "border_focus": None
}
# Layouts
# Windows matching these rules always float.
floating_layout = layout.Floating(float_rules=[
    Match(wm_class='float'),
    Match(wm_class='floating'),
    Match(wm_class="zoom"),
])
# Selectable layouts: two MonadTall variants with different gaps, plus Max.
layouts = [
    layout.MonadTall(name='GapsBig', **_layout_common_settings, margin=192),
    layout.MonadTall(name='GapsSmall', **_layout_common_settings, margin=48),
    # layout.Floating(**_layout_common_settings),
    # layout.VerticalTile(name='VerticalTile'),
    layout.Max(name='Full', **_layout_common_settings),
    # layout.Zoomy(**_layout_common_settings),
    # layout.Slice(**_layout_common_settings),
]
| 899 | 320 |
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Profile, Neighbourhood, Post, Business
# Create your tests here.
class ProfileTestClass(TestCase):
    '''
    Test case for the Profile class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Profile class
        '''
        # Create instance of Profile class
        self.new_profile = Profile(bio="I am superwoman")
    def test_instance(self):
        '''
        Test case to check if self.new_profile in an instance of Profile class
        '''
        self.assertTrue(isinstance(self.new_profile, Profile))
    def test_get_other_profiles(self):
        '''
        Test case to check if all profiles are gotten from the database
        '''
        self.eliane = User(username="elly")
        self.eliane.save()
        # NOTE(review): rebinding self.eliane loses the reference to "elly"
        # (though that user is already saved).
        self.eliane = User(username="habibi")
        self.eliane.save()
        # NOTE(review): this profile is never save()d, so it cannot appear in
        # the queries below.
        self.test_profile = Profile(user=self.eliane, bio="Another Profile")
        gotten_profiles = Profile.get_other_profiles(self.eliane.id)
        profiles = Profile.objects.all()
        # NOTE(review): no assertion — this test can never fail; compare
        # gotten_profiles against profiles to make it meaningful.
# NOTE(review): this class shadows the Neighbourhood model imported above,
# and its body tests an Image class that is never imported/defined — setUp
# will raise NameError.  It looks copy-pasted from an Image test suite.
class Neighbourhood(TestCase):
    '''
    Test case for the Neighbourhood class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Profile class
        '''
        # Create a Image instance
        self.new_Image = Image(
            caption='hey')
    def test_instance(self):
        '''
        Test case to check if self.new_Image in an instance of Image class
        '''
        self.assertTrue(isinstance(self.new_Image, Image))
# NOTE(review): this class shadows the Post model imported above, and it
# tests Comment/Image classes that are never imported/defined — setUp will
# raise NameError.  It looks copy-pasted from a comments test suite.
class Post(TestCase):
    '''
    Test case for the Comment class
    '''
    def setUp(self):
        '''
        Method that creates an instance of Comment class
        '''
        # Create a Comment instance
        self.new_comment = Comment(
            comment_content='hey')
    def test_instance(self):
        '''
        Test case to check if self.new_comment in an instance of Comment class
        '''
        self.assertTrue(isinstance(self.new_comment, Comment))
    def test_get_Image_comments(self):
        '''
        Test case to check if get Image comments is getting comments for a specific Image
        '''
        self.eliane = User(username="eli")
        self.eliane.save()
        # NOTE(review): rebinds self.eliane; the "eli" user is still saved.
        self.eliane = User(username="habibi")
        self.eliane.save()
        # NOTE(review): profile/image/comment below are never save()d.
        self.test_profile = Profile(user=self.eliane, bio="Another Profile")
        self.test_Image = Image(user=self.eliane, caption="Another Profile")
        self.test_comment = Comment(
            Image=self.test_Image, comment_content="Wow")
        gotten_comments = Comment.get_Image_comments(self.test_Image.id)
        comments = Comment.objects.all()
        # No comments were saved so expect True
        self.assertTrue(len(gotten_comments) == len(comments))
| 2,865 | 803 |
from rest_framework.routers import SimpleRouter
from api.suids import views
# Router generating the standard CRUD routes for the Suid API.
router = SimpleRouter()
router.register(r'suids', views.SuidViewSet, basename='suid')
urlpatterns = router.urls
| 190 | 59 |
"""
Copyright (C) 2020 Nederlandse Organisatie voor Toegepast Natuur-
wetenschappelijk Onderzoek TNO / TNO, Netherlands Organisation for
applied scientific research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Maaike de Boer, Roos Bakker
@contact: maaike.deboer@tno.nl, roos.bakker@tno.nl
"""
import ast
# This script transforms POStagged text to a FLINT frame.
import json
from typing import Tuple
import pandas as pd
action_verbs = ['aanbrengen', 'aanwijzen', 'achterwege blijven', 'afnemen', 'afwijken', 'afwijzen',
'ambtshalve verlenen', 'ambtshalve verlengen', 'annuleren', 'behandelen', 'beheren', 'bepalen',
'beperken', 'betreden', 'beveiligen', 'bevelen', 'bevorderen', 'bieden gelegenheid', 'bijhouden',
'buiten behandeling stellen', 'buiten werking stellen', 'doorzoeken', 'erop wijzen',
'gebruiken maken van', 'gedwongen ontruimen', 'geven', 'heffen', 'in bewaring stellen',
'in de gelegenheid stellen zich te doen horen', 'in kennis stellen', 'in werking doen treden',
'in werking stellen', 'indienen', 'innemen', 'instellen', 'intrekken', 'invorderen', 'inwilligen',
'maken', 'naar voren brengen', 'nemen', 'niet in behandeling nemen', 'niet-ontvankelijk verklaren',
'nogmaals verlengen', 'om niet vervoeren', 'onderwerpen', 'onderzoeken', 'ongewenstverklaren',
'onmiddellijk bepalen', 'onmiddellijk verlaten', 'ontnemen', 'ontvangen', 'opheffen', 'opleggen',
'oproepen', 'overbrengen', 'overdragen', 'plaatsen', 'schorsen', 'schriftelijk in kennis stellen',
'schriftelijk laten weten', 'schriftelijk mededelen', 'schriftelijk naar voren brengen', 'signaleren',
'sluiten', 'staande houden', 'stellen', 'straffen', 'ter hand stellen', 'teruggeven',
'tijdelijk in bewaring nemen', 'toetsen', 'toezenden', 'uitstellen', 'uitvaardigen', 'uitzetten',
'van rechtswege verkrijgen', 'vaststellen', 'vergelijken', 'verhalen', 'verhogen', 'verklaren',
'verkorten', 'verkrijgen', 'verlaten', 'verlenen', 'verlengen', 'verplichten', 'verschaffen',
'verstrekken', 'verzoeken', 'voegen', 'vorderen', 'vragen', 'willigen', 'weigeren', 'wijzigen']
# POS tags marking pronouns / proper nouns — candidate actors.
set_propernouns = ["PRP", "PRP$", "NNP", "NNPS"]
list_act = []   # accumulates extracted act frames
list_fact = []  # accumulates extracted fact frames
# NOTE(review): 'global' at module level is a no-op, and facts_list is never
# assigned in this module — likely dead code.
global facts_list
def read_csv_to_df(csv_file):
    """Load *csv_file* into a pandas DataFrame, logging the source path."""
    frame = pd.read_csv(csv_file)
    print("csv loaded from " + csv_file)
    return frame
def write_df_to_csv(df, fle):
    """Write DataFrame *df* to path *fle* as CSV, logging the destination."""
    df.to_csv(fle)
    print("df written to " + fle)
def get_empty_flint_frame_format() -> dict:
    """Return a fresh, empty FLINT frame skeleton (acts / facts / duties)."""
    return {"acts": [], "facts": [], "duties": []}
def get_empty_act_frame() -> dict:
    """Return a fresh act frame: blank fields, trivially-true precondition."""
    return {
        "act": "",
        "actor": "",
        "action": "",
        "object": "",
        "recipient": "",
        "preconditions": {"expression": "LITERAL", "operand": True},
        "create": [],
        "terminate": [],
        # each source entry carries validFrom, validTo, citation and text
        "sources": [],
        "explanation": "",
    }
def get_empty_fact_frame() -> dict:
    """Return a fresh fact frame with all fields blank."""
    return {
        "fact": "",
        "function": [],
        # each source entry carries validFrom, validTo, citation and text
        "sources": [],
        "explanation": "",
    }
def get_source_dict(row, text, name_law) -> dict:
    """Build a FLINT source entry from a dataframe *row* and citation *text*.

    The citation is assembled from the row's 'jci 1.3' juriconnect string;
    when that string has no 'lid=' (paragraph) component, the paragraph part
    is omitted.  Newlines are stripped from *text* and tabs become spaces.

    Fix: the bare ``except`` is narrowed to IndexError (the only expected
    failure: ``split("lid=")[1]`` when 'lid=' is absent).
    """
    source_dict = {"validFrom": row["Versie"]}
    article = row['jci 1.3'].split("artikel=")[1].split('&')[0]
    try:
        paragraph = row['jci 1.3'].split("lid=")[1].split('&')[0]
        # NOTE(review): no space before "lid" — preserved from the original
        # output format; confirm whether "art. 5 lid 2" was intended.
        source_dict["citation"] = "art. " + article + "lid " + paragraph + ", " + name_law
    except IndexError:
        # 'lid=' absent from the juriconnect string: cite the article only.
        source_dict["citation"] = "art. " + article + ", " + name_law
    source_dict['text'] = text.replace('\n', '').replace('\r', '').replace("\t", " ")
    source_dict['juriconnect'] = row['jci 1.3']
    return source_dict
def create_fact_or_act_function(list_text: list) -> dict:
    """Build a FLINT function expression from a list of text fragments.

    Each usable fragment becomes a bracketed operand (whitespace stripped,
    text before the first ';', first character dropped).  With two or more
    operands the first is discarded (it is the empty split artifact at the
    start) and an AND expression is returned; otherwise the literal True.
    """
    fact_function = {"expression": "AND"}
    fact_function_operands = []
    for fct in list_text:
        try:
            # Skip structural markers ('Onderdeel', 'Lid') and too-short fragments.
            if 'Onderdeel' not in fct and 'Lid' not in fct and len(fct) > 3:
                fact_function_operands.append(
                    "[" + fct.replace('\n', '').replace('\r', '').split(";")[0].replace("\t", "")[1:] + "]")
        except (TypeError, AttributeError):
            # Non-string entries (e.g. NaN floats from pandas) cannot be
            # searched/replaced — skip them.  (Was a bare `except:`.)
            continue
    # get rid of the empty list at the beginning
    if len(fact_function_operands) > 1:
        fact_function_operands.pop(0)
        fact_function["operands"] = fact_function_operands
    else:
        fact_function = {
            "expression": "LITERAL",
            "operand": True
        }
    return fact_function
def get_object_and_actor(orig, tags) -> Tuple[str, str]:
    """Heuristically extract the actor and object for the verb *orig*.

    *tags* is the nested constituency/POS structure stored by the tagging
    step (each entry: phrase label followed by (token, tag) pairs —
    presumably; confirm against the POS-tagging producer).  Returns an
    ``(actor, obj)`` pair of strings; both typically start with a space.
    """
    vp_found = False
    obj = ""
    actor_num = -1
    # check the index of the verb
    for i in range(0, len(tags)):
        try:
            # find the VP
            if tags[i][0] == "VP" and (tags[i][len(tags[i][0])][0] == orig):
                vp_found = True
            for num in range(1, len(tags[i])):
                # get the first NP; this is the object
                # TODO: version 2: create better code using dependencies to determine the object and actor
                if not vp_found:
                    # bug fix: no lower, because the link to the actor is gone then
                    obj += " " + (str(tags[i][num][0]))
                # only add NPs if they are in the same sentence as the VP of the act
                if "$" in str(tags[i][num][0]) and not vp_found:
                    obj = ""
                # try to find the actor and recipient
                # Hack: make a list of characters and check whether the first is uppercased (capitalized)
                if tags[i][num][1] in set_propernouns and list(tags[i][num][0])[0].isupper() and actor_num < 0:
                    list_non_actors = ['Onderdeel', 'Lid', 'Indien', 'Tenzij', 'Onverminderd', 'Nadat']
                    if not (any(non_actor in tags[i][num][0] for non_actor in list_non_actors)):
                        # print(tags[i][num])
                        actor_num = i
        except:
            # if tags[i][len(tags[i][0])][0] or tags[i][0] does not exist, we have an error
            'do nothing'
    # the actor is the NP of the actor_num (number in the tags)
    actor = ""
    # fixed bug: bigger than -1 if the word occurs as the first word
    if actor_num > -1:
        # range starts with 1, because 0 is the type NP
        for nr in range(1, len(tags[actor_num])):
            actor += " " + tags[actor_num][nr][0]
    # hacks to get a better object
    if len(actor) > 1 and actor in obj:
        obj = obj.replace(actor, "")
    if "kan" in obj:
        obj = obj.replace("kan", "")
    return actor, obj
def check_infinitive(inf, row) -> bool:
    """Return True when *inf* is a known action verb NOT used as a noun.

    A verb preceded by a Dutch determiner ('het', 'de', 'een') in the source
    text is a nominalization, not an act.

    Bug fix: the original expression parsed as
    ``(inf in action_verbs and not A) or B or C`` because ``not`` binds
    tighter than ``or``, so any determiner match returned True even for
    words that are not action verbs at all.  Intended semantics:
    ``in action_verbs AND not (A or B or C)``.
    """
    text = row['Brontekst']
    return inf in action_verbs and not (
        ("het " + inf) in text
        or ("de " + inf) in text
        or ("een " + inf) in text)
# This is a first version!
def get_acts(row, verbs, tags, flint_frames, name_law) -> dict:
    """Create FLINT act frames for every qualifying verb in *row*.

    *verbs* maps infinitive -> original surface form; *tags* is the parse
    structure consumed by get_object_and_actor.  Appends new act frames to
    ``flint_frames['acts']`` (and records them in the module-level
    ``list_act``) and returns the updated *flint_frames*.
    """
    # for each verb (if one verb this also works)
    for infinitive, original in verbs.items():
        # if the verb is in the first part (before the :) (could be more verbs)
        parts = row['Brontekst'].split(":")
        # acts are not those that have a determiner before it; Dutch determiners
        # are 'de', 'het' and 'een'.
        # acts are not those that have 'indien' as a form of 'indienen'
        if check_infinitive(infinitive, row) and not original == 'indien':
            act_frame = get_empty_act_frame()
            list_act.append([original, row['Brontekst']])
            act_frame['action'] = "[" + infinitive + "]"
            # if we know that there should be preconditions, add them
            if ":" in row['Brontekst'] and original in parts[0]:
                act_function = create_fact_or_act_function(''.join(parts[1:]).split("$"))
                act_frame['preconditions'] = act_function
                # TODO in version 2: make a fact of the pre-condition
            actor, obj = get_object_and_actor(original, tags)
            # hack: first character is a space; use from second on
            act_frame['actor'] = "[" + actor[1:] + "]"
            act_frame['act'] = "<<" + infinitive + obj.lower() + ">>"
            act_frame['object'] = "[" + obj[1:].lower() + "]"
            # TODO in version 2: make code better; now only vreemdeling as recipient
            if "vreemdeling" in row['Brontekst']:
                act_frame['recipient'] = "[vreemdeling]"
            source_dict_act = get_source_dict(row, row['Brontekst'], name_law)
            act_frame['sources'].append(source_dict_act)
            flint_frames['acts'].append(act_frame)
    return flint_frames
def get_facts(row, part, name_law) -> dict:
    """Create one FLINT fact frame from a '<name>:<definition>' text *part*.

    Records the fact name in the module-level ``facts_list`` (deduplication
    happens in the caller) and returns the populated fact frame.
    """
    global facts_list
    fact_frame = get_empty_fact_frame()
    source_dict = get_source_dict(row, part, name_law)
    fact_frame['sources'].append(source_dict)
    # The fact has to be in between brackets; the leading character of the
    # name (a space from the upstream split) is dropped via [1:].
    fact_frame['fact'] = "[" + part.split(":")[0][1:] + "]"
    facts_list.append(part.split(":")[0][1:])
    # create the function. In case of Artikel 1 this is the (one) definition that is after the :
    list_defs = [part.split(":")[1]]
    fact_function = create_fact_or_act_function(list_defs)
    fact_frame['function'] = fact_function
    return fact_frame
def create_flint_frames(df, name_law) -> dict:
    """Walk the POS-tagged law DataFrame and build the full FLINT structure.

    Facts are harvested from 'Artikel1' rows (the definitions article);
    acts from every row that has parsed verbs.  Resets the module-level
    ``facts_list`` and returns the assembled frames dict.
    """
    flint_frames = get_empty_flint_frame_format()
    global facts_list
    facts_list = []
    # loop through the rows and create acts and facts as we go
    for index, row in df.iterrows():
        # we start with Facts that are present in the First Article
        # Bug Fix: able to handle all prefixes before Artikel1
        # NOTE(review): `type(...) != float` filters pandas NaN cells;
        # isinstance would be the idiomatic check.
        if str(row['Nummer'].split("/")[len(row['Nummer'].split("/")) - 1]) == 'Artikel1' and type(
                row['Brontekst']) != float:
            for part in row['Brontekst'].split("$"):
                if ":" in part and not "Onderdeel" in part:
                    # Deduplicate by fact name; require a non-trivial definition.
                    if part.split(":")[0][1:] not in facts_list and len(part.split(":")[1]) > 2:
                        # Facts
                        list_fact.append([part.split(":")[0][1:], part.split(":")[1].split(";")[0]])
                        fact_frame = get_facts(row, part, name_law)
                        flint_frames['facts'].append(fact_frame)
        # Acts: only if we have verbs
        if not "[]" == row['verbs']:
            # hack: make it a dict / list again as we load in a dataframe from another format
            verbs = ast.literal_eval(row['verbs'])
            tags = ast.literal_eval(row['tags'])
            # because more than one act_frame could be created, go on the level of the flint_frames
            flint_frames = get_acts(row, verbs, tags, flint_frames, name_law)
        else:
            'no acts'
    return flint_frames
def write_flint_frames_to_json(flint_frames, flint_file):
    """Serialize *flint_frames* to *flint_file* as JSON and report the path."""
    path = str(flint_file)
    with open(path, 'w') as handle:
        json.dump(flint_frames, handle)
    print("flint frames written to " + path)
def dataframe_to_frame_parser(csv_file, output_file):
    """End-to-end step: POS-tagged CSV in, FLINT frames JSON out.

    The law name is taken from the last underscore-separated part of the
    file name (or the one before it when the file ends in '_postagged').
    """
    name_law = csv_file.split("_")[len(csv_file.split("_")) - 1].split(".")[0]
    if name_law == 'postagged':
        name_law = csv_file.split("_")[len(csv_file.split("_")) - 2].split(".")[0]
    pos_tagged_df = read_csv_to_df(str(csv_file))
    flint_frames = create_flint_frames(pos_tagged_df, name_law)
    write_flint_frames_to_json(flint_frames, output_file)
# if __name__ == '__main__':
# method = "TOGS"
# base = 'C:\\Users\\boermhtd\\PycharmProjects\\calculemus\\nlp\\data\\csv_files\\postagged\\'
# if method == "TOGS":
# csv_file = base + 'BWBR0043324_2020-03-31_0_TOGS_postagged.csv'
# elif method == "TOZO":
# csv_file = base + 'BWBR0043402_2020-04-22_0_TOZO_postagged.csv'
# elif method == "AWB":
# csv_file = base + 'BWBR0005537_2020-04-15_0_AWB_postagged.csv'
#
# #'BWBR0011823_2019-02-27_Vreemdelingenwet_postagged.csv'
#
# output_file = method + '_new.json'
# dataframe_to_frame_parser(csv_file, output_file)
#
# act_file = "acts_" + method + ".csv"
# df_act = pd.DataFrame(list_act, columns = ['action', 'sentence'])
# df_act.to_csv(act_file, index=False)
#
# fact_file = "facts_" + method + ".csv"
# df_fact = pd.DataFrame(list_fact, columns = ['fact', 'definition'])
# df_fact.to_csv(fact_file, index=False)
# # df = read_csv_to_df(str(csv_file))
# flint_frames = create_flint_frames(df)
# write_flint_frames_to_json(flint_frames)
| 13,944 | 4,661 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from celery import Celery, platforms
# Celery application named "task"; broker/result settings are read from the
# celery_task/celery_config module below.
app = Celery("task")
app.config_from_object('celery_task.celery_config')
# Allow the worker to start as root (required in some container setups).
platforms.C_FORCE_ROOT = True
| 208 | 77 |
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Union
import numpy as np
from .builder import NODES
from .node import MultiInputNode, Node
# mmdet is an optional dependency: record availability so DetectorNode can
# fail with a clear message at construction time instead of at import time.
try:
    from mmdet.apis import inference_detector, init_detector
    has_mmdet = True
except (ImportError, ModuleNotFoundError):
    has_mmdet = False
@NODES.register_module()
class DetectorNode(Node):
    """Node that runs an MMDetection model on each incoming frame image and
    attaches the per-instance predictions to the message under this node's
    name tag."""

    def __init__(self,
                 name: str,
                 model_config: str,
                 model_checkpoint: str,
                 input_buffer: str,
                 output_buffer: Union[str, List[str]],
                 enable_key: Optional[Union[str, int]] = None,
                 enable: bool = True,
                 device: str = 'cuda:0'):
        # Check mmdetection is installed
        assert has_mmdet, \
            f'MMDetection is required for {self.__class__.__name__}.'
        super().__init__(name=name, enable_key=enable_key, enable=enable)
        self.model_config = model_config
        self.model_checkpoint = model_checkpoint
        self.device = device.lower()
        # Init model
        self.model = init_detector(
            self.model_config, self.model_checkpoint, device=self.device)
        # Register buffers
        self.register_input_buffer(input_buffer, 'input', trigger=True)
        self.register_output_buffer(output_buffer)

    def bypass(self, input_msgs):
        # When disabled, forward the input message unchanged.
        return input_msgs['input']

    def process(self, input_msgs):
        """Run detection on the message's image and attach the result."""
        input_msg = input_msgs['input']
        img = input_msg.get_image()
        preds = inference_detector(self.model, img)
        det_result = self._post_process(preds)
        input_msg.add_detection_result(det_result, tag=self.name)
        return input_msg

    def _post_process(self, preds):
        """Convert raw mmdet output into {'preds': [...], 'model_cfg': ...}.

        ``preds`` is either a per-class list of bbox arrays, or a
        ``(bboxes, segms)`` tuple when the model also predicts masks.
        """
        if isinstance(preds, tuple):
            dets = preds[0]
            segms = preds[1]
        else:
            dets = preds
            # No mask branch: pad with None so the zip below still lines up.
            segms = [None] * len(dets)
        det_model_classes = self.model.CLASSES
        # A single-class model may expose CLASSES as a bare string.
        if isinstance(det_model_classes, str):
            det_model_classes = (det_model_classes, )
        assert len(dets) == len(det_model_classes)
        assert len(segms) == len(det_model_classes)
        result = {'preds': [], 'model_cfg': self.model.cfg.copy()}
        for i, (cls_name, bboxes,
                masks) in enumerate(zip(det_model_classes, dets, segms)):
            if masks is None:
                masks = [None] * len(bboxes)
            else:
                assert len(masks) == len(bboxes)
            preds_i = [{
                'cls_id': i,
                'label': cls_name,
                'bbox': bbox,
                'mask': mask,
            } for (bbox, mask) in zip(bboxes, masks)]
            result['preds'].extend(preds_i)
        return result
@NODES.register_module()
class MultiFrameDetectorNode(DetectorNode, MultiInputNode):
    """Detect hand with one frame in a video clip. The length of clip is
    decided on the frame rate and the inference speed of detector.

    Parameters:
        inference_frame (str): indicate the frame selected in a clip to run
            detect hand. Can be set to ('begin', 'mid', 'last').
            Default: 'mid'.
    """

    def __init__(self,
                 name: str,
                 model_config: str,
                 model_checkpoint: str,
                 input_buffer: str,
                 output_buffer: Union[str, List[str]],
                 inference_frame: str = 'mid',
                 enable_key: Optional[Union[str, int]] = None,
                 device: str = 'cuda:0'):
        # Reuse DetectorNode's setup (model loading, buffer registration).
        DetectorNode.__init__(
            self,
            name,
            model_config,
            model_checkpoint,
            input_buffer,
            output_buffer,
            enable_key,
            device=device)
        self.inference_frame = inference_frame

    def process(self, input_msgs):
        """Select frame and detect hand."""
        input_msg = input_msgs['input']
        # Pick the key frame of the clip according to `inference_frame`.
        if self.inference_frame == 'last':
            key_frame = input_msg[-1]
        elif self.inference_frame == 'mid':
            key_frame = input_msg[len(input_msg) // 2]
        elif self.inference_frame == 'begin':
            key_frame = input_msg[0]
        else:
            raise ValueError(f'Invalid inference_frame {self.inference_frame}')
        img = key_frame.get_image()
        preds = inference_detector(self.model, img)
        det_result = self._post_process(preds)
        # Stack all frames of the clip into the key frame's image so that
        # downstream nodes receive the whole clip alongside the detection.
        imgs = [frame.get_image() for frame in input_msg]
        key_frame.set_image(np.stack(imgs, axis=0))
        key_frame.add_detection_result(det_result, tag=self.name)
        return key_frame
| 4,677 | 1,386 |
class HaltSignal(Exception):
    """Raised when the state machine has no transition to follow.

    (The previous no-op ``__init__`` that only called ``super().__init__()``
    was redundant and has been removed; behavior is identical.)
    """
class Machine:
    """A minimal finite-state machine.

    States are duck-typed objects exposing ``name``, ``on_start(context)``,
    ``check_transitions(context)`` and ``is_final_state()``; transitions
    expose ``next_state`` (a state name) and ``callback(context)``.
    Visited state names are recorded in ``self.history``.
    """

    def __init__(self, initial_state, possible_states):
        """Start at *initial_state*, choosing subsequent states from
        *possible_states*."""
        self.history = [initial_state.name]
        self.possible_states = possible_states
        self.current_state = initial_state

    def find_state_by_name(self, name):
        """Return the state called *name*; raise RuntimeError if absent."""
        filtered = list(filter(lambda x: name == x.name, self.possible_states))
        if len(filtered):
            return filtered[0]
        else:
            names = [
                p.name
                for p in self.possible_states
            ]
            raise RuntimeError(f"State '{name}' not found in possible states: {names}")

    def run_interaction(self, context: dict):
        """Run one step: enter the current state, follow its transition.

        Raises HaltSignal when the state yields no transition.
        """
        context = self.current_state.on_start(context)
        transition = self.current_state.check_transitions(context)
        # Bug fix: check for a missing transition BEFORE dereferencing it.
        # Previously `transition.next_state` was accessed first, so a None
        # transition raised AttributeError instead of the intended HaltSignal.
        if transition is None:
            raise HaltSignal()
        self.current_state = self.find_state_by_name(transition.next_state)
        context = transition.callback(context)
        self.history.append(self.current_state.name)
        return context

    def run(self):
        """Step the machine until a final state, then enter that state."""
        context = {}
        while self.current_state and not self.current_state.is_final_state():
            context = self.run_interaction(context)
        self.current_state.on_start(context)
| 1,325 | 372 |
from .array import Array
from .grid import Grid
class Cube(object):
    """Three-dimensional array: a stack of ``deep`` two-dimensional grids."""

    def __init__(self, nrows, ncols, deep, value=None) -> None:
        """Allocate ``deep`` layers of ``nrows`` x ``ncols`` grids, each cell
        optionally pre-filled with *value*."""
        self.data = Array(deep)
        for layer in range(deep):
            self.data[layer] = Grid(nrows, ncols, value)

    def __getdeep__(self) -> int:
        """Return the number of layers (the depth) of the cube."""
        return len(self.data)

    def __str__(self) -> str:
        """Render the cube as the concatenation of its layers, one per line."""
        return "".join(str(self.data[layer]) + "\n"
                       for layer in range(len(self.data)))
| 708 | 217 |
import os
import sys
import unittest
import uuid
from nanome import PluginInstance
from nanome.api.plugin_instance import _DefaultPlugin
from nanome.api import structure, ui
from nanome.util import enums, Vector3, Quaternion, config
if sys.version_info.major >= 3:
from unittest.mock import MagicMock
else:
# Python 2.7 way of getting magicmock. Requires pip install mock
from mock import MagicMock
class PluginInstanceTestCase(unittest.TestCase):
    """Smoke tests for the public PluginInstance API.

    The networking layer is fully mocked in setUp, so each test only checks
    that the corresponding call runs without raising against a mocked
    network — no return values are asserted unless stated.
    """

    def setUp(self):
        self.custom_data = {'a': 'b'}
        self.plugin_instance = PluginInstance()
        # Mock args that are passed to setup plugin instance networking
        session_id = plugin_network = proc_pipe = log_pipe_conn = \
            original_version_table = permissions = MagicMock()
        self.plugin_instance._setup(
            session_id, plugin_network, proc_pipe, log_pipe_conn,
            original_version_table, self.custom_data, permissions
        )
        self.plugin_instance._network = MagicMock()

    # --- lifecycle callbacks: must be callable without arguments ---
    def test_on_advanced_settings(self):
        self.plugin_instance.on_advanced_settings()

    def test_on_complex_added(self):
        self.plugin_instance.on_complex_added()

    def test_on_complex_removed(self):
        self.plugin_instance.on_complex_removed()

    def test_on_presenter_change(self):
        self.plugin_instance.on_presenter_change()

    def test_on_run(self):
        self.plugin_instance.on_run()

    def test_on_stop(self):
        self.plugin_instance.on_stop()

    # --- structure / workspace operations ---
    def test_add_bonds(self):
        comp = structure.Complex()
        self.plugin_instance.add_bonds([comp])

    def test_add_dssp(self):
        comp = structure.Complex()
        self.plugin_instance.add_dssp([comp])

    def test_add_to_workspace(self):
        comp = structure.Complex()
        self.plugin_instance.add_to_workspace([comp])

    def test_add_volume(self):
        comp = structure.Complex()
        volume = None
        properties = {}
        self.plugin_instance.add_volume(comp, volume, properties)

    def test_apply_color_scheme(self):
        color_scheme = enums.ColorScheme.Rainbow
        target = enums.ColorSchemeTarget.All
        only_carbons = False
        self.plugin_instance.apply_color_scheme(color_scheme, target, only_carbons)

    def test_center_on_structures(self):
        self.plugin_instance.center_on_structures([1])

    # --- streams ---
    def test_create_atom_stream(self):
        index_list = [1, 2, 3]
        stream_type = enums.StreamType.color
        callback = None
        self.plugin_instance.create_atom_stream(index_list, stream_type, callback)

    def test_create_reading_stream(self):
        index_list = [1, 2, 3]
        stream_type = enums.StreamType.color
        self.plugin_instance.create_reading_stream(index_list, stream_type)

    def test_create_shape(self):
        shape_type = enums.ShapeType.Sphere
        self.plugin_instance.create_shape(shape_type)

    def test_create_stream(self):
        index_list = [1, 2, 3]
        stream_type = enums.StreamType.color
        self.plugin_instance.create_stream(index_list, stream_type)

    def test_create_writing_stream(self):
        index_list = [1, 2, 3]
        stream_type = enums.StreamType.color
        self.plugin_instance.create_writing_stream(index_list, stream_type)

    def test_open_url(self):
        url = 'nanome.ai'
        self.plugin_instance.open_url(url, desktop_browser=True)
        self.plugin_instance.open_url(url, desktop_browser=False)

    def test_plugin_files_path(self):
        # Change config to be a file that doesn't exist, so we test creating new directory.
        starting_file_path = config.fetch('plugin_files_path')
        new_file_path = '/tmp/' + str(uuid.uuid4())
        config.set('plugin_files_path', new_file_path)
        self.plugin_instance.plugin_files_path
        config.set('plugin_files_path', starting_file_path)

    # --- request_* API calls ---
    def test_request_complex_list(self):
        self.plugin_instance.request_complex_list()

    def test_request_complexes(self):
        comp = structure.Complex()
        self.plugin_instance.request_complexes([comp.index])

    def test_request_controller_transforms(self):
        self.plugin_instance.request_controller_transforms()

    def test_request_directory(self):
        path = '/path/to/file'
        self.plugin_instance.request_directory(path)

    def test_request_export(self):
        entity = structure.Complex()
        self.plugin_instance.request_export(enums.ExportFormats.SDF, entities=entity)

    def test_request_files(self):
        self.plugin_instance.request_files([])

    def test_request_menu_transform(self):
        self.plugin_instance.request_menu_transform(0)

    def test_request_presenter_info(self):
        self.plugin_instance.request_presenter_info()

    def test_menu(self):
        # Exercise both the getter and the setter of the menu property.
        self.plugin_instance.menu
        self.plugin_instance.menu = ui.Menu()
        self.plugin_instance.menu

    def test_request_workspace(self):
        self.plugin_instance.request_workspace()

    def test_save_files(self):
        self.plugin_instance.save_files([])

    def test_send_files_to_load(self):
        test_assets = os.getcwd() + ("/testing/test_assets")
        test_path = test_assets + "/test_menu.json"
        # Test different input formats supported by function
        file_list = [test_path]
        test_tuple = (test_path, 'test_menu.json')
        self.plugin_instance.send_files_to_load(file_list)
        self.plugin_instance.send_files_to_load(test_path)
        self.plugin_instance.send_files_to_load(test_tuple)

    def test_send_notification(self):
        notif_type = enums.NotificationTypes.success
        msg = 'Success!'
        self.plugin_instance.send_notification(notif_type, msg)

    def test_set_menu_transform(self):
        index = 1
        position = Vector3(0, 0, 0)
        rotation = Quaternion(1, 1, 1, 1)
        scale = Vector3(1, 2, 3)
        self.plugin_instance.set_menu_transform(index, position, rotation, scale)

    def test_set_plugin_list_button(self):
        btn_type = enums.PluginListButtonType.run
        self.plugin_instance.set_plugin_list_button(btn_type)
        advanced_settings_btn_type = enums.PluginListButtonType.advanced_settings
        self.plugin_instance.set_plugin_list_button(
            advanced_settings_btn_type, text='test_text', usable=True)

    # --- update / lifecycle ---
    def test_start(self):
        self.plugin_instance.start()

    def test_update(self):
        self.plugin_instance.update()

    def test_update_content(self):
        # Single content item and list of items are both supported.
        btn = ui.Button()
        self.plugin_instance.update_content(btn)
        self.plugin_instance.update_content([btn])

    def test_update_menu(self):
        menu = ui.Menu()
        self.plugin_instance.update_menu(menu)

    def test_update_node(self):
        node = ui.LayoutNode()
        self.plugin_instance.update_node(node)
        self.plugin_instance.update_node([node])

    def test_update_structures_deep(self):
        comp = structure.Complex()
        self.plugin_instance.update_structures_deep([comp])

    def test_update_structures_shallow(self):
        comp = structure.Complex()
        self.plugin_instance.update_structures_shallow([comp])

    def test_update_workspace(self):
        workspace = structure.Workspace()
        self.plugin_instance.update_workspace(workspace)

    def test_zoom_on_structures(self):
        comp = structure.Complex()
        self.plugin_instance.zoom_on_structures([comp])

    def test_default_plugin(self):
        # Make sure we can instantiate _DefaultPlugin
        default_plugin = _DefaultPlugin()
        self.assertTrue(isinstance(default_plugin, PluginInstance))

    def test_custom_data(self):
        self.assertEqual(self.plugin_instance.custom_data, self.custom_data)
| 8,015 | 2,562 |
"""
Tests of bandits.
"""
import numpy as np
import pytest
from unittest import TestCase
from bandit.bandit import (
CustomBandit,
EpsGreedyBandit,
GreedyBandit,
RandomBandit,
)
from bandit.environment import Environment
from bandit.reward import GaussianReward
class BanditTestCase(TestCase):
    """Shared fixture: an Environment with five Gaussian reward arms."""

    def setUp(self):
        super().setUp()
        N = 5
        self.n_rewards = N
        self.env = Environment([GaussianReward() for _ in range(N)])
class TestCustomBandit(BanditTestCase):
    """CustomBandit is abstract: choose_action must be supplied by subclasses."""

    def test_not_implemented_error(self):
        with pytest.raises(NotImplementedError):
            CustomBandit(self.env).choose_action()
class TestRandomBandit(BanditTestCase):
    """RandomBandit: construction, history bookkeeping and action sampling."""

    def test_smoke(self):
        b = RandomBandit(self.env)
        assert isinstance(b, RandomBandit)
        assert isinstance(b.environment, Environment)
        assert len(b.environment) == self.n_rewards
        assert b.reward_history == []
        assert b.choice_history == []

    def test_history(self):
        # history grows by one (reward, choice) pair per action; len(b)
        # tracks the number of actions taken.
        b = RandomBandit(self.env)
        rh, ch = b.history
        assert rh == []
        assert ch == []
        assert len(b) == 0
        _ = b.action()
        rh, ch = b.history
        assert len(rh) == 1
        assert len(ch) == 1
        assert isinstance(rh[0], float)
        assert isinstance(ch[0], int)
        assert len(b) == 1
        for _ in range(99):
            _ = b.action()
        rh, ch = b.history
        assert len(rh) == 100
        assert len(ch) == 100
        assert isinstance(rh[99], float)
        assert isinstance(ch[99], int)
        assert len(b) == 100

    def test_choose_action(self):
        b = RandomBandit(self.env)
        assert isinstance(b.choose_action(), int)

    def test_action(self):
        b = RandomBandit(self.env)
        a = b.action()
        assert isinstance(b.action(), float)
        a2 = b.action()
        assert a != a2  # unless we get very unlucky

    def test_values(self):
        # Default value estimates are zeros; an explicit list is taken as-is.
        b = RandomBandit(self.env)
        assert b.values == [0.0] * len(self.env)
        b = RandomBandit(self.env, values=[1.0] * len(self.env))
        assert b.values == [1.0] * len(self.env)
class TestGreedyBandit(BanditTestCase):
    """GreedyBandit returns numpy integer choices and float rewards."""

    def test_choose_action(self):
        b = GreedyBandit(self.env)
        assert np.issubdtype(b.choose_action(), np.integer)
        assert np.issubdtype(b.action(), np.floating)
class TestEpsGreedyBandit(BanditTestCase):
    """EpsGreedyBandit across representative exploration rates."""

    def test_choose_action(self):
        # Pure exploration (1.0), pure exploitation (0.0) and a mix (0.1):
        # one parameterized loop replaces three copy-pasted stanzas, and
        # subTest reports each eps value separately on failure.
        for eps in (1.0, 0.0, 0.1):
            with self.subTest(eps=eps):
                b = EpsGreedyBandit(self.env, eps)
                assert hasattr(b, "eps")
                assert b.eps == eps
                assert np.issubdtype(b.choose_action(), np.integer)
                assert np.issubdtype(b.action(), np.floating)
| 3,175 | 1,104 |
from .release import __version__
from .generate import generate, mark_dirty, dirty, clean
from .exceptions import ParsimonyException
from . import generators
from . import configuration
from . import persistence
from .defaults import set_defaults

# Apply package-wide default configuration at import time.
# (Fix: stray extraction artifact "| 265 | 70 |" was fused onto this line,
# making the module unimportable.)
set_defaults()
from pyrecard.utils.pyrequest import pyrequest
PLAN_PATH = '/assinaturas/v1/plans'
def create(json):
    """POST a new subscription plan payload to the plans endpoint.

    NOTE: the parameter name `json` shadows the stdlib module; kept for
    backward compatibility with keyword callers.
    """
    return pyrequest('POST', PLAN_PATH, json)
def alter(plan_code, json):
    """PUT an updated payload for the plan identified by *plan_code*."""
    return pyrequest('PUT', f'{PLAN_PATH}/{plan_code}', json)
def activate(plan_code):
    """Activate the plan identified by *plan_code*."""
    return pyrequest('PUT', f'{PLAN_PATH}/{plan_code}/activate')
def inactivate(plan_code):
    """Deactivate the plan identified by *plan_code*."""
    return pyrequest('PUT', f'{PLAN_PATH}/{plan_code}/inactivate')
def fetch(plan_code):
    """Fetch a single plan by *plan_code*."""
    return pyrequest('GET', f'{PLAN_PATH}/{plan_code}')
def fetch_all():
    """Fetch every subscription plan."""
    return pyrequest('GET', PLAN_PATH)
| 569 | 218 |
# Packaging script for the Aptly-Api-Cli command line tool.
try:
    from setuptools import setup, find_packages
    from pkg_resources import Requirement, resource_filename
except ImportError:
    # NOTE(review): distutils.core provides no find_packages, so this
    # fallback itself raises ImportError when setuptools is missing;
    # Requirement/resource_filename would also be undefined — verify intent.
    from distutils.core import setup, find_packages
setup(
    name='Aptly-Api-Cli',
    version='0.1',
    url='https://github.com/TimSusa/aptly_api_cli',
    license='MIT',
    keywords="aptly aptly-server debian",
    author='Tim Susa',
    author_email='timsusa@gmx.de',
    description='This cli executes remote calls to the Aptly server, without blocking the Aptly database.',
    long_description=__doc__,
    packages=find_packages(),
    package_dir={'aptly_cli': 'aptly_cli'},
    # packages=['aptly_cli', 'aptly_cli.api', 'aptly_cli.cli', 'aptly_cli.util'],
    # py_modules=['aptly_cli.api.api', 'cli'],
    entry_points={
        'console_scripts': [
            'aptly-cli=aptly_cli.cli.cli:main'
        ]
    },
    # data_files=[
    #     ('configs', ['configs/aptly-cli.conf']),
    # ],
    # package_data={'configs': ['aptly_cli/configs/aptly-cli.conf']},
    platforms='any'
)
# Resolve the packaged default config path via pkg_resources (imported above).
# NOTE(review): this runs on every invocation of setup.py and its result is
# unused here — presumably a leftover; confirm before removing.
filename = resource_filename(Requirement.parse("Aptly-Api-Cli"), "configs/aptly-cli.conf")
| 1,126 | 393 |
class Employee:
    """A minimal employee record with a derived e-mail address."""

    def __init__(self, first, last, pay):
        """Store the names and pay; the email is <first><last>@123.com."""
        self.first = first
        self.last = last
        self.email = f"{first}{last}@123.com"
        self.pay = pay

    def fullname(self):
        """Return the employee's full name as "<first> <last>"."""
        return f"{self.first} {self.last}"
# Demo: build two Employee records and print them.
emp_1 = Employee('hello','world',1900)
emp_2 = Employee('test','world',2000)
# NOTE(review): Employee defines no __repr__, so these two prints emit the
# default "<...Employee object at 0x...>" representation.
print(emp_1)
print(emp_2)
print(emp_1.fullname())
print(emp_2.fullname())
| 417 | 159 |
"""
PASSENGERS
"""
numPassengers = 2290
passenger_arriving = (
(0, 5, 9, 3, 0, 0, 3, 7, 5, 2, 1, 0), # 0
(2, 4, 10, 6, 0, 0, 2, 3, 4, 2, 4, 0), # 1
(4, 9, 7, 4, 2, 0, 4, 5, 7, 4, 6, 0), # 2
(9, 9, 6, 4, 1, 0, 9, 8, 2, 5, 3, 0), # 3
(4, 6, 4, 8, 2, 0, 5, 6, 7, 3, 3, 0), # 4
(4, 3, 5, 3, 1, 0, 5, 8, 2, 2, 0, 0), # 5
(2, 2, 4, 5, 2, 0, 1, 1, 7, 1, 1, 0), # 6
(2, 3, 3, 5, 1, 0, 3, 2, 4, 2, 2, 0), # 7
(2, 7, 5, 5, 0, 0, 6, 3, 4, 8, 1, 0), # 8
(1, 9, 7, 4, 2, 0, 8, 7, 4, 8, 2, 0), # 9
(3, 5, 7, 3, 0, 0, 4, 10, 4, 3, 3, 0), # 10
(1, 4, 6, 2, 1, 0, 2, 3, 6, 8, 0, 0), # 11
(5, 2, 1, 2, 0, 0, 4, 9, 6, 2, 1, 0), # 12
(5, 1, 5, 3, 1, 0, 2, 4, 3, 7, 1, 0), # 13
(3, 6, 6, 2, 1, 0, 5, 4, 0, 4, 0, 0), # 14
(4, 2, 7, 2, 1, 0, 7, 10, 7, 4, 2, 0), # 15
(4, 6, 5, 5, 1, 0, 1, 14, 4, 1, 1, 0), # 16
(3, 5, 4, 2, 3, 0, 3, 5, 2, 6, 1, 0), # 17
(4, 4, 8, 2, 2, 0, 3, 5, 6, 3, 0, 0), # 18
(2, 7, 7, 2, 0, 0, 7, 2, 6, 1, 3, 0), # 19
(3, 7, 7, 2, 0, 0, 8, 9, 3, 1, 2, 0), # 20
(2, 8, 6, 2, 1, 0, 5, 5, 4, 3, 0, 0), # 21
(4, 6, 4, 1, 3, 0, 7, 4, 4, 5, 1, 0), # 22
(1, 5, 4, 3, 1, 0, 1, 5, 3, 5, 3, 0), # 23
(2, 9, 4, 1, 0, 0, 6, 6, 4, 7, 2, 0), # 24
(4, 8, 7, 2, 2, 0, 3, 6, 4, 1, 4, 0), # 25
(4, 6, 5, 2, 4, 0, 2, 0, 2, 4, 0, 0), # 26
(3, 4, 6, 4, 2, 0, 5, 10, 2, 3, 3, 0), # 27
(3, 12, 6, 3, 1, 0, 4, 12, 4, 2, 3, 0), # 28
(7, 8, 3, 3, 1, 0, 3, 3, 3, 4, 2, 0), # 29
(1, 12, 5, 0, 4, 0, 1, 4, 4, 5, 0, 0), # 30
(5, 8, 8, 3, 5, 0, 4, 7, 0, 4, 3, 0), # 31
(1, 14, 4, 4, 0, 0, 7, 7, 2, 3, 1, 0), # 32
(3, 7, 4, 2, 1, 0, 2, 5, 3, 2, 2, 0), # 33
(1, 7, 3, 3, 1, 0, 4, 11, 3, 5, 0, 0), # 34
(2, 5, 5, 4, 0, 0, 7, 6, 4, 5, 0, 0), # 35
(4, 7, 7, 3, 2, 0, 5, 7, 5, 1, 0, 0), # 36
(2, 6, 9, 8, 0, 0, 3, 9, 8, 0, 1, 0), # 37
(3, 4, 6, 2, 4, 0, 4, 5, 2, 0, 1, 0), # 38
(2, 6, 6, 1, 1, 0, 5, 7, 3, 8, 1, 0), # 39
(3, 8, 8, 3, 0, 0, 4, 3, 4, 9, 2, 0), # 40
(2, 3, 2, 2, 1, 0, 4, 9, 3, 6, 3, 0), # 41
(1, 8, 10, 0, 0, 0, 5, 12, 4, 4, 4, 0), # 42
(4, 11, 3, 2, 2, 0, 6, 5, 5, 4, 3, 0), # 43
(2, 7, 12, 2, 1, 0, 1, 4, 4, 1, 1, 0), # 44
(0, 9, 5, 1, 4, 0, 10, 4, 4, 6, 0, 0), # 45
(5, 4, 4, 0, 1, 0, 2, 4, 5, 3, 2, 0), # 46
(2, 5, 4, 0, 0, 0, 5, 9, 5, 5, 0, 0), # 47
(1, 10, 3, 4, 1, 0, 3, 3, 4, 4, 1, 0), # 48
(4, 6, 3, 4, 2, 0, 3, 6, 5, 2, 1, 0), # 49
(3, 6, 4, 5, 0, 0, 5, 9, 7, 3, 1, 0), # 50
(3, 6, 7, 2, 1, 0, 4, 5, 1, 3, 8, 0), # 51
(3, 11, 2, 4, 2, 0, 5, 7, 4, 7, 0, 0), # 52
(3, 8, 7, 3, 2, 0, 6, 9, 4, 3, 2, 0), # 53
(2, 7, 9, 1, 3, 0, 7, 6, 5, 2, 2, 0), # 54
(5, 10, 5, 2, 2, 0, 4, 5, 4, 4, 2, 0), # 55
(2, 6, 6, 1, 5, 0, 3, 3, 2, 3, 2, 0), # 56
(3, 3, 2, 3, 0, 0, 5, 6, 4, 8, 0, 0), # 57
(2, 7, 5, 2, 2, 0, 0, 1, 2, 3, 0, 0), # 58
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # 59
)
station_arriving_intensity = (
(2.649651558384548, 6.796460700757575, 7.9942360218509, 6.336277173913043, 7.143028846153846, 4.75679347826087), # 0
(2.6745220100478, 6.872041598712823, 8.037415537524994, 6.371564387077295, 7.196566506410256, 4.7551721391908215), # 1
(2.699108477221734, 6.946501402918069, 8.07957012282205, 6.406074879227053, 7.248974358974359, 4.753501207729468), # 2
(2.72339008999122, 7.019759765625, 8.120668982969152, 6.4397792119565205, 7.300204326923078, 4.7517809103260875), # 3
(2.747345978441128, 7.091736339085298, 8.160681323193373, 6.472647946859904, 7.350208333333334, 4.750011473429951), # 4
(2.7709552726563262, 7.162350775550646, 8.199576348721793, 6.504651645531401, 7.39893830128205, 4.748193123490338), # 5
(2.794197102721686, 7.231522727272727, 8.237323264781493, 6.535760869565218, 7.446346153846154, 4.746326086956522), # 6
(2.817050598722076, 7.299171846503226, 8.273891276599542, 6.565946180555556, 7.492383814102565, 4.744410590277778), # 7
(2.8394948907423667, 7.365217785493826, 8.309249589403029, 6.595178140096618, 7.537003205128205, 4.7424468599033816), # 8
(2.8615091088674274, 7.429580196496212, 8.343367408419024, 6.623427309782609, 7.580156249999999, 4.740435122282609), # 9
(2.8830723831821286, 7.492178731762065, 8.376213938874606, 6.65066425120773, 7.621794871794872, 4.738375603864734), # 10
(2.9041638437713395, 7.55293304354307, 8.407758385996857, 6.676859525966184, 7.661870993589743, 4.736268531099034), # 11
(2.92476262071993, 7.611762784090908, 8.437969955012854, 6.7019836956521734, 7.700336538461538, 4.734114130434782), # 12
(2.944847844112769, 7.668587605657268, 8.46681785114967, 6.726007321859903, 7.737143429487181, 4.731912628321256), # 13
(2.9643986440347283, 7.723327160493828, 8.494271279634388, 6.748900966183574, 7.772243589743589, 4.729664251207729), # 14
(2.9833941505706756, 7.775901100852272, 8.520299445694086, 6.770635190217391, 7.8055889423076925, 4.7273692255434785), # 15
(3.001813493805482, 7.826229078984287, 8.544871554555842, 6.791180555555555, 7.8371314102564105, 4.725027777777778), # 16
(3.019635803824017, 7.874230747141554, 8.567956811446729, 6.810507623792271, 7.866822916666667, 4.722640134359904), # 17
(3.03684021071115, 7.919825757575757, 8.589524421593831, 6.82858695652174, 7.894615384615387, 4.72020652173913), # 18
(3.053405844551751, 7.962933762538579, 8.609543590224222, 6.845389115338164, 7.9204607371794875, 4.717727166364734), # 19
(3.0693118354306894, 8.003474414281705, 8.62798352256498, 6.860884661835749, 7.944310897435898, 4.71520229468599), # 20
(3.084537313432836, 8.041367365056816, 8.644813423843189, 6.875044157608696, 7.9661177884615375, 4.712632133152174), # 21
(3.099061408643059, 8.076532267115601, 8.660002499285918, 6.887838164251208, 7.985833333333332, 4.710016908212561), # 22
(3.1128632511462295, 8.108888772709737, 8.673519954120252, 6.899237243357488, 8.003409455128205, 4.707356846316426), # 23
(3.125921971027217, 8.138356534090908, 8.685334993573264, 6.909211956521739, 8.018798076923076, 4.704652173913043), # 24
(3.1382166983708903, 8.164855203510802, 8.695416822872037, 6.917732865338165, 8.03195112179487, 4.701903117451691), # 25
(3.1497265632621207, 8.188304433221099, 8.703734647243644, 6.9247705314009655, 8.042820512820512, 4.699109903381642), # 26
(3.160430695785777, 8.208623875473483, 8.710257671915166, 6.930295516304349, 8.051358173076924, 4.696272758152174), # 27
(3.1703082260267292, 8.22573318251964, 8.714955102113683, 6.934278381642512, 8.057516025641025, 4.69339190821256), # 28
(3.1793382840698468, 8.239552006611252, 8.717796143066266, 6.936689689009662, 8.061245993589743, 4.690467580012077), # 29
(3.1875, 8.25, 8.71875, 6.9375, 8.0625, 4.6875), # 30
(3.1951370284526854, 8.258678799715907, 8.718034948671496, 6.937353656045752, 8.062043661347518, 4.683376259786773), # 31
(3.202609175191816, 8.267242897727273, 8.715910024154589, 6.93691748366013, 8.06068439716312, 4.677024758454107), # 32
(3.2099197969948845, 8.275691228693182, 8.712405570652175, 6.936195772058824, 8.058436835106383, 4.66850768365817), # 33
(3.217072250639386, 8.284022727272728, 8.70755193236715, 6.935192810457517, 8.05531560283688, 4.657887223055139), # 34
(3.224069892902813, 8.292236328124998, 8.701379453502415, 6.933912888071895, 8.051335328014185, 4.645225564301183), # 35
(3.23091608056266, 8.300330965909092, 8.69391847826087, 6.932360294117648, 8.046510638297873, 4.630584895052474), # 36
(3.2376141703964194, 8.308305575284091, 8.68519935084541, 6.9305393178104575, 8.040856161347516, 4.614027402965184), # 37
(3.2441675191815853, 8.31615909090909, 8.675252415458937, 6.9284542483660125, 8.034386524822695, 4.595615275695485), # 38
(3.250579483695652, 8.323890447443182, 8.664108016304347, 6.926109375, 8.027116356382978, 4.57541070089955), # 39
(3.2568534207161126, 8.331498579545455, 8.651796497584542, 6.923508986928105, 8.019060283687942, 4.5534758662335495), # 40
(3.26299268702046, 8.338982421874999, 8.638348203502416, 6.920657373366013, 8.010232934397163, 4.529872959353657), # 41
(3.269000639386189, 8.34634090909091, 8.62379347826087, 6.917558823529411, 8.000648936170213, 4.504664167916042), # 42
(3.2748806345907933, 8.353572975852272, 8.608162666062801, 6.914217626633987, 7.990322916666666, 4.477911679576878), # 43
(3.2806360294117645, 8.360677556818182, 8.591486111111111, 6.910638071895424, 7.979269503546099, 4.449677681992337), # 44
(3.286270180626598, 8.367653586647727, 8.573794157608697, 6.906824448529411, 7.967503324468085, 4.420024362818591), # 45
(3.291786445012788, 8.374500000000001, 8.555117149758455, 6.902781045751634, 7.955039007092199, 4.389013909711811), # 46
(3.297188179347826, 8.381215731534091, 8.535485431763284, 6.898512152777777, 7.941891179078015, 4.356708510328169), # 47
(3.3024787404092075, 8.387799715909091, 8.514929347826087, 6.894022058823529, 7.928074468085106, 4.323170352323839), # 48
(3.307661484974424, 8.39425088778409, 8.493479242149759, 6.889315053104576, 7.91360350177305, 4.288461623354989), # 49
(3.312739769820972, 8.40056818181818, 8.471165458937199, 6.884395424836602, 7.898492907801418, 4.252644511077794), # 50
(3.317716951726343, 8.406750532670454, 8.448018342391304, 6.879267463235294, 7.882757313829787, 4.215781203148426), # 51
(3.322596387468031, 8.412796875, 8.424068236714975, 6.87393545751634, 7.86641134751773, 4.177933887223055), # 52
(3.3273814338235295, 8.41870614346591, 8.39934548611111, 6.868403696895425, 7.849469636524823, 4.139164750957854), # 53
(3.332075447570333, 8.424477272727271, 8.373880434782608, 6.8626764705882355, 7.831946808510638, 4.099535982008995), # 54
(3.336681785485933, 8.430109197443182, 8.347703426932366, 6.856758067810458, 7.813857491134752, 4.05910976803265), # 55
(3.341203804347826, 8.435600852272726, 8.320844806763285, 6.8506527777777775, 7.795216312056738, 4.017948296684991), # 56
(3.345644860933504, 8.440951171875001, 8.29333491847826, 6.844364889705882, 7.77603789893617, 3.9761137556221886), # 57
(3.3500083120204605, 8.44615909090909, 8.265204106280192, 6.837898692810458, 7.756336879432624, 3.9336683325004165), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_arriving_acc = (
(0, 5, 9, 3, 0, 0, 3, 7, 5, 2, 1, 0), # 0
(2, 9, 19, 9, 0, 0, 5, 10, 9, 4, 5, 0), # 1
(6, 18, 26, 13, 2, 0, 9, 15, 16, 8, 11, 0), # 2
(15, 27, 32, 17, 3, 0, 18, 23, 18, 13, 14, 0), # 3
(19, 33, 36, 25, 5, 0, 23, 29, 25, 16, 17, 0), # 4
(23, 36, 41, 28, 6, 0, 28, 37, 27, 18, 17, 0), # 5
(25, 38, 45, 33, 8, 0, 29, 38, 34, 19, 18, 0), # 6
(27, 41, 48, 38, 9, 0, 32, 40, 38, 21, 20, 0), # 7
(29, 48, 53, 43, 9, 0, 38, 43, 42, 29, 21, 0), # 8
(30, 57, 60, 47, 11, 0, 46, 50, 46, 37, 23, 0), # 9
(33, 62, 67, 50, 11, 0, 50, 60, 50, 40, 26, 0), # 10
(34, 66, 73, 52, 12, 0, 52, 63, 56, 48, 26, 0), # 11
(39, 68, 74, 54, 12, 0, 56, 72, 62, 50, 27, 0), # 12
(44, 69, 79, 57, 13, 0, 58, 76, 65, 57, 28, 0), # 13
(47, 75, 85, 59, 14, 0, 63, 80, 65, 61, 28, 0), # 14
(51, 77, 92, 61, 15, 0, 70, 90, 72, 65, 30, 0), # 15
(55, 83, 97, 66, 16, 0, 71, 104, 76, 66, 31, 0), # 16
(58, 88, 101, 68, 19, 0, 74, 109, 78, 72, 32, 0), # 17
(62, 92, 109, 70, 21, 0, 77, 114, 84, 75, 32, 0), # 18
(64, 99, 116, 72, 21, 0, 84, 116, 90, 76, 35, 0), # 19
(67, 106, 123, 74, 21, 0, 92, 125, 93, 77, 37, 0), # 20
(69, 114, 129, 76, 22, 0, 97, 130, 97, 80, 37, 0), # 21
(73, 120, 133, 77, 25, 0, 104, 134, 101, 85, 38, 0), # 22
(74, 125, 137, 80, 26, 0, 105, 139, 104, 90, 41, 0), # 23
(76, 134, 141, 81, 26, 0, 111, 145, 108, 97, 43, 0), # 24
(80, 142, 148, 83, 28, 0, 114, 151, 112, 98, 47, 0), # 25
(84, 148, 153, 85, 32, 0, 116, 151, 114, 102, 47, 0), # 26
(87, 152, 159, 89, 34, 0, 121, 161, 116, 105, 50, 0), # 27
(90, 164, 165, 92, 35, 0, 125, 173, 120, 107, 53, 0), # 28
(97, 172, 168, 95, 36, 0, 128, 176, 123, 111, 55, 0), # 29
(98, 184, 173, 95, 40, 0, 129, 180, 127, 116, 55, 0), # 30
(103, 192, 181, 98, 45, 0, 133, 187, 127, 120, 58, 0), # 31
(104, 206, 185, 102, 45, 0, 140, 194, 129, 123, 59, 0), # 32
(107, 213, 189, 104, 46, 0, 142, 199, 132, 125, 61, 0), # 33
(108, 220, 192, 107, 47, 0, 146, 210, 135, 130, 61, 0), # 34
(110, 225, 197, 111, 47, 0, 153, 216, 139, 135, 61, 0), # 35
(114, 232, 204, 114, 49, 0, 158, 223, 144, 136, 61, 0), # 36
(116, 238, 213, 122, 49, 0, 161, 232, 152, 136, 62, 0), # 37
(119, 242, 219, 124, 53, 0, 165, 237, 154, 136, 63, 0), # 38
(121, 248, 225, 125, 54, 0, 170, 244, 157, 144, 64, 0), # 39
(124, 256, 233, 128, 54, 0, 174, 247, 161, 153, 66, 0), # 40
(126, 259, 235, 130, 55, 0, 178, 256, 164, 159, 69, 0), # 41
(127, 267, 245, 130, 55, 0, 183, 268, 168, 163, 73, 0), # 42
(131, 278, 248, 132, 57, 0, 189, 273, 173, 167, 76, 0), # 43
(133, 285, 260, 134, 58, 0, 190, 277, 177, 168, 77, 0), # 44
(133, 294, 265, 135, 62, 0, 200, 281, 181, 174, 77, 0), # 45
(138, 298, 269, 135, 63, 0, 202, 285, 186, 177, 79, 0), # 46
(140, 303, 273, 135, 63, 0, 207, 294, 191, 182, 79, 0), # 47
(141, 313, 276, 139, 64, 0, 210, 297, 195, 186, 80, 0), # 48
(145, 319, 279, 143, 66, 0, 213, 303, 200, 188, 81, 0), # 49
(148, 325, 283, 148, 66, 0, 218, 312, 207, 191, 82, 0), # 50
(151, 331, 290, 150, 67, 0, 222, 317, 208, 194, 90, 0), # 51
(154, 342, 292, 154, 69, 0, 227, 324, 212, 201, 90, 0), # 52
(157, 350, 299, 157, 71, 0, 233, 333, 216, 204, 92, 0), # 53
(159, 357, 308, 158, 74, 0, 240, 339, 221, 206, 94, 0), # 54
(164, 367, 313, 160, 76, 0, 244, 344, 225, 210, 96, 0), # 55
(166, 373, 319, 161, 81, 0, 247, 347, 227, 213, 98, 0), # 56
(169, 376, 321, 164, 81, 0, 252, 353, 231, 221, 98, 0), # 57
(171, 383, 326, 166, 83, 0, 252, 354, 233, 224, 98, 0), # 58
(171, 383, 326, 166, 83, 0, 252, 354, 233, 224, 98, 0), # 59
)
passenger_arriving_rate = (
(2.649651558384548, 5.43716856060606, 4.79654161311054, 2.534510869565217, 1.428605769230769, 0.0, 4.75679347826087, 5.714423076923076, 3.801766304347826, 3.1976944087403596, 1.359292140151515, 0.0), # 0
(2.6745220100478, 5.497633278970258, 4.822449322514997, 2.5486257548309177, 1.439313301282051, 0.0, 4.7551721391908215, 5.757253205128204, 3.8229386322463768, 3.2149662150099974, 1.3744083197425645, 0.0), # 1
(2.699108477221734, 5.557201122334455, 4.8477420736932295, 2.562429951690821, 1.4497948717948717, 0.0, 4.753501207729468, 5.799179487179487, 3.8436449275362317, 3.23182804912882, 1.3893002805836137, 0.0), # 2
(2.72339008999122, 5.6158078125, 4.872401389781491, 2.575911684782608, 1.4600408653846155, 0.0, 4.7517809103260875, 5.840163461538462, 3.863867527173912, 3.2482675931876606, 1.403951953125, 0.0), # 3
(2.747345978441128, 5.673389071268238, 4.896408793916024, 2.589059178743961, 1.4700416666666667, 0.0, 4.750011473429951, 5.880166666666667, 3.883588768115942, 3.2642725292773487, 1.4183472678170594, 0.0), # 4
(2.7709552726563262, 5.729880620440516, 4.919745809233076, 2.6018606582125603, 1.47978766025641, 0.0, 4.748193123490338, 5.91915064102564, 3.9027909873188404, 3.279830539488717, 1.432470155110129, 0.0), # 5
(2.794197102721686, 5.785218181818181, 4.942393958868895, 2.614304347826087, 1.4892692307692306, 0.0, 4.746326086956522, 5.957076923076922, 3.9214565217391306, 3.294929305912597, 1.4463045454545453, 0.0), # 6
(2.817050598722076, 5.83933747720258, 4.964334765959725, 2.626378472222222, 1.498476762820513, 0.0, 4.744410590277778, 5.993907051282052, 3.939567708333333, 3.309556510639817, 1.459834369300645, 0.0), # 7
(2.8394948907423667, 5.89217422839506, 4.985549753641817, 2.638071256038647, 1.5074006410256409, 0.0, 4.7424468599033816, 6.0296025641025635, 3.9571068840579704, 3.3236998357612113, 1.473043557098765, 0.0), # 8
(2.8615091088674274, 5.943664157196969, 5.006020445051414, 2.649370923913043, 1.5160312499999997, 0.0, 4.740435122282609, 6.064124999999999, 3.9740563858695652, 3.3373469633676094, 1.4859160392992423, 0.0), # 9
(2.8830723831821286, 5.993742985409652, 5.025728363324764, 2.660265700483092, 1.5243589743589743, 0.0, 4.738375603864734, 6.097435897435897, 3.990398550724638, 3.3504855755498424, 1.498435746352413, 0.0), # 10
(2.9041638437713395, 6.042346434834456, 5.044655031598114, 2.6707438103864733, 1.5323741987179484, 0.0, 4.736268531099034, 6.129496794871794, 4.0061157155797105, 3.3631033543987425, 1.510586608708614, 0.0), # 11
(2.92476262071993, 6.089410227272726, 5.062781973007712, 2.680793478260869, 1.5400673076923075, 0.0, 4.734114130434782, 6.16026923076923, 4.021190217391304, 3.375187982005141, 1.5223525568181815, 0.0), # 12
(2.944847844112769, 6.134870084525814, 5.080090710689802, 2.690402928743961, 1.547428685897436, 0.0, 4.731912628321256, 6.189714743589744, 4.035604393115942, 3.386727140459868, 1.5337175211314535, 0.0), # 13
(2.9643986440347283, 6.1786617283950624, 5.096562767780632, 2.699560386473429, 1.5544487179487176, 0.0, 4.729664251207729, 6.217794871794871, 4.049340579710144, 3.397708511853755, 1.5446654320987656, 0.0), # 14
(2.9833941505706756, 6.220720880681816, 5.112179667416451, 2.708254076086956, 1.5611177884615384, 0.0, 4.7273692255434785, 6.2444711538461535, 4.062381114130434, 3.408119778277634, 1.555180220170454, 0.0), # 15
(3.001813493805482, 6.26098326318743, 5.126922932733505, 2.716472222222222, 1.5674262820512819, 0.0, 4.725027777777778, 6.2697051282051275, 4.074708333333333, 3.4179486218223363, 1.5652458157968574, 0.0), # 16
(3.019635803824017, 6.299384597713242, 5.140774086868038, 2.724203049516908, 1.5733645833333332, 0.0, 4.722640134359904, 6.293458333333333, 4.0863045742753625, 3.4271827245786914, 1.5748461494283106, 0.0), # 17
(3.03684021071115, 6.3358606060606055, 5.153714652956299, 2.7314347826086958, 1.578923076923077, 0.0, 4.72020652173913, 6.315692307692308, 4.097152173913043, 3.435809768637532, 1.5839651515151514, 0.0), # 18
(3.053405844551751, 6.370347010030863, 5.165726154134533, 2.738155646135265, 1.5840921474358973, 0.0, 4.717727166364734, 6.336368589743589, 4.107233469202898, 3.4438174360896885, 1.5925867525077158, 0.0), # 19
(3.0693118354306894, 6.402779531425363, 5.1767901135389875, 2.7443538647342995, 1.5888621794871793, 0.0, 4.71520229468599, 6.355448717948717, 4.11653079710145, 3.4511934090259917, 1.6006948828563408, 0.0), # 20
(3.084537313432836, 6.433093892045452, 5.186888054305913, 2.750017663043478, 1.5932235576923073, 0.0, 4.712632133152174, 6.372894230769229, 4.125026494565217, 3.4579253695372754, 1.608273473011363, 0.0), # 21
(3.099061408643059, 6.46122581369248, 5.19600149957155, 2.7551352657004826, 1.5971666666666662, 0.0, 4.710016908212561, 6.388666666666665, 4.132702898550725, 3.464000999714367, 1.61530645342312, 0.0), # 22
(3.1128632511462295, 6.487111018167789, 5.204111972472151, 2.759694897342995, 1.6006818910256408, 0.0, 4.707356846316426, 6.402727564102563, 4.139542346014493, 3.4694079816481005, 1.6217777545419472, 0.0), # 23
(3.125921971027217, 6.5106852272727265, 5.211200996143958, 2.763684782608695, 1.6037596153846152, 0.0, 4.704652173913043, 6.415038461538461, 4.1455271739130435, 3.474133997429305, 1.6276713068181816, 0.0), # 24
(3.1382166983708903, 6.531884162808641, 5.217250093723222, 2.7670931461352657, 1.606390224358974, 0.0, 4.701903117451691, 6.425560897435896, 4.150639719202899, 3.4781667291488145, 1.6329710407021603, 0.0), # 25
(3.1497265632621207, 6.550643546576878, 5.222240788346187, 2.7699082125603858, 1.6085641025641022, 0.0, 4.699109903381642, 6.434256410256409, 4.154862318840579, 3.4814938588974575, 1.6376608866442195, 0.0), # 26
(3.160430695785777, 6.566899100378786, 5.226154603149099, 2.772118206521739, 1.6102716346153847, 0.0, 4.696272758152174, 6.441086538461539, 4.158177309782609, 3.484103068766066, 1.6417247750946966, 0.0), # 27
(3.1703082260267292, 6.580586546015712, 5.228973061268209, 2.7737113526570045, 1.6115032051282048, 0.0, 4.69339190821256, 6.446012820512819, 4.160567028985507, 3.4859820408454727, 1.645146636503928, 0.0), # 28
(3.1793382840698468, 6.591641605289001, 5.230677685839759, 2.7746758756038647, 1.6122491987179486, 0.0, 4.690467580012077, 6.448996794871794, 4.162013813405797, 3.487118457226506, 1.6479104013222503, 0.0), # 29
(3.1875, 6.6, 5.23125, 2.775, 1.6124999999999998, 0.0, 4.6875, 6.449999999999999, 4.1625, 3.4875, 1.65, 0.0), # 30
(3.1951370284526854, 6.606943039772726, 5.230820969202898, 2.7749414624183006, 1.6124087322695035, 0.0, 4.683376259786773, 6.449634929078014, 4.162412193627451, 3.4872139794685983, 1.6517357599431814, 0.0), # 31
(3.202609175191816, 6.613794318181818, 5.229546014492753, 2.7747669934640515, 1.6121368794326238, 0.0, 4.677024758454107, 6.448547517730495, 4.162150490196078, 3.4863640096618354, 1.6534485795454545, 0.0), # 32
(3.2099197969948845, 6.620552982954545, 5.227443342391305, 2.774478308823529, 1.6116873670212764, 0.0, 4.66850768365817, 6.446749468085105, 4.161717463235294, 3.4849622282608697, 1.6551382457386363, 0.0), # 33
(3.217072250639386, 6.627218181818182, 5.224531159420289, 2.7740771241830067, 1.6110631205673758, 0.0, 4.657887223055139, 6.444252482269503, 4.16111568627451, 3.4830207729468596, 1.6568045454545455, 0.0), # 34
(3.224069892902813, 6.633789062499998, 5.220827672101449, 2.773565155228758, 1.6102670656028368, 0.0, 4.645225564301183, 6.441068262411347, 4.160347732843137, 3.480551781400966, 1.6584472656249996, 0.0), # 35
(3.23091608056266, 6.6402647727272734, 5.2163510869565215, 2.7729441176470586, 1.6093021276595745, 0.0, 4.630584895052474, 6.437208510638298, 4.159416176470589, 3.477567391304347, 1.6600661931818184, 0.0), # 36
(3.2376141703964194, 6.6466444602272725, 5.211119610507246, 2.7722157271241827, 1.6081712322695032, 0.0, 4.614027402965184, 6.432684929078013, 4.158323590686274, 3.474079740338164, 1.6616611150568181, 0.0), # 37
(3.2441675191815853, 6.652927272727272, 5.205151449275362, 2.7713816993464047, 1.6068773049645388, 0.0, 4.595615275695485, 6.427509219858155, 4.157072549019607, 3.4701009661835744, 1.663231818181818, 0.0), # 38
(3.250579483695652, 6.659112357954545, 5.198464809782608, 2.7704437499999996, 1.6054232712765955, 0.0, 4.57541070089955, 6.421693085106382, 4.155665625, 3.4656432065217384, 1.6647780894886361, 0.0), # 39
(3.2568534207161126, 6.6651988636363635, 5.191077898550724, 2.7694035947712417, 1.6038120567375882, 0.0, 4.5534758662335495, 6.415248226950353, 4.154105392156863, 3.4607185990338163, 1.6662997159090909, 0.0), # 40
(3.26299268702046, 6.671185937499998, 5.1830089221014495, 2.768262949346405, 1.6020465868794325, 0.0, 4.529872959353657, 6.40818634751773, 4.152394424019608, 3.455339281400966, 1.6677964843749995, 0.0), # 41
(3.269000639386189, 6.677072727272728, 5.174276086956522, 2.767023529411764, 1.6001297872340425, 0.0, 4.504664167916042, 6.40051914893617, 4.150535294117646, 3.4495173913043478, 1.669268181818182, 0.0), # 42
(3.2748806345907933, 6.682858380681817, 5.164897599637681, 2.7656870506535944, 1.5980645833333331, 0.0, 4.477911679576878, 6.3922583333333325, 4.148530575980392, 3.4432650664251203, 1.6707145951704543, 0.0), # 43
(3.2806360294117645, 6.688542045454545, 5.154891666666667, 2.7642552287581696, 1.5958539007092198, 0.0, 4.449677681992337, 6.383415602836879, 4.146382843137254, 3.4365944444444443, 1.6721355113636363, 0.0), # 44
(3.286270180626598, 6.694122869318181, 5.144276494565218, 2.7627297794117642, 1.593500664893617, 0.0, 4.420024362818591, 6.374002659574468, 4.144094669117647, 3.4295176630434785, 1.6735307173295453, 0.0), # 45
(3.291786445012788, 6.6996, 5.133070289855073, 2.761112418300653, 1.5910078014184397, 0.0, 4.389013909711811, 6.364031205673759, 4.14166862745098, 3.4220468599033818, 1.6749, 0.0), # 46
(3.297188179347826, 6.704972585227273, 5.12129125905797, 2.759404861111111, 1.588378235815603, 0.0, 4.356708510328169, 6.353512943262412, 4.139107291666666, 3.4141941727053133, 1.6762431463068181, 0.0), # 47
(3.3024787404092075, 6.710239772727273, 5.108957608695651, 2.757608823529411, 1.5856148936170211, 0.0, 4.323170352323839, 6.3424595744680845, 4.136413235294117, 3.4059717391304343, 1.6775599431818182, 0.0), # 48
(3.307661484974424, 6.715400710227271, 5.096087545289855, 2.75572602124183, 1.5827207003546098, 0.0, 4.288461623354989, 6.330882801418439, 4.133589031862745, 3.3973916968599034, 1.6788501775568176, 0.0), # 49
(3.312739769820972, 6.720454545454543, 5.082699275362319, 2.7537581699346405, 1.5796985815602835, 0.0, 4.252644511077794, 6.318794326241134, 4.130637254901961, 3.388466183574879, 1.6801136363636358, 0.0), # 50
(3.317716951726343, 6.725400426136363, 5.068811005434783, 2.7517069852941174, 1.5765514627659571, 0.0, 4.215781203148426, 6.306205851063829, 4.127560477941176, 3.3792073369565214, 1.6813501065340908, 0.0), # 51
(3.322596387468031, 6.730237499999999, 5.054440942028985, 2.7495741830065357, 1.573282269503546, 0.0, 4.177933887223055, 6.293129078014184, 4.124361274509804, 3.3696272946859898, 1.6825593749999999, 0.0), # 52
(3.3273814338235295, 6.7349649147727275, 5.039607291666666, 2.7473614787581697, 1.5698939273049646, 0.0, 4.139164750957854, 6.279575709219858, 4.121042218137255, 3.359738194444444, 1.6837412286931819, 0.0), # 53
(3.332075447570333, 6.739581818181817, 5.024328260869565, 2.745070588235294, 1.5663893617021276, 0.0, 4.099535982008995, 6.2655574468085105, 4.117605882352941, 3.3495521739130427, 1.6848954545454542, 0.0), # 54
(3.336681785485933, 6.744087357954545, 5.008622056159419, 2.7427032271241827, 1.5627714982269503, 0.0, 4.05910976803265, 6.251085992907801, 4.114054840686275, 3.3390813707729463, 1.6860218394886362, 0.0), # 55
(3.341203804347826, 6.74848068181818, 4.9925068840579705, 2.740261111111111, 1.5590432624113475, 0.0, 4.017948296684991, 6.23617304964539, 4.110391666666667, 3.328337922705314, 1.687120170454545, 0.0), # 56
(3.345644860933504, 6.752760937500001, 4.976000951086956, 2.7377459558823527, 1.5552075797872338, 0.0, 3.9761137556221886, 6.220830319148935, 4.106618933823529, 3.317333967391304, 1.6881902343750002, 0.0), # 57
(3.3500083120204605, 6.756927272727271, 4.959122463768115, 2.7351594771241827, 1.5512673758865245, 0.0, 3.9336683325004165, 6.205069503546098, 4.102739215686275, 3.3060816425120767, 1.6892318181818178, 0.0), # 58
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0), # 59
)
passenger_allighting_rate = (
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 0
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 1
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 2
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 3
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 4
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 5
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 6
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 7
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 8
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 9
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 10
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 11
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 12
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 13
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 14
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 15
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 16
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 17
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 18
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 19
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 20
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 21
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 22
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 23
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 24
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 25
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 26
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 27
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 28
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 29
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 30
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 31
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 32
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 33
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 34
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 35
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 36
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 37
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 38
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 39
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 40
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 41
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 42
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 43
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 44
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 45
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 46
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 47
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 48
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 49
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 50
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 51
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 52
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 53
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 54
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 55
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 56
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 57
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 58
(0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1, 0, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 0.16666666666666666, 1), # 59
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
child_seed_index = (
1, # 0
67, # 1
)
| 37,697 | 36,553 |
import plotly.express as px
from preprocess import PreprocessHeatmap
def get_figure(df):
    """Build the weekly-activity heatmap figure.

    Args:
        df: Raw dataframe; run through ``PreprocessHeatmap`` to obtain the
            day-of-week x hour-of-day matrix of generated likes.

    Returns:
        A plotly figure rendering the heatmap with transparent backgrounds
        and a custom French hover tooltip.
    """
    pp = PreprocessHeatmap()
    heatmap_df = pp.preprocess_heatmap(df)

    # Hover tooltip for each cell. BUGFIX: the original template left both
    # style attributes unterminated (`style="font-size: 20px;>`), which broke
    # the inline styling in the rendered tooltip — the closing quotes before
    # `>` are the fix.
    hover_template = \
        '''
        <b style="font-size: 20px;">%{x}, %{y}h00</b>
        <br>
        <span style="font-size: 16px;">%{z:.0f} likes générés</span>
        <extra></extra>
        '''
    fig = px.imshow(heatmap_df)
    fig.update_layout(
        xaxis_title='Jour de la semaine',
        yaxis_title='Heure de la journée',
        yaxis_nticks=24,
        # Transparent backgrounds so the figure blends into the host page.
        paper_bgcolor='rgba(0,0,0,0)',
        plot_bgcolor='rgba(0,0,0,0)'
    )
    fig.update_traces(hovertemplate=hover_template)
    return fig
from dom import e, Div, TextInput, Button, TextArea
from basicboard import BasicBoard
from connection import getconn
from utils import queryparams, random, setseed
# Base seed passed to setseed() when (re)building the forum-game tree;
# rebuild() uses mainseed and serialize() uses mainseed + 1, keeping the
# random variation colors stable across rebuilds.
mainseed = 80
class Forumnode(e):
    """One node of the collaborative forum-game move tree.

    Each node holds a move (SAN + UCI), an owner, a comment, the resulting
    FEN and an embedded board widget; its children are the variations played
    from this position.

    NOTE: this is Transcrypt front-end code — ``window``, ``document``,
    ``setTimeout`` and JS arithmetic/string semantics are assumed.
    """

    def __init__(self, root, args = None):
        """Create a node from an args dict (see keys below) under the given
        Forumgame root and build its DOM immediately."""
        super().__init__("div")
        if args is None:
            # Avoid the original mutable-default-argument pitfall.
            args = {}
        self.root = root                  # owning Forumgame widget
        self.move = args["move"]          # SAN move ("startpos" for the root)
        self.uci = args["uci"]            # UCI move, may be None
        self.comment = args["comment"]
        if not self.comment:
            self.comment = ""
        self.owner = args["owner"]        # forum user who played the move
        self.fen = args["fen"]            # position after the move
        self.parent = args["parent"]      # parent Forumnode or None
        self.isadmin = args["isadmin"]
        # Callers going through appendchild() do not pass "halfmoveno";
        # .get() keeps that working under CPython semantics too (the
        # original args["halfmoveno"] only worked because Transcrypt maps
        # missing keys to undefined). appendchild() overwrites it anyway.
        self.halfmoveno = args.get("halfmoveno")
        if not self.halfmoveno:
            self.halfmoveno = -1
        self.childs = []
        self.build()

    def toobj(self):
        """Serialize this subtree into the stored-JSON shape."""
        moveobjs = {}
        for child in self.childs:
            moveobjs[child.move] = child.toobj()
        return {
            "uci": self.uci,
            "comment": self.comment,
            "owner": self.owner,
            "fen": self.fen,
            "moves": moveobjs
        }

    def appendchild(self, node):
        """Attach a variation; assigns its half-move number and rebuilds it."""
        node.halfmoveno = self.halfmoveno + 1
        node.build()
        self.childs.append(node)
        self.containerdiv.a(node)
        if len(self.childs) > 1:
            # Multiple variations: give the container a random pastel
            # border/background so the branches are visually separated.
            rgb = "rgb({},{},{})".format(int(random()*128 + 127),int(random()*128 + 127),int(random()*128 + 127))
            self.containerdiv.bc(rgb).bds("solid").bdw(10).bdr(20).bdc(rgb)

    def addnode(self):
        """Admin helper: prompt for 'Move:uci:owner:fen' and append a child.

        BUGFIX: the original dict literal repeated the "uci" key (None, then
        parts[0]) and used parts[2] — the owner field — for the fen. The
        fields now map to the prompt order Move:uci:owner:fen.
        """
        input = window.prompt("Move:uci:owner:fen", "")
        if input:
            self.root.shift()
            parts = input.split(":")
            self.appendchild(Forumnode(self.root, {
                "move": parts[0],
                "uci": parts[1],
                "comment": "",
                "owner": parts[2],
                "fen": parts[3],
                "parent": self,
                "isadmin": self.isadmin
            }))
            self.root.parse()

    def edituci(self):
        """Prompt for a new UCI move and refresh the board + label."""
        input = window.prompt("Uci", "")
        if input:
            self.uci = input
            self.setboard()
            self.ucidiv.html(self.uci)
            self.root.parse()

    def editfen(self):
        """Prompt for a new FEN and refresh the board."""
        input = window.prompt("Fen", "")
        if input:
            self.fen = input
            self.setboard()
            self.root.parse()

    def setmovelabel(self):
        """Render the 'N. move' / 'N.. move' label for white/black moves.

        NOTE: relies on Transcrypt/JS semantics — `number + ". "` is string
        concatenation in JS; under CPython this would raise TypeError.
        """
        if self.halfmoveno < 0:
            moveno = ""
        elif ( self.halfmoveno % 2 ) == 0:
            moveno = ( ( self.halfmoveno + 2 ) / 2 ) + ". "
        else:
            moveno = ( ( self.halfmoveno + 1 ) / 2 ) + ".. "
        self.movelabeldiv.html("{}{}".format(moveno, self.move))

    def editsan(self):
        """Prompt for a new SAN move and refresh the label."""
        input = window.prompt("San", "")
        if input:
            self.move = input
            self.setmovelabel()
            self.root.parse()

    def editcomment(self):
        """Prompt for a new comment (pre-filled with the current one)."""
        input = window.prompt("Comment", self.comment)
        if input:
            self.comment = input
            self.commentdiv.html(self.comment)
            self.root.parse()

    def editowner(self):
        """Prompt for a new owner name."""
        input = window.prompt("Owner", "")
        if input:
            self.owner = input
            self.ownerdiv.html(self.owner)
            self.root.parse()

    def movecallback(self, variantkey, fen, uci):
        """Board callback: ask the server to generate the move played on the
        embedded board; the reply arrives in Forumgame.siores()."""
        # NOTE(review): this reads self.reqfenunderway, but the flag only
        # exists on the root (Forumgame). Under Transcrypt the missing
        # attribute is undefined/falsy, so the guard never fires. Changing it
        # to self.root.reqfenunderway would deadlock, because the root never
        # resets the flag after a response — left as-is deliberately.
        if self.reqfenunderway:
            print("a fen request is in progress, cannot start a new one")
            return
        self.root.shift()
        self.root.reqfenunderway = True
        self.root.reqnode = self
        getconn().sioreq({
            "kind": "forumgamemove",
            "owner": "forumgame",
            "moveuci": uci,
            "variantkey": variantkey,
            "fen": fen
        })

    def bbdragstart(self, ev):
        # Keep board drags from bubbling into the pane's drag-to-scroll.
        ev.stopPropagation()

    def setboard(self):
        """(Re)create the embedded BasicBoard for this node's position."""
        initobj = {
            "fen": self.fen,
            "squaresize": 20,
            "showfen": False,
            "movecallback": self.movecallback,
            "variantkey": "atomic"
        }
        if self.uci:
            # Let the board animate/show the generating move.
            initobj["positioninfo"] = {
                "genmove": {
                    "uci": self.uci
                }
            }
        b = BasicBoard(initobj)
        b.cp().ae("dragstart", self.bbdragstart)
        self.boarddiv.x().a(b)

    def analyzelocal(self):
        """Open this position on the app's main analysis board (best effort)."""
        try:
            self.root.mainboard.variantchanged("atomic", self.fen)
            self.root.parenttabpane.selectbykey("board")
        except:
            # Best effort: main board / tab pane may not be wired up.
            pass

    def analyzelichess(self):
        """Open this position in lichess analysis in a new tab."""
        window.open("https://lichess.org/analysis/atomic/" + self.fen, "_blank")

    def delchilds(self):
        """Drop every variation under this node and rebuild the tree."""
        self.childs = []
        self.root.rebuild(mainseed)

    def delme(self):
        """Remove this node from its parent's children and rebuild."""
        parent = self.parent
        if parent:
            newchilds = []
            for child in parent.childs:
                print("child", child.move, child.uci)
                if not ( child == self ):
                    newchilds.append(child)
            parent.childs = newchilds
            self.root.rebuild(mainseed)

    def serializefunc(self):
        # Deferred body of serialize(): rebuild with a different seed, then
        # push the JSON to the server.
        self.root.rebuild(mainseed + 1)
        self.root.store()

    def serialize(self):
        """Show feedback, then rebuild + store on the next tick."""
        self.infohook.html("serializing")
        setTimeout(self.serializefunc, 100)

    def copysrc(self):
        # Delegate to the root, which owns the JSON textarea.
        self.root.copysrc()

    def copylink(self):
        """Copy a shareable analysis URL for this position to the clipboard."""
        ti = TextInput()
        self.linktexthook.a(ti)
        ti.setText("https://fbserv.herokuapp.com/analysis/atomic/" + self.fen.replace(" ", "%20"))
        ti.e.select()
        document.execCommand("copy")
        self.linktexthook.x()

    def build(self):
        """Create/refresh this node's DOM: label, owner, comment, admin
        toolbar (admins only), board, and the child-variation container."""
        self.movediv = Div().disp("flex").fd("row").ai("center")
        self.movedescdiv = Div().bc("#eee").w(110).maw(110).pad(3)
        self.movelabeldiv = Div().fw("bold").pad(3).ff("monospace")
        self.setmovelabel()
        self.ownerdiv = Div().html(self.owner).ff("monospace").fs("10").c("#007")
        self.ucidiv = Div().ff("monospace").fs("12").pad(3)
        self.commentdiv = Div().fs("12").pad(5).html(self.comment)
        if self.uci:
            self.ucidiv.html(self.uci)
        self.movedescdiv.a([self.movelabeldiv, self.ownerdiv, self.commentdiv])
        self.movedescdiv.a(Button("Analyze local", self.analyzelocal).mar(2))
        self.movedescdiv.a(Button("Analyze lichess", self.analyzelichess).mar(2))
        self.infohook = Div().ff("monospace").pad(3).c("#007").fw("bold").html("built")
        if self.isadmin:
            # Admin-only editing toolbar.
            self.movedescdiv.a(self.infohook)
            self.linktexthook = Div()
            self.movedescdiv.a(self.ucidiv)
            self.movedescdiv.a(Button("+", self.addnode).pad(5))
            self.movedescdiv.a(Button("san", self.editsan).pad(5))
            self.movedescdiv.a(Button("uci", self.edituci).pad(5))
            self.movedescdiv.a(Button("fen", self.editfen).pad(5))
            self.movedescdiv.a(Button("comment", self.editcomment).pad(5))
            self.movedescdiv.a(Button("owner", self.editowner).pad(5))
            self.movedescdiv.a(Button("serialize", self.serialize).pad(5).bc("#ffa"))
            self.movedescdiv.a(Button("copy", self.copysrc).pad(5).bc("#afa"))
            self.movedescdiv.a(self.linktexthook)
            self.movedescdiv.a(Button("link", self.copylink).pad(5).bc("#aff"))
            self.movedescdiv.a(Button("delchilds", self.delchilds).pad(5).bc("#faa"))
            self.movedescdiv.a(Button("delme", self.delme).pad(5).bc("#faa"))
        self.boarddiv = Div().pad(2)
        self.movecontainerdiv = Div().disp("flex").fd("row").ai("center")
        self.movecontainerdiv.a([self.movedescdiv, self.boarddiv])
        self.containerdiv = Div().disp("flex").fd("column").ai("flex-start")
        self.movediv.a([self.movecontainerdiv, self.containerdiv])
        self.setboard()
        self.x().a(self.movediv)
        self.mw(600)
class Forumgame(e):
    """Top-level widget for the collaborative forum game.

    Loads the stored game tree from the server, renders it as nested
    Forumnode widgets, and lets admins edit/store the JSON source.
    NOTE: Transcrypt front-end code — ``window``, ``document``, ``JSON``
    and ``queryparams`` are browser/runtime globals.
    """

    def __init__(self):
        super().__init__("div")
        # Status line + container that holds the editor and the tree.
        self.messagediv = Div().disp("inline-block").pad(3).ff("monospace")
        self.contentdiv = Div()
        self.a([self.messagediv, self.contentdiv])
        # Guard state for the single in-flight move-generation request.
        self.reqfenunderway = False
        self.reqnode = None
        self.requestforumgame()
        # Drag-to-scroll handlers for panning over the large tree.
        self.ae("mousemove", self.mousemove)
        self.ae("mouseup", self.mouseup)
        self.ae("mouseleave", self.mouseleave)

    def copysrc(self):
        """Copy the JSON source textarea to the clipboard."""
        self.textarea.e.select()
        document.execCommand("copy")
        window.alert("Copied source to clipboard, {} characters.".format(len(self.textarea.getText())))

    def mousemove(self, ev):
        # While a drag is underway, pan the parent tab pane; the 20x factor
        # makes small mouse movements cover the big tree quickly.
        if self.dragunderway:
            dx = ev.clientX - self.dragstartx
            dy = ev.clientY - self.dragstarty
            self.parenttabpane.contentdiv.e.scrollTop = self.scrolltop + 20 * dy
            self.parenttabpane.contentdiv.e.scrollLeft = self.scrollleft + 20 * dx

    def mouseup(self, ev):
        self.dragunderway = False

    def mouseleave(self, ev):
        self.dragunderway = False

    def parse(self):
        """Serialize the node tree into the textarea; returns the JSON text."""
        obj = self.rootnode.toobj()
        text = JSON.stringify(obj, None, 2)
        self.textarea.setText(text)
        return text

    def store(self):
        """Parse the textarea JSON and send it to the server for storage."""
        self.parenttabpane.contentdiv.bc("#faa")
        self.messagediv.html("Parsing JSON")
        try:
            obj = JSON.parse(self.textarea.getText())
            self.messagediv.html("Storing JSON")
            getconn().sioreq({
                "kind": "setforumgame",
                "owner": "forumgame",
                "forumgame": obj
            })
        except:
            # Invalid JSON in the textarea: report and abort.
            self.messagediv.html("Error: could not parse JSON")
            return

    def requestforumgame(self):
        # Ask the server for the stored game; the reply lands in siores().
        getconn().sioreq({
            "kind": "getforumgame",
            "owner": "forumgame"
        })

    def buildrec(self, parentnode, tree):
        """Recursively build Forumnode children from the stored JSON tree."""
        # jsiter pragma: lets Transcrypt iterate the keys of a plain JS
        # object ("moves" arrives as one from JSON).
        __pragma__("jsiter")
        if not tree["moves"]:
            return
        for move in tree["moves"]:
            moveobj = tree["moves"][move]
            node = Forumnode(self, {
                "move": move,
                "uci": moveobj["uci"],
                "comment": moveobj["comment"],
                "owner": moveobj["owner"],
                "fen": moveobj["fen"],
                "parent": parentnode,
                "isadmin": self.isadmin
            })
            parentnode.appendchild(node)
            self.buildrec(node, moveobj)
        __pragma__("nojsiter")

    def build(self, text, seed):
        """(Re)build the widget from JSON text; the seed makes the random
        variation colors reproducible across rebuilds."""
        setseed(seed)
        self.contentdiv.x().pad(3)
        self.textarea = TextArea().w(1000).h(200)
        self.textarea.setText(text)
        self.controlpanel = Div()
        self.controlpanel.a(Button("Store", self.store))
        if self.isadmin:
            # Only admins see the raw-JSON editor and Store button.
            self.contentdiv.a(self.textarea)
            self.contentdiv.a(self.controlpanel)
        self.rootnode = Forumnode(self, {
            "move": "startpos",
            "uci": None,
            "owner": "Wolfram_EP",
            "comment": "Forum game",
            "fen": "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1",
            "parent": None,
            "isadmin": self.isadmin
        })
        self.contentdiv.a(self.rootnode)
        self.buildrec(self.rootnode, self.forumgame)
        #self.rootnode.e.scrollIntoView(True)
        self.parenttabpane.setscroll()
        # Whole content is draggable to enable drag-to-scroll panning.
        self.contentdiv.sa("draggable", True).cm().ae("dragstart", self.dragstart)

    def dragstart(self, ev):
        # Record the drag origin and current scroll offsets for mousemove().
        ev.preventDefault()
        self.dragstartx = ev.clientX
        self.dragstarty = ev.clientY
        self.scrolltop = self.parenttabpane.contentdiv.e.scrollTop
        self.scrollleft = self.parenttabpane.contentdiv.e.scrollLeft
        self.dragunderway = True

    def rebuild(self, seed):
        """Re-serialize the current tree and rebuild the whole widget."""
        text = self.parse()
        self.forumgame = JSON.parse(text)
        self.build(text, seed)

    def shift(self):
        # Nudge the pane right so newly appended nodes come into view.
        sl = self.parenttabpane.contentdiv.e.scrollLeft
        self.parenttabpane.contentdiv.e.scrollLeft = sl + 300

    def siores(self, response):
        """Socket-IO response dispatcher for this widget."""
        if response["kind"] == "setforumgame":
            # Full game payload: store it, apply admin flag, build the UI.
            self.forumgame = response["forumgame"]
            self.messagediv.html("Forumgame loaded")
            self.isadmin = response["isadmin"]
            if queryparams.get("noadmin", "false") == "true":
                self.isadmin = False
            self.build(JSON.stringify(self.forumgame, None, 2), mainseed)
            self.parenttabpane.contentdiv.bc("#def")
        if response["kind"] == "setforumgamedone":
            self.messagediv.html("Stored, refreshing")
            self.requestforumgame()
        if response["kind"] == "setforumgamefen":
            # A generated move came back: append it under the requesting node.
            # NOTE(review): reqfenunderway is never reset to False here;
            # today no deadlock occurs only because Forumnode.movecallback
            # checks the wrong attribute — confirm intended behavior.
            posinfo = response["positioninfo"]
            fen = response["fen"]
            san = posinfo["genmove"]["san"]
            uci = posinfo["genmove"]["uci"]
            rp = self.reqnode.parent
            owner = None
            if rp:
                owner = rp.owner
            if not owner:
                owner = window.prompt("Owner", "?")
            if not owner:
                owner = "?"
            self.reqnode.appendchild(Forumnode(self, {
                "move": san,
                "uci": uci,
                "comment": "",
                "owner": owner,
                "fen": fen,
                "parent": self.reqnode,
                "isadmin": self.isadmin
            }))
            self.parse()
#!/usr/bin/python
# -*- coding: utf-8 -*-
from include import *
from timecat import detect_file_format
from timecat import detect_datetime_format
# Global counter used by dofo() to number the printed test cases.
case_num = 0
def dofo(f, start, end, regex_format_info):
    """Run one detect_file_format case and print a trimmed result.

    Bumps the module-level case counter, announces the case number, then
    prints only the 'order' field of the detection result (or None when
    nothing was detected).
    """
    global case_num
    case_num += 1
    print("case {}".format(case_num))

    detected = detect_file_format(f, start, end, regex_format_info)
    trimmed = None
    if detected is not None:
        trimmed = {"order": detected["order"]}
    print(repr(trimmed))
    print("")
if __name__ == "__main__":
ascending_log = "testbinary.log"
with open(ascending_log) as f:
# 1. ascending
regex_format_info = detect_datetime_format("02/Oct/2016:20:13:14.666", None)
dofo(f, "02/Oct/2016:20:13:14.666", "02/Dec/2017:20:13:14.666",
regex_format_info)
descending_log = "testbinary.log.reverse"
with open(descending_log) as f:
# 2. descending
regex_format_info = detect_datetime_format("02/Oct/2016:20:13:14.666", None)
dofo(f, "02/Oct/2016:20:13:14.666", "05/Aug/2016:20:13:14.666",
regex_format_info)
ascending_log = "testbinary.log"
with open(ascending_log) as f:
regex_format_info = detect_datetime_format("2015-12-13 12:13:20", None)
# 3. ascending
dofo(f, "2015-12-13 12:13:20", None, regex_format_info)
# 4. descending
dofo(f, "2015-12-13 12:13:20", "2015-12-13 11:10:20", regex_format_info)
descending_log = "testbinary.log.reverse"
with open(descending_log) as f:
regex_format_info = detect_datetime_format("2015-12-13 11:10:20", None)
# 5. descending
dofo(f, "2015-12-13 12:13:20", None, regex_format_info)
# 6. ascending
dofo(f, "2015-12-13 12:13:20", "2016-12-13 12:13:20", regex_format_info)
| 1,783 | 796 |
"""This file contains code for use with "Think Bayes",
by Allen B. Downey, available from greenteapress.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from thinkbayes import Pmf
from bowl import Bowl
class Cookie(Pmf):
    """A Pmf mapping string bowl IDs to the probability each bowl is the
    source of the observed cookies."""

    # Shared bowl instances: each Likelihood() call removes the drawn
    # cookie, so the bowls deplete across successive updates.
    bowls = {
        'Bowl 1': Bowl({'vanilla': 30, 'chocolate': 10}),
        'Bowl 2': Bowl({'vanilla': 20, 'chocolate': 20})
    }

    def __init__(self, hypos):
        """Start with a uniform prior over the given bowl IDs.

        hypos: sequence of string bowl IDs
        """
        Pmf.__init__(self)
        for bowl_id in hypos:
            self.Set(bowl_id, 1)
        self.Normalize()

    def Update(self, data):
        """Bayesian update for one observed cookie flavor.

        data: string cookie type
        """
        for bowl_id in self.Values():
            self.Mult(bowl_id, self.Likelihood(data, bowl_id))
        self.Normalize()

    def Likelihood(self, data, hypo):
        """P(flavor | bowl); also draws the cookie and prints the bowl state.

        data: string cookie type
        hypo: string bowl ID
        """
        bowl = self.bowls[hypo]
        like = bowl.probability_of_flavors()[data]
        bowl.pick_a_cookie(data, like)
        bowl.print_state()
        return like
def main():
    """Run the cookie problem: uniform prior over two bowls, then six
    flavor observations (drawn without replacement via the Bowl objects)."""
    hypos = ['Bowl 1', 'Bowl 2']
    pmf = Cookie(hypos)
    pmf.Update('vanilla')
    pmf.Update('chocolate')
    pmf.Update('vanilla')
    pmf.Update('vanilla')
    pmf.Update('vanilla')
    pmf.Update('vanilla')
    # Python 2 print statement — this module targets Python 2.
    for hypo, prob in pmf.Items():
        print hypo, prob


if __name__ == '__main__':
    main()
| 1,642 | 580 |
import unittest
from models import settings
from models.mathbot import *
from models.settings import U238_DECAY_CONSTANT, U238_DECAY_CONSTANT_ERROR, TH232_DECAY_CONSTANT, \
TH232_DECAY_CONSTANT_ERROR
class MathbotTests(unittest.TestCase):
    """Unit tests for the mathbot helpers: outlier-resistant statistics,
    error propagation, exponential interpolation, activity ratios and
    age calculation."""

    ########################################
    ### Outlier resistant mean and stdev ###
    ########################################
    def test_outlier_resistant_mean_no_outliers_allowed(self):
        # With 0 allowed st-devs of trimming the plain numpy mean/std apply.
        test_data = [1, 1, 2, 1, 4, 1, 2, 3, 9, 2]
        mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 0)
        self.assertEqual(np.mean(test_data), mean)
        self.assertEqual(np.std(test_data), st_dev)

    def test_outlier_resistant_mean_zeros(self):
        test_data = [0] * 10
        self.assertEqual((0, 0), calculate_outlier_resistant_mean_and_st_dev(test_data, 2))

    def test_outlier_resistant_mean_empty_set(self):
        self.assertRaises(IndexError, calculate_outlier_resistant_mean_and_st_dev, [], 2)

    def test_outlier_resistant_mean_one_higher_outlier(self):
        # The single high outlier must be rejected at 1 and 2 allowed sigmas.
        test_data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 40]
        mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
        mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
        self.assertEqual(1, mean)
        self.assertEqual(0, st_dev)
        self.assertEqual(1, mean_2)
        self.assertEqual(0, st_dev_2)

    def test_outlier_resistant_mean_one_lower_outlier(self):
        test_data = [1, 40, 40, 40, 40, 40, 40, 40, 40, 40]
        mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
        mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
        self.assertEqual(40, mean)
        self.assertEqual(0, st_dev)
        self.assertEqual(40, mean_2)
        self.assertEqual(0, st_dev_2)

    def test_outlier_resistant_mean_two_outliers(self):
        # At 1 sigma the symmetric outliers survive (plain mean/std);
        # at 2 sigmas both get trimmed.
        test_data = [1, 40, 40, 40, 40, 40, 40, 40, 40, 400]
        mean, st_dev = calculate_outlier_resistant_mean_and_st_dev(test_data, 1)
        mean_2, st_dev_2 = calculate_outlier_resistant_mean_and_st_dev(test_data, 2)
        self.assertEqual(np.mean(test_data), mean)
        self.assertEqual(np.std(test_data), st_dev)
        self.assertEqual(40, mean_2)
        self.assertEqual(0, st_dev_2)

    #######################
    ### Relative errors ###
    #######################
    def test_relative_errors_zero_case(self):
        self.assertEqual(0, relative_error(0, 4))

    def test_relative_errors_general(self):
        self.assertEqual(0.1, relative_error(10, 1))

    ############################
    ### Errors in quadrature ###
    ############################
    def test_errors_in_quadrature_single_error(self):
        self.assertEqual(1, errors_in_quadrature([1]))

    def test_errors_in_quadrature_general(self):
        # 5-12-13 triple: sqrt(5^2 + 12^2) == 13.
        self.assertEqual(13, errors_in_quadrature([5, 12]))

    def test_errors_in_quadrature_negative(self):
        # Sign must not matter since errors are squared.
        self.assertEqual(13, errors_in_quadrature([-5, 12]))

    def test_errors_in_quadrature_decimals(self):
        self.assertEqual(0.2, errors_in_quadrature([0.1, 0.1, 0.1, 0.1]))

    ########################################
    ### Interpolate to exponential curve ###
    ########################################
    def test_interpolate_to_exponential(self):
        a, b, y_est_rounded, y_est_rounded_uncertainty = interpolate_to_exponential((0, 10), 3, (1, 5), 2, 0.5)
        self.assertEqual(10, a)
        self.assertAlmostEqual(-0.693147180559945, b, 14)
        self.assertAlmostEqual(7.07106781186548, y_est_rounded, 14)
        self.assertAlmostEqual(1.76776695296637, y_est_rounded_uncertainty, 14)

    def test_interpolate_to_exponential_invalid_points(self):
        self.assertRaises(AssertionError, interpolate_to_exponential, (0, 0), 0, (0, 0), 0, 0)
        self.assertRaises(AssertionError, interpolate_to_exponential, (0, 10), 0, (1, 5), 0, 2)

    ######################
    ### Activity ratio ###
    ######################
    def test_activity_ratio_general(self):
        ratio, ratio_uncertainty = activity_ratio(
            cps_mass_1=10,
            cps_mass_1_uncertainty=1,
            decay_constant_1=2,
            decay_constant_1_uncertainty=0.2,
            cps_mass_2=20,
            cps_mass_2_uncertainty=2,
            decay_constant_2=5,
            decay_constant_2_uncertainty=0.5
        )
        self.assertEqual(0.2, ratio)
        self.assertAlmostEqual(0.04, ratio_uncertainty, 16)

    def test_activity_ratio_data_values(self):
        # using data from Heidelberg University 05/2020
        ratio, ratio_uncertainty = activity_ratio(
            cps_mass_1=12.0540007,
            cps_mass_1_uncertainty=0.01,
            decay_constant_1=U238_DECAY_CONSTANT,
            decay_constant_1_uncertainty=U238_DECAY_CONSTANT_ERROR,
            cps_mass_2=10,
            cps_mass_2_uncertainty=0.01,
            decay_constant_2=TH232_DECAY_CONSTANT,
            decay_constant_2_uncertainty=TH232_DECAY_CONSTANT_ERROR
        )
        self.assertAlmostEqual(3.77943781422436, ratio, 14)
        self.assertAlmostEqual(0.00531355971346501, ratio_uncertainty, 14)

    def test_activity_ratio_invalid_input(self):
        # Negative count rates must be rejected.
        self.assertRaises(AssertionError, activity_ratio,
                          cps_mass_1=-1,
                          cps_mass_1_uncertainty=0.01,
                          decay_constant_1=U238_DECAY_CONSTANT,
                          decay_constant_1_uncertainty=U238_DECAY_CONSTANT_ERROR,
                          cps_mass_2=10,
                          cps_mass_2_uncertainty=0.01,
                          decay_constant_2=TH232_DECAY_CONSTANT,
                          decay_constant_2_uncertainty=TH232_DECAY_CONSTANT_ERROR
                          )

    #########################
    ### Age from gradient ###
    #########################
    def test_age_from_gradient_zero_uncertainty(self):
        age, uncertainty = calculate_age_from_values(0.5, 0, 1, 0, 0, 0)
        self.assertEqual(-math.log(0.5) / settings.TH230_DECAY_CONSTANT, age)
        self.assertEqual(uncertainty, 0)

    def test_age_from_gradient_more_realistic(self):
        age, uncertainty = calculate_age_from_values(3.02, 0.05, 6.33, 0.16, 0.32, 0.01)
        self.assertEqual(-math.log(1 - (3.02 - 0.32)/(6.33 - 0.32)) / settings.TH230_DECAY_CONSTANT, age)
        self.assertAlmostEqual(2459.439109, uncertainty, 6)


if __name__ == '__main__':
    unittest.main()
| 6,513 | 2,529 |
from .abstract import AbstractAuthProvider, AbstractUnauthenticatedEntity
from .utils import _context_processor
from flask import _request_ctx_stack, has_request_context
class AuthManager:
    """Flask extension that resolves the current authenticated entity by
    querying registered auth providers in registration order.

    NOTE(review): relies on ``flask._request_ctx_stack``, which is
    deprecated in Flask 2.x and removed in 2.3 — confirm target version.
    """

    def __init__(self, app=None, unauthorized_callback=None, unauthenticated_entity_class=None):
        self._auth_providers = []
        # Fall back to the library default when no custom class is supplied.
        self._unauthenticated_entity_class = (
            unauthenticated_entity_class
            if unauthenticated_entity_class
            else AbstractUnauthenticatedEntity
        )
        self._unauthorized_callback = unauthorized_callback
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Attach this manager to a Flask app and register the template
        context processor."""
        app.auth_manager = self
        app.context_processor(_context_processor)

    def set_unauthenticated_entity_class(self, unauthenticated_entity_class):
        """Override the class representing a non-authenticated request."""
        self._unauthenticated_entity_class = unauthenticated_entity_class

    def unauthorized(self):
        """Build the response for an unauthorized request.

        Precedence: per-request callback on the request context, then the
        manager-level callback, then a plain 403 fallback.
        """
        if has_request_context() and hasattr(_request_ctx_stack.top, 'unauthorized_callback'):
            return _request_ctx_stack.top.unauthorized_callback()
        if self._unauthorized_callback:
            return self._unauthorized_callback()
        return 'Not authorized', 403

    def get_auth_providers(self):
        """
        Get a list of all registered authentication providers.
        :return:List of authentication providers
        """
        return self._auth_providers

    def register_auth_provider(self, auth_provider):
        """
        Register an authentication provider with the manager.
        :param auth_provider: A valid authentication provider (i.e. an instance of a subclass of AbstractAuthenticationProvider)
        """
        # Reject the abstract base itself, then anything that is not a
        # subclass instance; only then accept.
        if auth_provider.__class__ == AbstractAuthProvider:
            raise RuntimeError('Tried to add AbstractAuthProvider. Please add an implementing subclass object instead.')
        if not isinstance(auth_provider, AbstractAuthProvider):
            raise ValueError('Tried to add an object which is no valid AuthProvider. Object should be instantiated from a subclass of AbstractAuthProvider.')
        self._auth_providers.append(auth_provider)

    def _load_authenticated_entity(self):
        """Ask each provider for an entity; fall back to the configured
        unauthenticated class. Returns True when a provider matched."""
        ctx = _request_ctx_stack.top
        if not self._auth_providers:
            raise RuntimeError('Please register at least one authentication provider to get authenticated entities.')
        for auth_provider in self._auth_providers:
            entity = auth_provider.get_authenticated_entity()
            if entity:
                ctx.authenticated_entity = entity
                return True
        ctx.authenticated_entity = self._unauthenticated_entity_class()
        return False
| 2,748 | 696 |
from threading import Lock
import pytest
from elmo.api.decorators import require_lock, require_session
from elmo.api.exceptions import LockNotAcquired, PermissionDenied
def test_require_session_present():
    """A decorated method runs normally when a session ID is set."""
    class FakeClient(object):
        # Session is available (class attribute is enough for a read).
        _session_id = "test"

        @require_session
        def action(self):
            return 42

    assert FakeClient().action() == 42
def test_require_session_missing():
    """A decorated method raises PermissionDenied without a session ID."""
    class FakeClient(object):
        # Session is not available.
        _session_id = None

        @require_session
        def action(self):
            return 42

    with pytest.raises(PermissionDenied):
        FakeClient().action()
def test_require_lock():
    """A decorated method runs once the instance lock is held."""
    class FakeClient(object):
        def __init__(self):
            # Lock attribute inspected by the decorator.
            self._lock = Lock()

        @require_lock
        def action(self):
            return 42

    holder = FakeClient()
    holder._lock.acquire()
    assert holder.action() == 42
def test_require_lock_fails():
    """A decorated method raises LockNotAcquired when the lock is free."""
    class FakeClient(object):
        def __init__(self):
            # Lock attribute inspected by the decorator (never acquired).
            self._lock = Lock()

        @require_lock
        def action(self):
            return 42

    with pytest.raises(LockNotAcquired):
        FakeClient().action()
| 1,659 | 459 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from net_utils.nn_distance import nn_distance
from net_utils.relation_tool import PositionalEmbedding
from models.registers import MODULES
from models.iscnet.modules.proposal_module import decode_scores
from configs.scannet_config import ScannetConfig #param2obb
@MODULES.register_module
class RelationalProposalModule(nn.Module):
    def __init__(self, cfg, optim_spec = None):
        '''
        Relation-based Proposal Module to enhance detected proposals.

        Builds Nr RelationUnit attention heads plus conv stacks that map
        128-dim proposal features to/from the relation feature space and
        decode them into box/class proposal scores.

        :param cfg: configuration object (dataset_config + nested
            config['model']['relation_module'] settings).
        :param optim_spec: optimizer parameters used in training, or None.
        '''
        super(RelationalProposalModule, self).__init__()
        '''Optimizer parameters used in training'''
        self.optim_spec = optim_spec
        self.cfg = cfg
        '''Parameters'''
        self.num_class = cfg.dataset_config.num_class
        self.num_heading_bin = cfg.dataset_config.num_heading_bin
        self.num_size_cluster = cfg.dataset_config.num_size_cluster
        appearance_feature_dim = cfg.config['model']['relation_module']['appearance_feature_dim']
        key_feature_dim = cfg.config['model']['relation_module']['key_feature_dim']
        geo_feature_dim = cfg.config['model']['relation_module']['geo_feature_dim']
        self.isDuplication = cfg.config['model']['relation_module']['isDuplication']
        # Number of parallel relation (attention) heads.
        self.Nr = cfg.config['model']['relation_module']['n_relations']
        self.dim_g = geo_feature_dim
        '''Modules'''
        # Learnable residual gate, initialized to 0 so the relation branch
        # starts as an identity mapping (gamma * concat + features).
        self.gamma = nn.Parameter(torch.ones(1)) # requires_grad is True by default for Parameter
        nn.init.constant_(self.gamma, 0.0)
        #if self.cfg.config['model']['relation_module']['use_learned_pos_embed']:
        #    self.pos_embedding = PositionEmbeddingLearned(6, geo_feature_dim)
        # One RelationUnit per relation head; their outputs are concatenated
        # in forward().
        self.relation = nn.ModuleList()
        for N in range(self.Nr):
            self.relation.append(RelationUnit(appearance_feature_dim, key_feature_dim=key_feature_dim, geo_feature_dim=geo_feature_dim))
        ##### Adding concat to f_a
        # 128 -> appearance_feature_dim projection applied before attention.
        self.feature_transform1 = nn.Sequential(nn.Conv1d(128,128,1), \
                                                nn.BatchNorm1d(128), \
                                                nn.ReLU(), \
                                                nn.Conv1d(128, appearance_feature_dim, 1))
        # appearance_feature_dim -> 128 projection applied after attention.
        self.feature_transform2 = nn.Sequential(nn.Conv1d(appearance_feature_dim, 128, 1), \
                                                nn.BatchNorm1d(128), \
                                                nn.ReLU(), \
                                                nn.Conv1d(128, 128, 1))
        # Decodes per-proposal features into objectness(2) + center(3) +
        # heading(2 per bin) + size(4 per cluster) + class scores.
        self.proposal_generation = nn.Sequential(nn.Conv1d(128,128,1), \
                                                 nn.BatchNorm1d(128), \
                                                 nn.ReLU(), \
                                                 nn.Conv1d(128,128,1), \
                                                 nn.BatchNorm1d(128), \
                                                 nn.ReLU(), \
                                                 nn.Conv1d(128,5 + self.num_heading_bin*2 + self.num_size_cluster*4 + self.num_class,1))
        ##### Concatenate concat to f_a
        #self.feature_transform2 = nn.Sequential(nn.Conv1d(appearance_feature_dim + self.dim_g*self.Nr, 128, 1), \
        #                                        nn.BatchNorm1d(128), \
        #                                        nn.ReLU(), \
        #                                        nn.Conv1d(128, 128, 1))
        #self.proposal_generation = nn.Sequential(nn.Conv1d(128,128,1), \
        #                                         nn.BatchNorm1d(128), \
        #                                         nn.ReLU(), \
        #                                         nn.Conv1d(128,128,1), \
        #                                         nn.BatchNorm1d(128), \
        #                                         nn.ReLU(), \
        #                                         nn.Conv1d(128, 5 + self.num_heading_bin*2 + self.num_size_cluster*4 + self.num_class, 1))
        #self.init_weights()
        #self.bn_momentum = cfg.config['bnscheduler']['bn_momentum_init']
        #self.init_bn_momentum()
        #self.relation.apply(init_weights)
        #self.feature_transform1.apply(init_weights)
        #self.feature_transform2.apply(init_weights)
        #self.proposal_generation.apply(init_weights)
def forward(self, proposal_features, end_points, data, mode='train'):
if self.cfg.config['model']['relation_module']['compute_two_losses']:
prefix = 'proposal_'
else:
prefix = ''
center = end_points[f'{prefix}center'] # (B, K, 3)
if not self.cfg.config['model']['relation_module']['use_gt_boxsize'] or mode == 'test':
### Compute predicted box size
config_dict = self.cfg.eval_config
pred_size_class = torch.argmax(end_points[f'{prefix}size_scores'], -1) # B,num_proposal
size_residuals = end_points[f'{prefix}size_residuals_normalized'] * torch.from_numpy(
config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
pred_size_residual = torch.gather(size_residuals, 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
mean_size_arr = torch.from_numpy(config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda()
pred_size_class = torch.squeeze(pred_size_class.type(torch.cuda.LongTensor)) ## Problem if batch_size==1 -> change where to squeeze
temp = mean_size_arr[pred_size_class, :]
box_size = temp + pred_size_residual
else:
### Compute GT box size
# choose the cluster for each proposal based on GT class of that proposal. GT class of each proposal is the closest GT box to each predicted proposal
aggregated_vote_xyz = end_points['aggregated_vote_xyz'] #(B,K,3)
gt_center = data['center_label'] #(B,K2,3)
_, ind1, _, _ = nn_distance(aggregated_vote_xyz, gt_center)
object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1
size_class_label = torch.gather(data['size_class_label'], 1, object_assignment) # select (B,K) from (B,K2), object_assignment: (B,K) with values in 0,1,...,K2-1
size_residual_label = torch.gather(data['size_residual_label'], 1, object_assignment.unsqueeze(-1).repeat(1,1,3)) # select (B,K,3) from (B,K2,3)
mean_size_label = torch.from_numpy(self.cfg.dataset_config.mean_size_arr.astype(np.float32)).to('cuda')[size_class_label] # (B,K,3)
box_size = size_residual_label + mean_size_label # (B,K,3)
# get geometric feature and feed it into PositionalEmbedding
geometric_feature = torch.cat([center, box_size], dim=-1) # (B, K, 6)
#if not self.cfg.config['model']['relation_module']['use_learned_pos_embed']:
# position_embedding = PositionalEmbedding(geometric_feature, dim_g=self.dim_g) # (B,K,K, dim_g)
#else:
# position_embedding = self.pos_embedding(geometric_feature) #
#position_embedding = self.feature_transform_pos(proposal_features) #
# position_embedding = position_embedding.transpose(1, 2).contiguous() #
position_embedding = PositionalEmbedding(geometric_feature, dim_g=self.dim_g) # (B,K,K, dim_g)
#transform proposal_features from 128-dim to appearance_feature_dim
proposal_features = self.feature_transform1(proposal_features) #(B,appearance_feature_dim, K)
proposal_features = proposal_features.transpose(1, 2).contiguous() # (B, K, appearance_feature_dim)
# proposal_features: (B,K,appearance_feature_dim)
# positional_embedding: (B,K,K,dim_g)
if(self.isDuplication):
f_a, embedding_f_a, position_embedding = (proposal_features, position_embedding)
else:
f_a, position_embedding = (proposal_features, position_embedding) #input_data # f_a: (B,K,appearance_feature_dim), position_embedding: (B,K,K,dim_g)
isFirst=True
for N in range(self.Nr):
if(isFirst):
if(self.isDuplication):
concat = self.relation[N](embedding_f_a,position_embedding) #(B,K,dim_k)
else:
concat = self.relation[N](f_a,position_embedding)
isFirst=False
else:
if(self.isDuplication):
concat = torch.cat((concat, self.relation[N](embedding_f_a, position_embedding)), -1)
else:
concat = torch.cat((concat, self.relation[N](f_a, position_embedding)), -1)
proposal_features = self.gamma * concat + f_a # proposal_features: (B,K, appearance_feature_dim)
#proposal_features = concat
#proposal_features = f_a + concat
#proposal_features = torch.cat((f_a, concat), -1)
proposal_features = proposal_features.transpose(1,2).contiguous() #(B,appearance_feature_dim, K)
proposal_features = self.feature_transform2(proposal_features) # (B,128,K)
net = self.proposal_generation(proposal_features) # # (B, 2+3+num_heading_bin*2+num_size_cluster*4 + num_class, K)
if self.cfg.config['model']['relation_module']['compute_two_losses']:
prefix = 'last_'
else:
prefix = ''
end_points = decode_scores(net, end_points, self.num_heading_bin, self.num_size_cluster, prefix=prefix)
return end_points, proposal_features
def init_weights(self):
    """Xavier-initialise every weight matrix (dim > 1) in the three
    trainable sub-networks; biases and 1-D parameters keep their defaults."""
    for module in (self.feature_transform1,
                   self.feature_transform2,
                   self.proposal_generation):
        for param in module.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)
def init_bn_momentum(self):
    """Propagate ``self.bn_momentum`` to every BatchNorm layer of the module."""
    for module in self.modules():
        if isinstance(module, (nn.BatchNorm2d, nn.BatchNorm1d)):
            module.momentum = self.bn_momentum
class RelationUnit(nn.Module):
    """One relation (attention) head over object proposals.

    Computes scaled dot-product attention between the K proposals of each
    batch element, with the attention logits biased by a learned weight of
    the pairwise geometric embedding.
    """

    def __init__(self, appearance_feature_dim=768, key_feature_dim=96, geo_feature_dim=96):
        super(RelationUnit, self).__init__()
        self.dim_g = geo_feature_dim  # size of the geometric (positional) embedding
        self.dim_k = key_feature_dim  # size of key/query/value projections
        self.WG = nn.Linear(geo_feature_dim, 1, bias=True)  # geometric gate -> scalar per pair
        self.WK = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
        self.WQ = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
        self.WV = nn.Linear(appearance_feature_dim, key_feature_dim, bias=True)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, f_a, position_embedding):
        # f_a: (B,K,appearance_feature_dim), position_embedding: (B,K,K,dim_g)
        B, K, _ = f_a.size()
        w_g = self.relu(self.WG(position_embedding))  # (B,K,K,1) non-negative geometric gate
        w_k = self.WK(f_a)  # (B,K,dim_k)
        w_k = w_k.view(B, K, 1, self.dim_k)
        w_q = self.WQ(f_a)  # (B,K,dim_k)
        w_q = w_q.view(B, 1, K, self.dim_k)
        # Broadcasted pairwise dot products; 1st K indexes keys, 2nd K queries.
        scaled_dot = torch.sum((w_k * w_q), -1)  # (B,K,K)
        scaled_dot = scaled_dot / np.sqrt(self.dim_k)
        w_g = w_g.view(B, K, K)  # Note that 1st K is key, 2nd K is query
        w_a = scaled_dot.view(B, K, K)
        # log(clamp(...)) keeps zero geometric gates from producing -inf logits.
        w_mn = torch.log(torch.clamp(w_g, min=1e-6)) + w_a  # (B,K,K)
        w_mn = torch.nn.Softmax(dim=1)(w_mn)  # softmax over the key dimension
        w_v = self.WV(f_a)  # (B,K,dim_k)
        w_mn = w_mn.view(B, K, K, 1)  # (B,K,K,1)
        w_v = w_v.view(B, K, 1, -1)  # (B,K,1,dim_k)
        output = w_mn * w_v  # (B,K,K,dim_k)
        output = torch.sum(output, 1)  # (B,K,dim_k): attention-weighted sum over keys
        return output
class PositionEmbeddingLearned(nn.Module):
    """
    Absolute pos embedding, learned.

    Two pointwise (kernel_size=1) convolutions lift each input coordinate
    vector to ``num_pos_feats`` channels.
    """

    def __init__(self, input_channel, num_pos_feats=128):
        super().__init__()
        self.position_embedding_head = nn.Sequential(
            nn.Conv1d(input_channel, num_pos_feats, kernel_size=1),
            nn.BatchNorm1d(num_pos_feats),
            nn.ReLU(inplace=True),
            nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1))

    def forward(self, xyz):
        # (B, N, C) -> (B, C, N) for Conv1d, then embed.
        channels_first = xyz.transpose(1, 2).contiguous()
        return self.position_embedding_head(channels_first)
#def init_weights(m):
# if type(m) == nn.Linear or type(m) == nn.Conv1d:
# gain = nn.init.calculate_gain('relu')
# nn.init.xavier_uniform_(m.weight, gain=gain)
# m.bias.data.fill_(0.01)
#gain = nn.init.calculate_gain('relu')
#nn.init.xavier_uniform_(m.weight, gain=gain)
#m.bias.data.fill_(0.01) | 13,400 | 4,647 |
from enum import Enum, auto
from typing import List, Union, Callable
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_network, ip_address
from warnings import warn
class Order(Enum):
    """Position at which a reverse proxy inserts its value into the header."""
    HEADER_APPENDED = auto()
    HEADER_PREPENDED = auto()
class Header:
    """Describes one header that a reverse proxy adds to forwarded requests."""

    def __init__(self,
                 name: str,
                 order: Order = Order.HEADER_APPENDED,
                 custom_parser: Callable[[str], Union[IPv4Address, IPv6Address]] = None
                 ):
        # Header field names are case-insensitive
        # (https://datatracker.ietf.org/doc/html/rfc7230#section-3.2), so we
        # normalise to uppercase once here to keep all later matching
        # consistent; if this breaks anything, the remainder of the code is broken.
        self.uppercase_name = name.upper()
        self.order = order
        self.custom_parser = custom_parser
class ReverseProxy:
    def __init__(self,
                 header_added: Header,
                 *ip_addresses: Union[str, IPv4Address, IPv4Network, IPv6Address, IPv6Network],
                 ):
        """
        :param header_added: Specify here which header this host adds. We only support one header per reverse proxy
        :param ip_addresses: You can use anything that ipaddress.ip_network accepts, e.g. `127.0.0.0/8` or `::1`
        """
        if not ip_addresses:
            warn("A reverse proxy configuration without IP addresses will be ignored.")
        self.header_added = header_added
        # Bare addresses come out as /32 (IPv4) or /128 (IPv6) networks,
        # i.e. they match exactly one IP.
        self.ip_networks = [ip_network(address) for address in ip_addresses]
| 1,760 | 507 |
from torch import nn
import torch.nn.functional as F
from locs.models.activations import ACTIVATIONS
class AnisotropicEdgeFilter(nn.Module):
    """Edge filter whose per-edge mixing matrix is predicted from edge positions.

    A small MLP maps each edge's positional features to an
    (in_size x out_size) matrix; the edge attributes are then transformed by
    their own matrix, making the filter direction-dependent (anisotropic).
    NOTE(review): `dummy_size` and **kwargs are accepted for interface
    compatibility but unused here.
    """

    def __init__(self, in_size, pos_size, hidden_size, dummy_size, out_size,
                 act='elu', **kwargs):
        super().__init__()
        self.num_relative_features = in_size
        self.out_size = out_size
        self._act = act
        self.edge_filter = nn.Sequential(
            nn.Linear(pos_size, hidden_size),
            ACTIVATIONS[act](),
            nn.Linear(hidden_size, self.num_relative_features * out_size),
        )
        self.init_weights()

    def init_weights(self):
        # calculate_gain has no 'elu' entry, so the ReLU gain is reused.
        gain_name = 'relu' if self._act == 'elu' else self._act
        gain = nn.init.calculate_gain(gain_name)
        nn.init.orthogonal_(self.edge_filter[0].weight, gain=gain)
        nn.init.orthogonal_(self.edge_filter[2].weight)

    def forward(self, edge_attr, edge_pos):
        # Predict one (num_relative_features x out_size) matrix per edge...
        weights = self.edge_filter(edge_pos)
        weights = weights.reshape(
            weights.shape[:-1] + tuple([self.num_relative_features, -1]))
        # ...and apply it to that edge's attribute vector.
        return (edge_attr.unsqueeze(-2) @ weights).squeeze(-2)
class MLPEdgeFilter(nn.Module):
    """2-layer MLP, follows same template as AnisotropicEdgeFilter.

    `edge_pos` is accepted but ignored: filtering here depends only on the
    edge attributes. `pos_size` and `hidden_size` are likewise unused;
    `bottleneck_size` is the effective hidden width (stored in
    ``self.hidden_size``, as before).
    """

    def __init__(self, in_size, pos_size, hidden_size, bottleneck_size,
                 out_size, do_prob=0.0):
        super().__init__()
        self.num_relative_features = in_size
        self.out_size = out_size
        self.hidden_size = bottleneck_size
        self.lin1 = nn.Linear(self.num_relative_features, bottleneck_size)
        self.drop1 = nn.Dropout(p=do_prob)
        self.lin2 = nn.Linear(bottleneck_size, out_size)
        self.init_weights()

    def init_weights(self):
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight.data)
                module.bias.data.fill_(0.1)
            elif isinstance(module, nn.BatchNorm1d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, edge_attr, edge_pos):
        hidden = self.drop1(F.relu(self.lin1(edge_attr)))
        return F.relu(self.lin2(hidden))
| 2,333 | 802 |
#! /usr/bin/env python
"""
Daniel Gorrie
Large dataset sampler
"""
import random
import os
from os import listdir
from os.path import isfile, join
# Constants
INPUT_FILE = 'train.features'        # source feature file to sample from
INPUT_FILE_SIZE = 8352136            # number of lines in INPUT_FILE (precomputed)
OUTPUT_FILE = 'train_small.features' # destination for the sampled subset
SAMPLE_SIZE = 110000                 # number of distinct lines to keep
INPUT_LABEL_DIR = 'labels/'          # per-label files, line-aligned with INPUT_FILE
OUTPUT_LABEL_DIR = 'labels_small/'   # destination for the matching label subsets
def main():
    """Sample SAMPLE_SIZE random lines from INPUT_FILE, and the matching
    line numbers of every file in INPUT_LABEL_DIR, into their *_small
    counterparts."""
    random.seed()
    # Draw SAMPLE_SIZE *distinct* 0-based line indices. random.sample
    # guarantees uniqueness, replacing the old dict + retry loop, and the
    # upper bound is exclusive: the original randint(0, INPUT_FILE_SIZE)
    # could pick an index one past the last line (never matched, silently
    # shrinking the sample).  Also fixes the Python-2-only `xrange`.
    indices = set(random.sample(range(INPUT_FILE_SIZE), SAMPLE_SIZE))
    # Copy the sampled feature lines.
    with open(OUTPUT_FILE, 'w') as out:
        with open(INPUT_FILE, 'r') as f:
            for line_count, line in enumerate(f):
                if line_count in indices:
                    # append the line to the output file
                    out.write(line)
    # Grab the label files.
    label_files = [f for f in listdir(INPUT_LABEL_DIR) if isfile(join(INPUT_LABEL_DIR, f))]
    # Make the output directory if needed.
    d = os.path.dirname(OUTPUT_LABEL_DIR)
    if not os.path.exists(d):
        os.makedirs(d)
    # Mirror each label file, keeping the same sampled line numbers so the
    # labels stay aligned with the sampled features.
    for label_file in label_files:
        with open(INPUT_LABEL_DIR + label_file, 'r') as f:
            with open(OUTPUT_LABEL_DIR + label_file, 'w') as out:
                for line_count, line in enumerate(f):
                    if line_count in indices:
                        out.write(line)
# Run the sampler when invoked as a script.
if __name__ == '__main__':
    main()
| 1,798 | 601 |
import re
import urllib.request
"""
Collection of handy functions related to uniprot. Potential reimplementations
of code that would be available in various packages with the goal of keeping
dependencies at a minimum.
"""
def valid_uniprot_ac_pattern(uniprot_ac):
    """
    Checks whether Uniprot AC is formally correct according to
    https://www.uniprot.org/help/accession_numbers
    This is no check whether it actually exists.
    :param uniprot_ac: Accession code to be checked
    :return: True if the code matches the accession pattern exactly
    """
    ac_pat = "[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}"
    # fullmatch anchors both ends: re.match only anchors the start and would
    # accept trailing garbage such as "P12345xyz".
    return re.fullmatch(ac_pat, uniprot_ac) is not None
def seq_from_ac(uniprot_ac):
    """
    Fetches raw sequence string for given uniprot accession code
    :param uniprot_ac: Accession code for which you want the sequence
    :return: sequence with the FASTA header and line breaks stripped
    :raises RuntimeError: if the accession code is formally invalid
    """
    import urllib.error  # local import: module top only imports urllib.request

    if not valid_uniprot_ac_pattern(uniprot_ac):
        raise RuntimeError("Uniprot AC does not look valid")
    try:
        # that's the default uniprot access
        url = "https://www.uniprot.org/uniprot/%s.fasta" % uniprot_ac
        with urllib.request.urlopen(url) as response:
            data = response.readlines()
    except urllib.error.URLError:
        # Only catch network/HTTP failures (the old bare `except:` hid real
        # bugs such as typos or keyboard interrupts).
        # this is only temporary, as SARS-CoV2 is not yet in uniprot
        url = (
            "https://www.ebi.ac.uk/uniprot/api/covid-19/uniprotkb/accession/%s.fasta"
            % (uniprot_ac)
        )
        with urllib.request.urlopen(url) as response:
            data = response.readlines()
    # First line is the FASTA header; the rest is the sequence.
    return "".join(line.decode().strip() for line in data[1:])
| 1,581 | 541 |
from typing import Dict, Union, Optional
from uuid import UUID
import requests
from requests.auth import HTTPBasicAuth
from yarl import URL
class BaseAdapter:
    """Default handlers for a given node connection. Methods should be overridden for each team, as needed.

    URL builders here end with a trailing slash; some team adapters override
    them to drop it. The exact URL shape is an inter-team contract --
    presumably matched to each team's routing; confirm before changing.
    """

    def __init__(self) -> None:
        super().__init__()
        # One shared session so connections and the Accept header are reused.
        self.session = requests.session()
        self.session.headers['Accept'] = 'application/json'

    def post_inbox_item(self, request, *args, **kwargs):
        # Hook for adapters that need to reshape inbox payloads; identity here.
        return request

    def send_to_inbox(self, node, author_uuid: Union[str, UUID], post_json: Dict, *args, **kwargs) -> requests.Response:
        # POST the given JSON to the author's inbox on the remote node.
        return self.session.post(
            self.get_inbox_url(node, author_uuid),
            auth = HTTPBasicAuth(node.node_username, node.node_password),
            json = post_json
        )

    def get_author(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> requests.Response:
        # Fetch a single author record from the remote node.
        return self.session.get(
            self.get_author_url(node, author_uuid),
            auth = HTTPBasicAuth(node.node_username, node.node_password)
        )

    def get_authors(self, node, *args, **kwargs) -> requests.Response:
        # Fetch up to 1000 authors in one page.
        return self.session.get(
            self.get_authors_url(node),
            params = { 'size': 1000 },
            headers = { 'Accept': 'application/json' },
            auth = HTTPBasicAuth(node.node_username, node.node_password)
        )

    def shape_author(self, node, author_uuid: Union[str, UUID], response: requests.Response, *args, **kwargs) -> Optional[Dict]:
        # Normalise a fetched author response; None on failure.
        if response.ok:
            return response.json()
        else:
            return None

    def shape_authors(self, node, response: requests.Response, *args, **kwargs) -> Optional[Dict]:
        # Normalise a fetched author-list response; None on failure.
        if response.ok:
            return response.json()
        else:
            return None

    def get_author_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        # Trailing '' appends a final slash to the URL.
        return (URL(node.host) / node.prefix / 'author' / author_uuid / '').human_repr()

    def get_authors_url(self, node, *args, **kwargs) -> str:
        return (URL(node.host) / node.prefix / 'authors' / '').human_repr()

    def get_followers(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return self.session.get(
            self.get_followers_url(node, author_uuid),
            auth = HTTPBasicAuth(node.node_username, node.node_password)
        )

    def get_followers_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'followers' / '').human_repr()

    def send_friend_request(self, node, author_uuid: Union[str, UUID], follower_json: Dict, *args, **kwargs):
        # Friend requests are delivered through the author's inbox.
        return self.session.post(
            self.get_inbox_url(node, author_uuid),
            auth = HTTPBasicAuth(node.node_username, node.node_password),
            json = follower_json
        )

    def get_inbox_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'inbox' / '').human_repr()

    def remove_follower(self, node, author_uuid: Union[str, UUID], user_uuid: Union[str, UUID], *args, **kwargs):
        return self.session.delete(
            self.get_follower_url(node, author_uuid, user_uuid),
            auth = HTTPBasicAuth(node.node_username, node.node_password)
        )

    def get_follower_url(self, node, author_uuid: Union[str, UUID], user_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        if isinstance(user_uuid, UUID):
            user_uuid = str(user_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'follower' / user_uuid / '').human_repr()

    def get_posts(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        # NOTE(review): uses requests.get directly instead of the shared
        # session -- looks intentional (fresh headers), but verify.
        return requests.get(
            self.get_posts_url(node, author_uuid, include_slash = True),
            headers = { 'Accept': 'application/json' },
            auth = HTTPBasicAuth(node.node_username, node.node_password) # Shouldn't need but in case
        )

    def get_posts_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        url = URL(node.host) / node.prefix / 'author' / str(author_uuid) / 'posts'
        # include_slash toggles the trailing slash for teams that require it.
        if kwargs.get('include_slash') and kwargs['include_slash'] == True:
            url /= ''
        return url.human_repr()
class Team1Adapter(BaseAdapter):
    # Team 1 follows the default API conventions exactly.
    pass
class Team4Adapter(BaseAdapter):
    """Same endpoints as BaseAdapter, but team 4's routes take no trailing
    slash, so every URL builder omits the final '/'."""

    def get_author_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid).human_repr()

    def get_followers_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'followers').human_repr()

    def get_follower_url(self, node, author_uuid: Union[str, UUID], user_uuid: Union[str, UUID], *args, **kwargs) -> str:
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        if isinstance(user_uuid, UUID):
            user_uuid = str(user_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'follower' / user_uuid).human_repr()

    def get_inbox_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        if isinstance(author_uuid, UUID):
            author_uuid = str(author_uuid)
        return (URL(node.host) / node.prefix / 'author' / author_uuid / 'inbox').human_repr()

    def get_posts_url(self, node, author_uuid: Union[str, UUID], *args, **kwargs):
        return super().get_posts_url(node, author_uuid, *args, include_slash = False)
class Team7Adapter(BaseAdapter):
    """Adapter for team 7, whose author endpoints also return foreign
    (proxied) authors; responses are filtered down to authors hosted on the
    node itself.

    NOTE(review): unlike BaseAdapter.shape_author/shape_authors, these
    overrides fetch the response themselves instead of accepting one --
    callers must not pass a `response` argument here.
    """

    def shape_author(self, node, author_uuid: Union[str, UUID], *args, **kwargs) -> Optional[Dict]:
        response = super().get_author(node, author_uuid, *args, **kwargs)
        if response.ok:
            payload = response.json()  # parse once instead of twice
            if node.host in payload['id']:
                return payload
        return None

    def shape_authors(self, node, *args, **kwargs) -> Optional[Dict]:
        response = super().get_authors(node, *args, **kwargs)
        if not response.ok:
            return None
        output_json = response.json()
        # Keep only authors whose id points at this node.
        output_json['items'] = [
            item for item in output_json['items'] if node.host in item['id']
        ]
        return output_json
# A global list of adapters that are tied to nodes via the database.
# Keys are the adapter names stored on node records; values are shared
# singleton instances.
registered_adapters: Dict[str, BaseAdapter] = {
    'default': BaseAdapter(),
    'team_1': Team1Adapter(),
    'team_4': Team4Adapter(),
    'team_7': Team7Adapter(),
}
| 7,414 | 2,331 |
#!/usr/bin/env python
'''
Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Stroh Daniel, Trautmann Jeremias
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#Manually draw frustum at current robot pose in RViz.
import roslib
import rospy
from asr_robot_model_services.msg import RobotStateMessage
from asr_robot_model_services.srv import CalculateCameraPose, GetPose
from asr_next_best_view.srv import TriggerFrustumVisualization
def get_camera_pose_cpp():
    """
    Returns camera pose
    """
    service_name = '/asr_robot_model_services/GetCameraPose'
    rospy.wait_for_service(service_name, timeout=5)
    get_pose = rospy.ServiceProxy(service_name, GetPose)
    return get_pose().pose
def get_camera_frustum(camera_pose):
    """Trigger RViz frustum visualisation for the given camera pose;
    service failures are logged as warnings instead of raised."""
    try:
        rospy.wait_for_service('/nbv/trigger_frustum_visualization', timeout=5)
        get_frustum = rospy.ServiceProxy(
            '/nbv/trigger_frustum_visualization', TriggerFrustumVisualization)
        get_frustum(camera_pose)
    except (rospy.ServiceException, rospy.exceptions.ROSException) as e:
        # `except (...) as e` works on Python 2.6+ AND Python 3; the
        # original comma form is a SyntaxError under Python 3.
        rospy.logwarn(str(e))
def main():
    # Fetch the current camera pose and draw its frustum in RViz.
    camera_pose = get_camera_pose_cpp()
    get_camera_frustum(camera_pose)

if __name__ == "__main__":
    main()
from .auth import login_required
| 33 | 9 |
from geom2d import Segment, make_vector_between
from structures.model.bar import StrBar
from .node import StrNodeSolution
class StrBarSolution:
    """
    A truss structure bar with the solution values included.
    This class is a decorator of the original `StrBar` class that's
    linked to the solution nodes, that include their displacement
    vectors. It's thanks to the solution displaced nodes that we
    can obtain the stress and strain values for the bar.
    """

    def __init__(
            self,
            original_bar: StrBar,
            start_node: StrNodeSolution,
            end_node: StrNodeSolution
    ):
        # The solution nodes must be the same nodes (by id) that the
        # original bar connects; otherwise the derived values are wrong.
        if original_bar.start_node.id != start_node.id:
            raise ValueError('Wrong start node')
        if original_bar.end_node.id != end_node.id:
            raise ValueError('Wrong end node')
        self.__original_bar = original_bar
        self.start_node = start_node
        self.end_node = end_node

    @property
    def id(self):
        """
        The original bar's identifier.
        :return: id for the bar
        """
        return self.__original_bar.id

    @property
    def cross_section(self):
        """
        The original bar's cross section area value.
        :return: the cross section
        """
        return self.__original_bar.cross_section

    @property
    def young_mod(self):
        """
        The original bar's Young modulus (or elasticity modulus).
        :return: the Young modulus
        """
        return self.__original_bar.young_mod

    @property
    def original_geometry(self):
        """
        The original bar's geometry described by a line segment.
        :return: the bar's geometry
        """
        return self.__original_bar.geometry

    @property
    def final_geometry(self):
        """
        The bar's geometry, described by a line segment, after the
        computed displacements are applied.
        :return: the solution bar's geometry
        """
        return Segment(
            self.start_node.displaced_pos,
            self.end_node.displaced_pos
        )

    @property
    def original_length(self):
        """
        The original bar's length. This is, the distance between
        its nodes.
        :return: the bar's length
        """
        return self.original_geometry.length

    @property
    def final_length(self):
        """
        The bar's length after the computed displacements are
        applied. This is the distance between the solution nodes.
        :return: the solution bar's length
        """
        return self.final_geometry.length

    @property
    def elongation(self):
        """
        The difference between the solution bar's length and the
        original bar's length.
        A positive elongation means the bar has elongated (due to
        a tensile stress) and a negative elongation means the bar
        has shortened (due to a compressive stress).
        :return: the bar's elongation
        """
        return self.final_length - self.original_length

    @property
    def strain(self):
        """
        The bar's elongation per unit of length. This is a
        unit-less quantity.
        :return: the bar's strain
        """
        return self.elongation / self.original_length

    @property
    def stress(self):
        """
        The bar's axial force per unit of cross section area.
        Using Hooke's law, the stress can be computed as the
        product of the bar's strain and Young modulus.
        :return: the bar's stress
        """
        return self.young_mod * self.strain

    @property
    def internal_force_value(self):
        """
        The bar's internal force.
        :return: the bar's internal force
        """
        return self.stress * self.cross_section

    def force_in_node(self, node: StrNodeSolution):
        """
        Returns the force this bar exerts on one of its two nodes.
        The passed in node needs to be one of the bar's end nodes,
        otherwise, this method will throw a `ValueError`.
        :param node: one of the bar's end nodes
        :return: force exerted by the bar on the given node
        """
        # The force vector points along the displaced bar axis, towards the
        # queried node, with magnitude equal to the internal force.
        if node is self.start_node:
            return make_vector_between(
                self.end_node.displaced_pos,
                self.start_node.displaced_pos
            ).with_length(
                self.internal_force_value
            )
        elif node is self.end_node:
            return make_vector_between(
                self.start_node.displaced_pos,
                self.end_node.displaced_pos
            ).with_length(
                self.internal_force_value
            )
        raise ValueError(
            f'Bar {self.id} does not know about node {node.id}'
        )

    def has_node(self, node: StrNodeSolution):
        """
        Tests whether the given `node` is one of this bar's end
        nodes.
        :param node: structure node
        :return: is the node connected with this bar?
        """
        return node is self.start_node or node is self.end_node

    def final_geometry_scaling_displacement(self, scale: float):
        """
        Computes the geometry of the bar after the displacements
        of its nodes have been applied with a given scale factor.
        This scaled geometry can be used for drawing the solution
        diagram.
        :param scale: used to scale the displacements
        :return: the solution bar's final geometry scaled
        """
        return Segment(
            self.start_node.displaced_pos_scaled(scale),
            self.end_node.displaced_pos_scaled(scale)
        )
import pytest
pytestmark = pytest.mark.forked
from unladenchant.resourcecheck import MetaMixinResourceChecker
class MetaClassTest(MetaMixinResourceChecker, type):
    # Metaclass under test: runs the RESCHECK callables at class creation.
    pass

class BaseClass(metaclass=MetaClassTest):
    # Root class for the tests below; defining a subclass triggers the checks.
    pass
## Testing pass and failure
def rescheckPassed():
    """Resource check stub that always succeeds."""
    return True

def rescheckFailed():
    """Resource check stub that always fails."""
    return False
def test_will_pass_if_res_okay():
    # Creating a subclass whose checks all pass must not raise.
    class NewClass(BaseClass):
        RESCHECK = (rescheckPassed,)

def test_will_fail_if_res_not_okay():
    # A failing check aborts class creation with EnvironmentError.
    with pytest.raises(EnvironmentError):
        class NewClass(BaseClass):
            RESCHECK = (rescheckFailed,)

def test_will_fail_if_res_not_okay_partially():
    # One failing check is enough, even if another passes.
    with pytest.raises(EnvironmentError):
        class NewClass(BaseClass):
            RESCHECK = (rescheckFailed, rescheckPassed)
## Testing multiple checks
@pytest.fixture
def fixtureCheckCount():
    """Provide a call-counter dict plus two passing checks that increment it."""
    call_counts = {
        'resCheck2': 0,
        'resCheck': 0
    }

    def funCheckRes():
        call_counts['resCheck'] += 1
        return True

    def funCheckRes2():
        call_counts['resCheck2'] += 1
        return True

    return call_counts, funCheckRes, funCheckRes2
def test_did_run_once(fixtureCheckCount):
    # Each check listed in RESCHECK runs exactly once at class creation.
    mTimes, funCheckRes, funCheckRes2 = fixtureCheckCount
    class NewClass(BaseClass):
        RESCHECK = (funCheckRes, )
    assert mTimes == {'resCheck': 1, 'resCheck2': 0}

def test_did_run_once_only(fixtureCheckCount):
    # A check already run for the parent class is not re-run for a subclass;
    # only the newly added check executes.
    mTimes, funCheckRes, funCheckRes2 = fixtureCheckCount
    class NewClass(BaseClass):
        RESCHECK = (funCheckRes, )
    class NewClass2(NewClass):
        RESCHECK = (funCheckRes, funCheckRes2)
    assert mTimes == {'resCheck': 1, 'resCheck2': 1}
| 1,631 | 544 |
import sys

# Fast stdin reader (drop-in replacement for the builtin input()).
input = lambda: sys.stdin.readline().rstrip()

# Print the n-th positive integer whose decimal form contains "666".
target = int(input())
found = 0
value = 665
while found < target:
    value += 1
    if "666" in str(value):
        found += 1
print(value)
| 219 | 90 |
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.ODEPythonWriter import ODEPythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.GillespiePythonWriter import GillespiePythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.SDEPythonWriter import SDEPythonWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.ODECUDAWriter import OdeCUDAWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.SDECUDAWriter import SdeCUDAWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.GillespieCUDAWriter import GillespieCUDAWriter
#from CWriter import CWriter
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.CandPythonParser import CandPythonParser
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.SDEAndGillespieCUDAParser import SdeAndGillespieCUDAParser
from peitho.errors_and_parsers.abc_sysbio.abcsysbio_parser.ODECUDAParser import OdeCUDAParser
import re
def ParseAndWrite(source, integrationType, modelName = None, inputPath = "", outputPath = "", method = None):
"""
***** args *****
source:
a list of strings.
Each tuple entry describes a SBML file to be parsed.
integrationType:
a list of strings.
The length of this tuple is determined by the number of SBML
files to be parsed. Each entry describes the simulation algorithm.
Possible algorithms are:
ODE --- for deterministic systems; solved with odeint (scipy)
SDE --- for stochastic systems; solved with sdeint (abc)
Gillespie --- for staochastic systems; solved with GillespieAlgorithm (abc)
***** kwargs *****
modelName:
a list of strings.
modelName describes the names of the parsed model files.
method:
an integer number.
Type of noise in a stochastic system.
(Only implemented for stochastic systems solved with sdeint.)
Possible options are:
1 --- default
2 --- Ornstein-Uhlenbeck
3 --- geometric Brownian motion
"""
#regular expressions for detecting integration types and integration language
c=re.compile('C', re.IGNORECASE)
py=re.compile('Python', re.I)
cuda=re.compile('CUDA', re.I)
ode=re.compile('ODE', re.I)
sde=re.compile('SDE', re.I)
heun=re.compile('Heun', re.I)
milstein=re.compile('Milstein', re.I)
gil = re.compile('Gillespie', re.I)
#check that you have appropriate lengths of integration types and sources
#(need equal lengths)
if(not(len(source)==len(integrationType))):
print "\nError: Number of sources is not the same as number of integrationTypes!\n"
#check that you have model names,
#if not the models will be named model1, model2, etc
else:
if(modelName==None):
modelName=[]
for x in range(0,len(source)):
modelName.append("model"+repr(x+1))
else:
for x in range(0,len(modelName)):
if(modelName[x]==""):
modelName[x]="model"+repr(x+1)
#if no method is specified and the integrationType is "SDE"
#the method type defaults to 1
for model in range(0,len(source)):
if cuda.search(integrationType[model]):
if(not(sde.search(integrationType[model]) or gil.search(integrationType[model]) or ode.search(integrationType[model]))):
print "\nError: an integration type is required for CUDA"
elif (sde.search(integrationType[model])):
if(heun.search(integrationType[model]) or milstein.search(integrationType[model])):
print "\nError: Only Euler is available in Cuda"
else:
if(method==None or method[model]==""):
parser = SdeAndGillespieCUDAParser(source[model], modelName[model], "CUDA SDE", 1, inputPath, outputPath)
else:
parser = SdeAndGillespieCUDAParser(source[model], modelName[model], "CUDA SDE", method[model], inputPath, outputPath)
elif(gil.search(integrationType[model])):
parser = SdeAndGillespieCUDAParser(source[model], modelName[model], integrationType[model], None, inputPath, outputPath)
else:
parser = OdeCUDAParser(source[model], modelName[model], integrationType[model], None, inputPath, outputPath)
elif c.search(integrationType[model]):
if (sde.search(integrationType[model])):
if (not (method==None or method==1)):
print "\nError: Only the method 1 of SDE resolution can be used in C"
else:
parser = CandPythonParser(source[model],modelName[model], "C", None, inputPath, outputPath)
else:
parser = CandPythonParser(source[model],modelName[model], "C", None, inputPath, outputPath)
elif py.search(integrationType[model]):
if(integrationType==None):
print "\nError: an integration type is required for Python"
elif (sde.search(integrationType[model])):
if(heun.search(integrationType[model]) or milstein.search(integrationType[model])):
print "\nError: Only Euler is available in Python"
else:
if(method==None or method[model]==""):
parser = CandPythonParser(source[model], modelName[model], "Python SDE", 1, inputPath, outputPath)
else:
parser = CandPythonParser(source[model], modelName[model], "Python SDE", method[model], inputPath, outputPath)
else:
parser = CandPythonParser(source[model], modelName[model], integrationType[model], None, inputPath, outputPath)
| 6,419 | 1,740 |
import unittest
import os
import sys
class ArgCatUnitTest(unittest.TestCase):
    """TestCase that resolves data files relative to the defining module's directory."""

    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)
        # Directory of the module that defines the concrete test class.
        module_file = sys.modules[self.__module__].__file__
        self._cur_path = os.path.dirname(os.path.abspath(module_file))

    def abs_path_of_test_file(self, file_name: str) -> str:
        """Return the absolute path of *file_name* next to the test module."""
        return os.path.join(self._cur_path, file_name)
| 390 | 134 |
# Gregorian leap-year rule: divisible by 4, except century years, unless
# also divisible by 400. The original used an undefined name (`Year`) and
# an inverted condition that rejected ordinary leap years such as 2024.
year = int(input("Give the value of year:"))
if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
    print("This is a leap year")
else:
    print("This is not leap year")
| 173 | 73 |
from __future__ import absolute_import
# -- IMPORT -- #
from ..utils.interfaces import Method
class PatternAttribution(Method):
    """Attribution method based on learned patterns.

    Reference: http://arxiv.org/abs/1705.05598
    """

    def __init__(self):
        # Not implemented yet: instantiating this method is an error.
        raise NotImplementedError

    def interpret(self):
        """Compute the interpretation (not yet implemented)."""
        pass

    def __repr__(self):
        return super().__repr__() + self.__class__.__name__ + '>'
def align(value, alignment=0x1000):
    """Round *value* up to the next multiple of *alignment* (default: one page)."""
    remainder = value % alignment
    if remainder:
        return value + alignment - remainder
    return value
# stdlib modules
try:
from urllib.response import addinfourl
from urllib.error import HTTPError
from urllib.request import HTTPHandler
from io import StringIO
except ImportError:
from urllib2 import addinfourl, HTTPError, HTTPHandler
from StringIO import StringIO
def mock_response(req):
    """Return a canned urllib response for *req*, or raise for invalid URLs.

    URLs under http://valid get a 200 with a content-disposition header,
    http://filename gets a 200 with empty headers, and http://invalid
    raises a 404 HTTPError. Any other URL yields None.
    """
    url = req.get_full_url()
    if url.startswith("http://valid"):
        body = "valid"
        headers = {"content-disposition": "filename=valid.tar"}
    elif url.startswith("http://filename"):
        body = "filename"
        headers = {}
    elif url.startswith("http://invalid"):
        raise HTTPError(url, 404, "invalid", "", StringIO())
    else:
        return None
    resp = addinfourl(StringIO(body), body, url)
    resp.code = 200
    resp.msg = "OK"
    resp.headers = headers
    return resp
class MockHTTPHandler(HTTPHandler):
    """HTTP handler that short-circuits network access with canned responses."""

    def http_open(self, req):
        # Delegate to the module-level factory instead of opening a socket.
        response = mock_response(req)
        return response
| 988 | 301 |
'''Slickbird collection handler'''
import logging
import json
from tornado.web import URLSpec
import tornado.web
from slickbird import datparse
import slickbird.orm as orm
import slickbird
from slickbird.web import hbase
def _log():
if not _log.logger:
_log.logger = logging.getLogger(__name__)
return _log.logger
_log.logger = None
# Add handler: ###############################################################
class CollectionAddHandler(hbase.PageHandler):
    """Handles POSTs that create a new collection from an uploaded DAT file."""
    name = 'collection_add'

    @tornado.gen.coroutine
    def collectionadd(self, cadder, dat):
        """Add every game from *dat* to the collection, then commit.

        Runs as a background coroutine (see post), yielding to the IO loop
        after each game so the server stays responsive during long imports.
        """
        for gn, gd in dat['games'].items():
            cadder.game_add(gn, gd)
            yield tornado.gen.moment
        cadder.done()
        self.settings['session'].commit()

    @tornado.gen.coroutine
    def post(self):
        """Parse the uploaded DAT file, redirect, then import in the background."""
        name = self.get_argument('name')
        directory = self.get_argument('directory')
        filename = self.request.files['datfile'][0]['filename']
        dat = datparse.parse(
            datstr=self.request.files['datfile'][0]['body'].decode('utf-8'))
        cadder = slickbird.CollectionAdder(
            self.settings['session'], self.settings['home'],
            name, directory, filename, dat)
        # Redirect immediately; the actual import continues after the
        # response is sent, via the spawned callback below.
        self.redirect(self.reverse_url('game_lst', cadder.name))
        tornado.ioloop.IOLoop.current() \
            .spawn_callback(self.collectionadd, cadder, dat)
# API: #######################################################################
class CollectionListDataHandler(tornado.web.RequestHandler):
    """JSON API endpoint listing every collection in the database."""

    def get(self):
        session = self.settings['session']
        collections = [c.as_dict() for c in session.query(orm.Collection)]
        self.write(json.dumps(collections))
# Install: ###################################################################
def install(app):
    """Register the collection-related URL handlers on *app*."""
    handlers = [
        URLSpec(r'/collection/add',
                CollectionAddHandler,
                name='collection_add'),
        URLSpec(r'/collection/list',
                hbase.genPageHandler('collection_lst'),
                name='collection_lst'),
        URLSpec(r'/api/collection_lst.json',
                CollectionListDataHandler,
                name='api_collection_lst'),
    ]
    app.add_handlers('.*', handlers)
| 2,192 | 617 |
import pydbus

# Shared connection to the system D-Bus.
bus = pydbus.SystemBus()
# BlueZ proxies: the first HCI adapter and the object manager at the bus root.
adapter = bus.get('org.bluez', '/org/bluez/hci0')
mngr = bus.get('org.bluez', '/')
def list_connected_devices():
    """Return a list of {'name', 'address'} dicts for connected BlueZ devices."""
    devices = []
    for path, interfaces in mngr.GetManagedObjects().items():
        props = interfaces.get('org.bluez.Device1', {})
        if props.get('Connected', False):
            devices.append({'name': props.get('Name'),
                            'address': props.get('Address')})
    return devices
if __name__ == '__main__':
    # Print the name of every currently connected Bluetooth device.
    for device in list_connected_devices():
        print(device['name'])
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, fnmatch
import logging
from shutil import copyfile
from airflow.contrib.hooks.fs_hook import FSHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from datetime import datetime
# You can also make this format a parameter in the Operator, for example
# if you expect that you work with different intervals than "@daily".
# Then you can introduce time components to have a finer grain for file storage.
# Day-granularity timestamp used to partition stored files by execution date.
DATE_FORMAT = '%Y%m%d'
class FileToPredictableLocationOperator(BaseOperator):
    """
    Picks up a file from somewhere and lands this in a predictable location elsewhere:
    <dest_root>/<dag_id>/<task_id>/<execution_date>/<file_name>
    """
    template_fields = ('file_mask',)

    @apply_defaults
    def __init__(self,
                 src_conn_id,
                 dst_conn_id,
                 file_mask,
                 *args,
                 **kwargs):
        """
        :param src_conn_id: Hook with a conn id that points to the source directory.
        :type src_conn_id: string
        :param dst_conn_id: Hook with a conn id that points to the destination directory.
        :type dst_conn_id: string
        :param file_mask: fnmatch-style pattern selecting the files to pick up.
        :type file_mask: string
        """
        super(FileToPredictableLocationOperator, self).__init__(*args, **kwargs)
        self.src_conn_id = src_conn_id
        self.dst_conn_id = dst_conn_id
        self.file_mask = file_mask

    def execute(self, context):
        """
        Picks up all files from a source directory and dumps them into a root directory system,
        organized by dagid, taskid and execution_date
        """
        execution_date = context['execution_date'].strftime(DATE_FORMAT)
        src_hook = FSHook(conn_id=self.src_conn_id)
        source_dir = src_hook.get_path()
        dest_hook = FSHook(conn_id=self.dst_conn_id)
        dest_root_dir = dest_hook.get_path()
        dag_id = self.dag.dag_id
        task_id = self.task_id
        logging.info("Now searching for files like {0} in {1}".format(self.file_mask, source_dir))
        file_names = fnmatch.filter(os.listdir(source_dir), self.file_mask)
        # The destination directory is constant for the whole run.
        dest_dir = os.path.join(dest_root_dir, dag_id, task_id, execution_date)
        for file_name in file_names:
            full_path = os.path.join(source_dir, file_name)
            logging.info("Now creating path structure {0}".format(dest_dir))
            # Bug fix: the original called os.makedirs unconditionally inside
            # this loop, which raised OSError (directory exists) as soon as a
            # second file matched the mask.
            if not os.path.exists(dest_dir):
                os.makedirs(dest_dir)
            dest_file_name = os.path.join(dest_dir, os.path.basename(file_name))
            logging.info("Now moving {0} to {1}".format(full_path, dest_file_name))
            copyfile(full_path, dest_file_name)
class PredictableLocationToFinalLocationOperator(BaseOperator):
    """
    Picks up a file from predictable location storage and loads/transfers the results to
    a target system (in this case another directory, but it could be anywhere).
    """

    @apply_defaults
    def __init__(self,
                 src_conn_id,
                 dst_conn_id,
                 src_task_id,
                 *args,
                 **kwargs):
        """
        :param src_conn_id: Hook with a conn id that points to the source directory.
        :type src_conn_id: string
        :param dst_conn_id: Hook with a conn id that points to the destination directory.
        :type dst_conn_id: string
        :param src_task_id: Source task that produced the file of interest
        :type src_task_id: string
        """
        super(PredictableLocationToFinalLocationOperator, self).__init__(*args, **kwargs)
        self.src_conn_id = src_conn_id
        self.dst_conn_id = dst_conn_id
        self.src_task_id = src_task_id

    def execute(self, context):
        """
        Copies every file produced by the source task for this execution date
        from the predictable location into the final destination directory.
        """
        execution_date = context['execution_date'].strftime(DATE_FORMAT)
        src_hook = FSHook(conn_id=self.src_conn_id)
        dest_hook = FSHook(conn_id=self.dst_conn_id)
        # Predictable layout written by FileToPredictableLocationOperator.
        source_dir = os.path.join(src_hook.get_path(),
                                  self.dag.dag_id,
                                  self.src_task_id,
                                  execution_date)
        if not os.path.exists(source_dir):
            # Nothing was produced for this run; nothing to transfer.
            return
        for file_name in os.listdir(source_dir):
            full_path = os.path.join(source_dir, file_name)
            dest_file_name = os.path.join(dest_hook.get_path(), file_name)
            logging.info("Now moving {0} to final destination {1}".format(full_path, dest_file_name))
            copyfile(full_path, dest_file_name)
| 5,079 | 1,507 |
import numpy as np
import PIL.ImageColor, PIL.ImageFont
from .rescale import rescale
def _rgb(x):
    """Convert 0-1 values to RGB 0-255 values."""
    return rescale(x, scale=[0, 1], to=[0, 255])
def _color(color="black", alpha=1, mode="RGB"):
"""Sanitize color to RGB(A) format.
"""
if isinstance(color, str):
if color == "transparent":
return (0, 0, 0, 0)
color = PIL.ImageColor.getrgb(color)
elif isinstance(color, (int, np.integer)):
color = tuple([color] * 3)
elif isinstance(color, (list, np.ndarray)):
color = tuple(color)
# Add transparency
if mode == "RGBA":
if len(color) == 3:
color = color + tuple([np.int(_rgb(alpha))])
return color
def _coord_circle(image, diameter=0.1, x=0, y=0, unit="grid", method="pil"):
"""Get circle coordinates
Examples
--------
>>> import pyllusion as ill
>>> import PIL.Image, PIL.ImageDraw
>>>
>>> image = PIL.Image.new('RGB', (500, 400), color = "white")
>>> draw = PIL.ImageDraw.Draw(image, 'RGBA')
>>>
>>> coord = _coord_circle(image, diameter=1, x=0, y=0)
>>> draw.ellipse(coord, fill="red", width=0)
>>> draw.ellipse(_coord_circle(image, diameter=1.5, x=0, y=0), outline="blue")
>>> image #doctest: +ELLIPSIS
<PIL.Image.Image ...>
"""
if unit == "grid":
# Get coordinates in pixels
width, height = image.size
x = np.int(rescale(x, to=[0, width], scale=[-1, 1]))
if method == "pil":
y = np.int(rescale(-y, to=[0, height], scale=[-1, 1]))
elif method == "psychopy":
y = np.int(rescale(y, to=[0, height], scale=[-1, 1]))
# Convert diameter based on height
diameter = np.int(rescale(diameter, to=[0, height], scale=[0, 2]))
diameter = 2 if diameter < 2 else diameter
radius = diameter / 2
# Choose diameter and centre
coord = [(x - radius, y - radius), (x + radius, y + radius)]
if method == "pil":
return coord
elif method == "psychopy":
return radius, x, y
def _coord_text(
    image, text="hello", size="auto", x=0, y=0, font="arial.ttf", unit="grid",
    method="pil"
):
    """Get text coordinates

    Examples
    --------
    >>> import pyllusion as ill
    >>> import PIL.Image, PIL.ImageDraw
    >>>
    >>> image = PIL.Image.new('RGB', (500, 500), color = "white")
    >>> draw = PIL.ImageDraw.Draw(image, 'RGB')
    >>>
    >>> coord, font = _coord_text(image, size="auto", x=-0.5, y=0.5) #doctest: +SKIP
    >>> draw.text(coord, text="hello", fill="black", font=font) #doctest: +SKIP
    >>> image #doctest: +SKIP
    """
    if unit == "grid":
        # Get coordinates in pixels.
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        width, height = image.size
        x = int(rescale(x, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            # PIL's y axis grows downwards, hence the sign flip.
            y = int(rescale(-y, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y = int(rescale(y, to=[0, height], scale=[-1, 1]))
    if size == "auto":
        # NOTE(review): this branch reads `width`/`height`, which are only
        # defined when unit == "grid" — confirm callers never combine
        # size="auto" with another unit.
        # Initialize values, then grow the font until the text nearly fills
        # the image (within 1% of each border).
        size, top_left_x, top_left_y, right_x, bottom_y = 0, width, height, 0, 0
        while (
            top_left_x > 0.01 * width
            and right_x < 0.99 * width
            and top_left_y > 0.01 * height
            and bottom_y < 0.99 * height
        ):
            loaded_font = PIL.ImageFont.truetype(font, size)
            # NOTE(review): FreeTypeFont.getsize() was removed in Pillow 10;
            # getbbox()/getlength() would be needed there — confirm the
            # pinned Pillow version before upgrading.
            text_width, text_height = loaded_font.getsize(text)
            top_left_x = x - (text_width / 2)
            top_left_y = y - (text_height / 2)
            right_x = top_left_x + text_width
            bottom_y = top_left_y + text_height
            size += 1  # Increment text size
    else:
        loaded_font = PIL.ImageFont.truetype(font, size)
        text_width, text_height = loaded_font.getsize(text)
        top_left_x = x - (text_width / 2)
        top_left_y = y - (text_height / 2)
    coord = top_left_x, top_left_y
    return coord, loaded_font, x, y
def _coord_line(
    image=None,
    x=0,
    y=0,
    x1=None,
    y1=None,
    x2=None,
    y2=None,
    length=None,
    angle=None,
    adjust_width=False,
    adjust_height=False,
    method="pil",
):
    """Resolve a line's endpoints, length and angle from any sufficient subset
    of (center, endpoints, length, angle), optionally converting the result
    to pixel coordinates of *image*.
    """
    # Center to None if x1 entered: explicit endpoints take precedence.
    x = None if x1 is not None else x
    y = None if y1 is not None else y
    # Derive whichever of (x2, y2) / (length, angle) is missing.
    if x is None and y is None:
        if x2 is None and y2 is None:
            x2, y2 = _coord_line_x2y2(x1, y1, length, angle)
        if length is None and angle is None:
            length, angle = _coord_line_lengthangle(x1, y1, x2, y2)
    else:
        # (x, y) is the line center: project half the length each way.
        if x2 is None and y2 is None:
            x2, y2 = _coord_line_x2y2(x, y, length / 2, angle)
        if length is None and angle is None:
            length, angle = _coord_line_lengthangle(x, y, x2, y2)
            length = length * 2
        x1, y1 = _coord_line_x2y2(x2, y2, length, 180 + angle)
    # Get coordinates in pixels.
    # np.int was removed in NumPy 1.24; the builtin int is equivalent.
    if image is not None:
        width, height = image.size
        if adjust_width is True:
            x1, x2 = x1 * (height / width), x2 * (height / width)
        if adjust_height is True:
            y1, y2 = y1 * (width / height), y2 * (width / height)
        x1 = int(rescale(x1, to=[0, width], scale=[-1, 1]))
        x2 = int(rescale(x2, to=[0, width], scale=[-1, 1]))
        if method == "pil":
            # PIL's y axis grows downwards, hence the sign flip.
            y1 = int(rescale(-y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(-y2, to=[0, height], scale=[-1, 1]))
        elif method == "psychopy":
            y1 = int(rescale(y1, to=[0, height], scale=[-1, 1]))
            y2 = int(rescale(y2, to=[0, height], scale=[-1, 1]))
        length = int(rescale(length, to=[0, height], scale=[0, 2]))
    return (x1, y1, x2, y2), length, angle
def _coord_line_x2y2(x1=None, y1=None, length=None, angle=None):
x2 = x1 + np.sin(np.deg2rad(angle)) * length
y2 = y1 + np.cos(np.deg2rad(angle)) * length
return x2, y2
def _coord_line_lengthangle(x1=None, y1=None, x2=None, y2=None):
length = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
angle = np.rad2deg(np.arccos(np.abs(x1 - x2) / length))
return length, angle
def _coord_rectangle(image=None, x=0, y=0, size_width=1, size_height=1, method="pil"):
"""
"""
x1 = x - (size_width / 2)
y1 = y + (size_height / 2)
x2 = x + (size_width / 2)
y2 = y - (size_height / 2)
# Get coordinates in pixels
if image is not None:
width, height = image.size
x1 = np.int(rescale(x1, to=[0, width], scale=[-1, 1]))
x2 = np.int(rescale(x2, to=[0, width], scale=[-1, 1]))
if method == "pil":
y1 = np.int(rescale(-y1, to=[0, height], scale=[-1, 1]))
y2 = np.int(rescale(-y2, to=[0, height], scale=[-1, 1]))
elif method == "psychopy":
y1 = np.int(rescale(y1, to=[0, height], scale=[-1, 1]))
y2 = np.int(rescale(y2, to=[0, height], scale=[-1, 1]))
return (x1, y1, x2, y2)
| 7,052 | 2,695 |
import sys
import argparse
import fnmatch
import os
import re
import shutil
import glob
import logging
import multiprocessing
from copy_reg import pickle
from types import MethodType
_logger = logging.getLogger('default')
_logger.addHandler(logging.StreamHandler())
_logger.setLevel(logging.CRITICAL)
def _pickle_method(method):
    """Pickle support for bound methods (Python 2): reduce to (name, instance, class)."""
    func_name = method.im_func.__name__
    return _unpickle_method, (func_name, method.im_self, method.im_class)
def _unpickle_method(func_name, obj, cls):
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
class InputParser:
    """Discovers which fort.NN bins belong to which scoring card (RESNUC/USRBIN)
    by cross-referencing the files in a FLUKA run directory with its .inp file."""

    def __init__(self, path):
        self.path = path
        # card name -> list of bin unit numbers attributed to that card
        self.parsedInfo = {'resnuc': [], 'usrbin': []}

    def _get_bins(self):
        """Collect the trailing two-digit unit numbers of all fort files."""
        r = re.compile(r"([0-9]{2})$")
        self.bins = {int(re.search(r, fN).group(1)) for fN in glob.glob(self.path + '/*fort*')}

    def _drop_bin(self, bin):
        """Remove *bin* from the pending set; return True if it was present."""
        try:
            self.bins.remove(bin)
            return True
        except KeyError:
            # Narrowed from a bare except: set.remove only raises KeyError,
            # and a bare except would also have hidden programming errors.
            return False

    def __parse_scoring_cards(self):
        """Match RESNUC/USRBIN cards in the .inp file against the found bins."""
        # Raw strings avoid invalid-escape warnings for \s and \d.
        re_resnuc = re.compile(r"^RESNUC")
        re_usrbin = re.compile(r"^(USRBIN)\s+\d+.?\d?\s+\w+.*")
        try:
            input_file = glob.glob(self.path + '/*.inp')[0]
        except IndexError:
            _logger.critical("Unable to locate .inp file required for parsing scoring card information. Either provide "
                             "it in the input directory or specify card and bins explicitly.")
            sys.exit(1)
        for line in open(input_file).readlines():
            if len(self.bins) == 0:
                # Every bin has been attributed to a card; stop early.
                return
            if re.match(re_resnuc, line):
                index = abs(int(line.split()[2].rstrip('.')))
                add_bin = self._drop_bin(index)
                if add_bin:
                    self.parsedInfo['resnuc'].append(index)
            elif re.match(re_usrbin, line):
                index = abs(int(line.split()[3].rstrip('.')))
                add_bin = self._drop_bin(index)
                if add_bin:
                    self.parsedInfo['usrbin'].append(index)

    def parse(self):
        """Return {'resnuc': [...], 'usrbin': [...]} for the run directory."""
        self._get_bins()
        self.__parse_scoring_cards()
        return self.parsedInfo
class Merger(object):
    """Merges per-cycle FLUKA binary output files using the FLUKA utilities.

    Scans *path* for ``<geom><cycle>_fort.<bin>`` files, then runs the
    matching merging tool (usrsuw / usbsuw) for each scoring card, optionally
    moving results to *out_path* and converting USRBIN output to ASCII.
    """

    def __init__(self, path, out_path):
        self.curdir = os.getcwd()
        self.path = path
        self.bins = []
        self.filelist = []
        self.cycle = []
        self.parse_dir()
        # Maps scoring card -> FLUKA merging utility in $FLUPRO/flutil.
        self.mergingCodeLookup = {'resnuc': 'usrsuw',
                                  'usrbin': 'usbsuw'}
        self.out_path = out_path
        self.__class__.check_fluka_loaded()
        self.check_out_path()

    @staticmethod
    def check_fluka_loaded():
        """Abort unless the FLUPRO environment variable is set."""
        try:
            os.environ['FLUPRO']
        except KeyError:
            _logger.critical('FLUPRO environment not setup. Please export FLUPRO pointing to your FLUKA \
installation directory.')
            sys.exit(1)

    def check_out_path(self):
        """Normalize the output path and create the directory if needed."""
        if self.out_path is not None:
            self.out_path = os.path.abspath(self.out_path)
            if not os.path.exists(self.out_path):
                os.makedirs(self.out_path)

    def parse_dir(self):
        """Collect geometry name, cycle numbers and bin units from file names."""
        for file_name in os.listdir(self.path):
            if fnmatch.fnmatch(file_name, '*???_fort.??*'):
                # File layout assumed: <geom><3-digit cycle>_fort.<2-digit bin>
                self.geom = file_name[:-11]
                c = int(file_name[-11:-8])
                b = int(file_name[-2:])
                self.filelist.append(file_name)
                if b not in self.bins:
                    self.bins.append(b)
                if c not in self.cycle:
                    self.cycle.append(c)

    def merge(self, cards):
        """Merge every (card, bin) job in parallel.

        :param cards: mapping of card name -> list of bin numbers.
        """
        # Register bound-method pickling so Pool can ship self._merge_impl
        # to worker processes (Python 2 copy_reg workaround).
        pickle(MethodType, _pickle_method, _unpickle_method)
        jobs = [(k,v) for k, values in cards.items() for v in values]
        pool = multiprocessing.Pool(processes=min(len(jobs), multiprocessing.cpu_count()))
        pool.map(self._merge_impl, jobs)

    def _merge_impl(self, *args):
        """Worker: merge one (card, bin) pair via the FLUKA merging tool."""
        card = args[0][0]
        b = args[0][1]
        _logger.debug("Merge " + card + " for bin " + str(b))
        os.chdir(self.path)
        # The merging tool reads a file list followed by the output name
        # from stdin; build that list file with shell commands.
        list_name = 'list_' + str(b) + '_' + card
        os.system('ls -1 *_fort.'+str(b)+'* > ' + list_name)
        os.system('echo "" >> ' + list_name)
        os.system('echo "' + self.geom + '_' + card + '_'+str(b)+'" >> ' + list_name)
        os.system('%s/flutil/%s < %s ' % (os.environ['FLUPRO'], self.mergingCodeLookup[card], list_name))
        if self.out_path is not None:
            self.move(card, b)
            if card == 'usrbin':
                self.convert_to_ascii(card, b)

    def move(self, card, index):
        """Move merged output files for (card, index) into the output directory."""
        for fName in glob.glob(r'%s/%s_%s_%s*' % (self.path,
                                                  self.geom,
                                                  card,
                                                  index)):
            shutil.move(fName, os.path.join(self.out_path, fName.split('/')[-1]))

    def convert_to_ascii(self, card, bin):
        """Convert merged USRBIN binary files to ASCII with the usbrea tool."""
        os.chdir(self.out_path)
        tmp_file_name = 'asciiconversion_%s_%i.txt' % (card, bin)
        for file_name in glob.glob(r'%s/%s_%s_%s*' % (self.out_path,
                                                      self.geom,
                                                      card,
                                                      bin)):
            if file_name.endswith('.ascii'):
                continue
            file_name = os.path.split(file_name)[1]
            # usbrea reads the input and output file names from stdin.
            tmp_file = open(os.path.join(self.curdir, tmp_file_name), 'w+')
            print >> tmp_file, file_name
            print >> tmp_file, file_name + '.ascii'
            tmp_file.close()
            os.system('%s/flutil/usbrea < %s > /dev/null' % (os.environ['FLUPRO'], os.path.join(self.curdir, tmp_file_name)))
            os.remove(os.path.join(self.curdir, tmp_file_name))
        os.chdir(self.curdir)
def main(argv):
    """Entry point: parse *argv* and merge the FLUKA bin data found at path.

    :param argv: command-line arguments (without the program name).
    """
    # Renamed from `parser` to avoid shadowing with the InputParser below.
    arg_parser = argparse.ArgumentParser(description='Script for merging fluka bin data')
    arg_parser.add_argument('path', help='input path')
    arg_parser.add_argument('--card', '-c', required=False, default=None, help='card')
    arg_parser.add_argument('--bins', '-b', required=False, default=None, type=int, nargs='+', help='bins')
    arg_parser.add_argument('--output', '-o', default=None, help='output directory')
    arg_parser.add_argument('--debug', '-d', action='store_true', default=False, help='Switch on debug messages')
    # Bug fix: the original called parse_args() with no arguments, silently
    # ignoring the argv parameter and always reading sys.argv instead.
    args = arg_parser.parse_args(argv)
    if args.debug:
        _logger.setLevel(logging.DEBUG)
    path = os.path.abspath(args.path)
    if not args.card and not args.bins:
        # No explicit card/bins: discover them from the .inp file.
        scoring_cards = InputParser(path).parse()
    else:
        scoring_cards = {args.card: args.bins}
    merger = Merger(path, args.output)
    merger.merge(scoring_cards)
if __name__ == '__main__':
    # Forward the command-line arguments (minus the program name).
    main(sys.argv[1:])
| 6,971 | 2,241 |
from balldontlie import balldontlie, player, stats
from matplotlib import pyplot as plt
def getplayer(firstname, lastname, datalist):
    """Return a ``player`` for the first entry in *datalist* matching the name.

    Searches every fetched page; returns None when no page contains a player
    with the given first and last name.
    """
    for page in datalist:
        for info in page.data:
            if info['first_name'] == firstname and info['last_name'] == lastname:
                return player(info['first_name'], info['last_name'], info['id'])
def main():
    """Plot Kobe Bryant's Player Efficiency Rating for the 1996-2015 seasons."""
    seasons = range(1996, 2016)
    # Fetch every page of the players endpoint (33 pages).
    datalist = [
        balldontlie('https://www.balldontlie.io/api/v1/players?page=' + str(page))
        for page in range(1, 34)
    ]
    kobe = getplayer('Kobe', 'Bryant', datalist)
    season_stats = [kobe.getstats(kobe, year) for year in seasons]
    per_ratings = [stat.calculate_PER(stat) for stat in season_stats]
    plt.plot(seasons, per_ratings,
             label="Kobe Bryant's Player Efficiency Rating",
             color='yellow')
    plt.xlabel('Season')
    plt.xticks(seasons)
    plt.ylabel('Player Efficiency Rating')
    plt.title('Change in PER Over Time')
    plt.gca().set_facecolor('purple')
    plt.legend()
    plt.show()
if __name__ == "__main__":
    # Script entry point; performs network requests and opens a plot window.
    main()