index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
7,800 | 63f155f7da958e9b6865007c701f7cf986b0cbac | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 12:16:15 2020
@author: zhangjuefei
"""
import sys
sys.path.append('../..')
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.preprocessing import OneHotEncoder
import matrixslow as ms

# Load the MNIST dataset, keep a subset of samples and normalise pixels to [0, 1].
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# Bug fix: np.int was deprecated in NumPy 1.20 and removed later; the builtin
# int is the documented replacement (np.int was just an alias for it).
X, y = X[:1000] / 255, y.astype(int)[:1000]

# Convert integer labels to one-hot encoding.
# NOTE(review): sparse= is deprecated in scikit-learn >= 1.2 in favour of
# sparse_output=; kept as-is for compatibility with the pinned version.
oh = OneHotEncoder(sparse=False)
one_hot_label = oh.fit_transform(y.values.reshape(-1, 1))

# Input image size.
img_shape = (28, 28)
# Input image node.
x = ms.core.Variable(img_shape, init=False, trainable=False)
# One-hot label node.
one_hot = ms.core.Variable(dim=(10, 1), init=False, trainable=False)
# First convolutional layer.
conv1 = ms.layer.conv([x], img_shape, 3, (5, 5), "ReLU")
# First pooling layer.
pooling1 = ms.layer.pooling(conv1, (3, 3), (2, 2))
# Second convolutional layer.
conv2 = ms.layer.conv(pooling1, (14, 14), 3, (3, 3), "ReLU")
# Second pooling layer.
pooling2 = ms.layer.pooling(conv2, (3, 3), (2, 2))
# Fully connected layer (3 maps of 7x7 = 147 inputs).
fc1 = ms.layer.fc(ms.ops.Concat(*pooling2), 147, 120, "ReLU")
# Output layer (logits).
output = ms.layer.fc(fc1, 120, 10, "None")
# Class probabilities (used only for evaluation; loss works on logits).
predict = ms.ops.SoftMax(output)
# Cross-entropy loss.
loss = ms.ops.loss.CrossEntropyWithSoftMax(output, one_hot)
# Learning rate.
learning_rate = 0.005
# Optimizer.
optimizer = ms.optimizer.Adam(ms.default_graph, loss, learning_rate)
# Mini-batch size.
batch_size = 32

# Training loop.
for epoch in range(60):
    batch_count = 0
    for i in range(len(X)):
        feature = np.mat(X.values[i]).reshape(img_shape)
        label = np.mat(one_hot_label[i]).T
        x.set_value(feature)
        one_hot.set_value(label)
        optimizer.one_step()
        batch_count += 1
        if batch_count >= batch_size:
            print("epoch: {:d}, iteration: {:d}, loss: {:.3f}".format(epoch + 1, i + 1, loss.value[0, 0]))
            optimizer.update()
            batch_count = 0

    # Evaluate accuracy on the same subset after each epoch.
    pred = []
    for i in range(len(X)):
        # Bug fix: X is a DataFrame, so X[i] would be *column* indexing;
        # X.values[i] selects the i-th sample row (matches the training loop).
        feature = np.mat(X.values[i]).reshape(img_shape)
        x.set_value(feature)
        predict.forward()
        pred.append(predict.value.A.ravel())
    pred = np.array(pred).argmax(axis=1)
    accuracy = (y == pred).astype(int).sum() / len(X)
    print("epoch: {:d}, accuracy: {:.3f}".format(epoch + 1, accuracy))
7,801 | 881afd6877508243fa5056d2a82d88ba69ffb8c0 | from graphviz import Digraph
from math import log2, ceil
def hue_to_rgb(p, q, t):
if t < 0: t += 1
if t > 1: t -= 1
if t < 1/6: return p + (q - p) * 6 * t
if t < 1/2: return q
if t < 2/3: return p + (q - p) * (2/3 - t) * 6
return p
def hsl_to_rgb(h, s, l):
h /= 360
q = l * (1 + s) if l < 0.5 else l + s - l * s
p = 2 * l - q
r = hue_to_rgb(p, q, h + 1/3)
g = hue_to_rgb(p, q, h)
b = hue_to_rgb(p, q, h - 1/3)
return r, g, b
def rgb_to_hex(r, g, b):
return f'#{int(r*255):02x}{int(g*255):02x}{int(b*255):02x}'
def hue(h):
return rgb_to_hex(*hsl_to_rgb(h, 0.5, 0.5))
def dfs(node, val):
    """Depth-first search; return the first node with .val == val, else None."""
    if node.val == val:
        return node
    for child in node.children:
        found = dfs(child, val)
        if found:
            return found
    return None

def bfs(node, val):
    """Breadth-first search; return the first node with .val == val, else None."""
    # Bug/perf fix: list.pop(0) is O(n) per dequeue; deque.popleft() is O(1).
    from collections import deque
    queue = deque([node])
    while queue:
        node = queue.popleft()
        if node.val == val:
            return node
        queue.extend(node.children)
    return None
class Node:
    """A tree node that can render itself (and its subtree) via graphviz.

    depth, size (subtree node count including self) and index (preorder visit
    number) are filled in by process(); root._index maps index -> node.
    """
    def __init__(self, val, children=None, parent=None):
        self.id = str(val)  # graphviz node id
        self.val = val
        self.parent = parent
        self.depth = -1   # set by process()
        self.size = -1    # set by process()
        self.index = -1   # set by process()
        self.attrs = {}   # extra graphviz node attributes (e.g. fontcolor)
        self._index = []  # populated only on the root: index -> node
        self.children = children if children else []
        for child in self.children: child.under(self)
    def by_index(self, index): return self._index[index]
    def process(self, root):
        # Preorder DFS assigning depth/index and accumulating subtree sizes.
        # Counter is a module-level helper defined further down this file.
        index = Counter()
        def dfs(node, depth):
            node.depth = depth
            node.size = 1
            node.index = index.inc()
            root._index.append(node)  # preorder => list position == index
            for child in node.children:
                dfs(child, depth + 1)
                node.size += child.size
        dfs(root, 0)
    def adopt(self, child): self.children.append(child)
    def under(self, parent): self.parent = parent
    def __repr__(self): return f'{self.val} (d{self.depth} s{self.size})'
    def render(self):
        # FORMAT is a module-level constant defined near the bottom of the file.
        dot = Digraph(format=FORMAT,
                      node_attr={'shape': 'plaintext'},
                      edge_attr={'arrowsize': '0.5'},
                      )
        self.render_(dot)
        dot.render('binary_lifting', view=True)  # writes the file and opens a viewer
    def render_(self, dot):
        # Emit this node plus edges to children, then recurse.
        dot.node(self.id, str(self), **self.attrs)
        for child in self.children:
            dot.edge(self.id, child.id)
            child.render_(dot)
    def find(self, val):
        # Uses the module-level dfs helper (not the nested dfs in process()).
        return dfs(self, val)
def example():
    """Build and index the fixed demo tree used by the __main__ section."""
    g = Node(1, [
        Node(2, [
            Node(4), Node(5, [
                Node(8), Node(9, [
                    Node(10), Node(11, [
                        Node(18), Node(19, [
                            Node(22), Node(23), Node(24)
                        ]), Node(20), Node(21)
                    ])
                ])
            ])
        ]),
        Node(3, [
            Node(6, [
                Node(12), Node(13, [
                    Node(14), Node(15, [
                        Node(16), Node(17)
                    ])
                ])
            ]), Node(7)
        ])
    ])
    # Assign depth/size/index and populate the root's index table.
    g.process(g)
    return g
dummy = Node(-1)
def climb(node):
    """Return the path from *node* up to the root, starting with node itself."""
    path = [node]
    current = node
    while current.parent:
        current = current.parent
        path.append(current)
    return path
class Counter:
    """Monotonic counter: inc() returns the current value, then advances it."""
    def __init__(self):
        self.count = 0
    def inc(self):
        value = self.count
        self.count += 1
        return value
class Lifting:
    """Binary lifting over a Node tree for O(log n) LCA queries.

    up[v][i] is the index of the 2^i-th ancestor of the node with index v;
    tin/tout are Euler entry/exit times used for O(1) ancestor tests.
    """
    def __init__(self, root):
        self.root = root
        self.up = []
        self.process(root)
    @property
    def l(self):
        # Number of lifting levels: ceil(log2(n)).
        n = self.root.size
        return ceil(log2(n))
    def process(self, root):
        timer = Counter()
        tin, tout = {}, {}
        n = root.size
        up = []
        for _ in range(n): up.append([None] * (self.l+1))
        def dfs(node, parent):
            print('visit', node.index)  # debug trace
            tin[node.index] = timer.inc()
            up[node.index][0] = parent.index
            # up[v][i] = up[up[v][i-1]][i-1]; the parent's table rows are
            # already filled because the traversal is preorder.
            for i in range(1, self.l+1): up[node.index][i] = up[up[node.index][i-1]][i-1]
            for child in node.children:
                if child != parent: dfs(child, node)
            tout[node.index] = timer.inc()
        dfs(root, root)  # root acts as its own parent, so up[root][*] == root
        self.up = up
        self.tin = tin
        self.tout = tout
        print(tin)   # debug trace
        print(tout)  # debug trace
    def is_ancestor(self, a, b):
        # a is an ancestor of b iff a's Euler interval contains b's.
        ai, bi = a.index, b.index
        return self.tin[ai] <= self.tin[bi] and self.tout[ai] >= self.tout[bi]
    def lca(self, a, b):
        """Lowest common ancestor of a and b via binary lifting."""
        if self.is_ancestor(a, b): return a
        if self.is_ancestor(b, a): return b
        # Lift a as high as possible while staying strictly below the LCA;
        # one final parent step then lands on the LCA.
        for i in range(self.l, -1, -1):
            print('i', i, 'index', a.index)  # debug trace
            index = self.up[a.index][i]
            p = self.root.by_index(index)
            if not self.is_ancestor(p, b): a = p
        index = self.up[a.index][0]
        return self.root.by_index(index)
    def lca_slow(self, a, b):
        """Reference LCA: compare the two root-to-node paths.

        NOTE(review): raises IndexError when path_b is shorter than path_a
        (i.e. b is a strict ancestor of a) — confirm callers avoid that case.
        """
        path_a = climb(a)[::-1]
        path_b = climb(b)[::-1]
        for i in range(len(path_a)):
            if path_a[i] != path_b[i]:
                return path_a[i - 1]
        return path_a[-1]
    def render(self):
        # Draw the tree plus dashed lifting edges, one hue per source node.
        dot = Digraph(format=FORMAT,
                      node_attr={'shape': 'plaintext'},
                      edge_attr={'arrowsize': '0.5'},
                      engine='dot',
                      )
        self.root.render_(dot)
        for i in range(len(self.up)):
            angle = i/len(self.up)*360.0 + i%2*180.0
            color = hue(angle)
            for j in range(self.l+1):
                p = self.up[i][j]
                if p != 0:  # skip lifting edges that point at the root (index 0)
                    a = self.root.by_index(i)
                    b = self.root.by_index(p)
                    dot.edge(a.id, b.id, style='dashed', color=color)
        dot.render('binary_lifting', view=True)
FORMAT = 'svg'  # graphviz output format used by the render() methods

if __name__ == '__main__':
    # Demo: build the example tree, precompute the lifting tables, run one
    # LCA query (endpoints red, answer green) and render the result.
    g = example()
    l = Lifting(g)
    #p = l.lca_slow(g.find(10), g.find(17))
    a = g.find(8)
    b = g.find(20)
    p = l.lca(a, b)
    a.attrs['fontcolor'] = 'red'
    b.attrs['fontcolor'] = 'red'
    p.attrs['fontcolor'] = 'green'
    l.render()
|
7,802 | a5dff32dfbe93ba081144944381b96940da541ad | # Generated by Django 2.0.5 on 2019-06-12 08:03
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration reconciling two divergent 'doctor' branches."""
    dependencies = [
        ('doctor', '0257_merge_20190524_1533'),
        ('doctor', '0260_merge_20190604_1428'),
    ]
    # Intentionally empty: a merge migration only joins the dependency graph.
    operations = [
    ]
|
7,803 | 9cfbb06df4bc286ff56983d6e843b33e4da6ccf8 | """
Given the root of a binary tree, check whether it is a mirror of itself (i.e., symmetric around its center).
Example 1:
Input: root = [1, 2, 2, 3, 4, 4, 3]
Output: true
1
/ \
2 2
/ \ / \
3 4 4 3
Example 2:
Input: root = [1, 2, 2, None, 3, None, 3]
Output: false
1
/ \
2 2
\ \
3 3
"""
"""
We recursively check whether opposite ends of the tree are equal, going down the tree.
The logic is very similar to problem 100.
"""
from shared import list_to_tree
def is_symmetric(root):
    """Return True iff the binary tree is a mirror image of itself.

    Recursively compares mirrored pairs of subtrees, starting from the
    root's two children (same structure as LeetCode problem 100's equality
    check, but with left/right swapped on one side).
    """
    def mirrored(a, b):
        if a is None and b is None:
            return True
        elif a and b:
            # Outer pair and inner pair must both mirror, and values must match.
            return mirrored(a.left, b.right) and a.val == b.val and mirrored(a.right, b.left)
        else:
            return False
    return mirrored(root.left, root.right)
assert is_symmetric(list_to_tree([1, 2, 2, 3, 4, 4, 3])) is True
assert is_symmetric(list_to_tree([1, 2, 2, None, 3, None, 3])) is False
assert is_symmetric(list_to_tree([1, 2, 2, None, 2, None])) is False
assert is_symmetric(list_to_tree([1, 2, 3])) is False
|
7,804 | b63dc8b9aa2f0593a4a7eb52a722a9c4da6c9e08 | import pandas as pd
from pandas import Series, DataFrame
def load_excel( data_path, data_name, episode_Num):
    # Read "<data_path><data_name><episode_Num>.xlsx" into a DataFrame,
    # using the first spreadsheet column as the index.
    data_name = data_name + str(episode_Num)+'.xlsx'
    dataframe = pd.read_excel(data_path + data_name,index_col=0)
    return dataframe
def dataframe_to_numpy(dataframe):
    """Return the DataFrame's values as a NumPy array."""
    return dataframe.to_numpy()
def numpy_to_tensor( numpy_array):
    """Convert a NumPy array to a torch.Tensor (shares memory where possible)."""
    # Bug fix: torch was never imported by this module, so this function
    # raised NameError; a function-local import keeps the module lightweight.
    import torch
    tensor = torch.from_numpy(numpy_array)
    return tensor
def transform( data, data_path, data_name, episode_Num):
data = load_excel(data_path, data_name, episode_Num)
data = dataframe_to_numpy(data)
data = numpy_to_tensor(data)
return data
def data_slice(data, num_of_data):
    """Drop column 0 and keep the next num_of_data columns."""
    return data[:, 1:num_of_data + 1]
7,805 | 8a1f024be00200218782c919b21161bf48fc817e | # from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
# from applications.models import ApplicationReview
# from profiles.models import Restaurant, Program, Courier
# Enum for Admin
BASIC_ADMIN = 'ADMIN'
SUPER_ADMIN = 'SUPER'
MANAGER = 'MNGR'
DEVELOPER = 'DEV'
STAFF = 'STAFF'
ADMIN_ROLE_OPTIONS = [
(BASIC_ADMIN, 'basic admin'),
(SUPER_ADMIN, 'super admin'),
(MANAGER, 'manager'),
(DEVELOPER, 'developer'),
(STAFF, 'stuff'),
]
PROGRAM = "PR"
RESTAURANT = "RE"
USER_TYPE_OPTIONS = [
(PROGRAM, 'Program'),
(RESTAURANT, 'Restaurant'),
]
PHONE = "PH"
EMAIL = "EM"
PREFERRED_CONTACT = [
(PHONE, 'Phone'),
(EMAIL, 'Email'),
]
ADMIN = "ADM"
BASIC_USER = "BSC"
USER_TYPES = [
(ADMIN, 'Admin'),
(BASIC_USER, 'Basic User'),
]
class UserClassManager(BaseUserManager):
    """Manager for User class"""
    # Method for creating admins, but not super admins.
    def create_staffuser(self, last_name, first_name, email, password, role, phone_number=''):
        # Staff accounts get an AdminUser profile with the requested role.
        new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
                                       email=email, password=password)
        new_account.staff = True
        admin_object = AdminUser.objects.create(role=role)
        new_account.admin_object = admin_object
        new_account.user_type = ADMIN
        admin_object.save(using=self._db)
        new_account.save(using=self._db)
        return new_account
    def create_basic_user(self, type, last_name, first_name, email, password, phone_number=''):
        # Non-admin accounts get a BasicUser profile (program or restaurant).
        new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
                                       email=email, password=password)
        user_object = BasicUser.objects.create(type=type)
        new_account.user_object = user_object
        new_account.user_type = BASIC_USER
        user_object.save(using=self._db)
        new_account.save(using=self._db)
        return new_account
    # Base method used by all the creators above (restaurants, schools, etc.).
    def create_user(self, last_name, first_name, email, password, phone_number=''):
        new_account = self.model(email=self.normalize_email(email),)
        new_account.set_password(password)  # hashes rather than storing plaintext
        new_account.last_name = last_name
        new_account.first_name = first_name
        new_account.phone_number = phone_number
        new_account.save(using=self._db)
        return new_account
    # Method for creating superadmins (staff + admin flags set).
    def create_superuser(self, last_name, first_name, email, password, phone_number=''):
        new_account = self.create_user(phone_number=phone_number, last_name=last_name, first_name=first_name,
                                       email=email, password=password)
        new_account.staff = True
        new_account.admin = True
        admin_object = AdminUser.objects.create(role=SUPER_ADMIN)
        new_account.admin_object = admin_object
        new_account.user_type = ADMIN
        admin_object.save(using=self._db)
        new_account.save(using=self._db)
        return new_account
    # Add any required fields here other than email and password.
    # NOTE(review): these attributes normally live on the user *model*
    # (UserClass also defines them) — confirm whether the manager-level
    # copies are actually read anywhere.
    REQUIRED_FIELDS = []
    USERNAME_FIELD = 'email'
class UserClass(AbstractBaseUser):
    """Class for general user - can be basic user or admin"""
    phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, default='')
    # NOTE(review): both `active` and `is_active` exist; Django's auth
    # machinery reads is_active — confirm `active` is still needed.
    active = models.BooleanField(default=True)
    is_active = models.BooleanField(default=True)
    email = models.EmailField(verbose_name='email', max_length=255, unique=True, )
    last_name = models.CharField(verbose_name='last name', max_length=255, unique=False, )
    first_name = models.CharField(verbose_name='first name', max_length=255, unique=False, )
    objects = UserClassManager()
    staff = models.BooleanField(default=False)
    admin = models.BooleanField(default=False)
    image = models.CharField(verbose_name='user image', max_length=255, unique=False, default='defaultIcon.png')
    # Authentication is by email; no separate username field.
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = ['first_name', 'last_name']
    user_type = models.CharField(
        max_length=20,
        choices=USER_TYPES,
        default=BASIC_USER,
    )
    # Exactly one of these profile links is expected to be set, depending
    # on user_type. NOTE(review): not enforced at the DB level.
    user_object = models.ForeignKey('profiles.BasicUser', on_delete=models.DO_NOTHING, null=True, related_name='basic_user_parent')
    admin_object = models.ForeignKey('profiles.AdminUser', on_delete=models.DO_NOTHING, null=True, related_name='admin_user_parent')
    def has_module_perms(self, app_label):
        # All users can see all app modules; per-object perms not used here.
        return True
    @property
    def is_admin(self):
        return self.admin
    def get_full_name(self):
        return self.first_name + ' ' + self.last_name
    def get_short_name(self):
        return self.first_name
    @property
    def is_staff(self):
        return self.staff
    def __str__(self):
        return self.email
class AdminUser(models.Model):
    """Model for admin user data"""
    # Role within the admin hierarchy (see ADMIN_ROLE_OPTIONS above).
    role = models.CharField(
        max_length=20,
        choices=ADMIN_ROLE_OPTIONS,
        default=STAFF,
    )
class BasicUser(models.Model):
    """Model for basic user data"""
    # Whether this account belongs to a program or a restaurant.
    type = models.CharField(
        max_length=20,
        choices=USER_TYPE_OPTIONS,
        default=RESTAURANT,
    )
    preferred_contact = models.CharField(
        max_length=20,
        choices=PREFERRED_CONTACT,
        default=EMAIL,
    )
    position = models.CharField(verbose_name='position/title', max_length=255, unique=False, null=True)
    # At most one of these links is expected to be set, matching `type`.
    # NOTE(review): not enforced at the DB level — confirm the invariant.
    restaurant = models.ForeignKey('profiles.Restaurant', on_delete=models.CASCADE, null=True)
    program = models.ForeignKey('profiles.Program', on_delete=models.CASCADE, null=True)
    courier = models.ForeignKey('profiles.Courier', on_delete=models.CASCADE, null=True)
class Schedule(models.Model):
    """Weekly opening hours: an optional start and end time for each day."""
    monday_start = models.TimeField(auto_now=False, null=True, blank=True)
    monday_end = models.TimeField(auto_now=False, null=True, blank=True)
    tuesday_start = models.TimeField(auto_now=False, null=True, blank=True)
    tuesday_end = models.TimeField(auto_now=False, null=True, blank=True)
    wednesday_start = models.TimeField(auto_now=False, null=True, blank=True)
    wednesday_end = models.TimeField(auto_now=False, null=True, blank=True)
    thursday_start = models.TimeField(auto_now=False, null=True, blank=True)
    thursday_end = models.TimeField(auto_now=False, null=True, blank=True)
    friday_start = models.TimeField(auto_now=False, null=True, blank=True)
    friday_end = models.TimeField(auto_now=False, null=True, blank=True)
    saturday_start = models.TimeField(auto_now=False, null=True, blank=True)
    saturday_end = models.TimeField(auto_now=False, null=True, blank=True)
    sunday_start = models.TimeField(auto_now=False, null=True, blank=True)
    sunday_end = models.TimeField(auto_now=False, null=True, blank=True)
    def getSchedule(self):
        """Return {field_name: formatted time or ''} for every day/part.

        Refactor: replaces 14 copy-pasted if/else pairs with a loop over
        day/part field names; output is identical to the original.
        """
        days = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday',
                'saturday', 'sunday')
        schedule = {}
        for day in days:
            for part in ('start', 'end'):
                key = '{}_{}'.format(day, part)
                value = getattr(self, key)
                # %-I drops the leading zero from the hour (POSIX strftime
                # extension; matches the original per-field formatting).
                schedule[key] = value.strftime("%-I:%M %p") if value else ''
        return schedule
class Restaurant(models.Model):
    """Restaurant profile linked to its main contact user and application review."""
    # NOTE(review): auto_now=True updates this field on every save; a creation
    # timestamp would normally use auto_now_add=True — confirm intent.
    created_at = models.DateTimeField(auto_now=True)
    company_name = models.CharField(verbose_name='company name', max_length=255, unique=False, )
    main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name="restaurant_object", null=True)
    phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )
    schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)
    meals = models.IntegerField()
    uber_eats = models.BooleanField(default=False)
    delivery_capacity = models.BooleanField(default=False)
    packaging = models.BooleanField(default=False)
    health_certificate = models.CharField(verbose_name='health certificate', max_length=255, unique=False, )
    address = models.CharField(verbose_name='address', max_length=255, unique=False, )
    coordinates = models.CharField(verbose_name='coordinates', max_length=255, unique=False, null=True)
    latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)
    longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)
    review = models.ForeignKey('applications.ApplicationReview', related_name='restaurants',
                               on_delete=models.DO_NOTHING, null=True)
class Program(models.Model):
    """Program (e.g. school) profile linked to its main contact user."""
    # NOTE(review): auto_now=True updates on every save — see Restaurant.
    created_at = models.DateTimeField(auto_now=True)
    program_name = models.CharField(verbose_name='program name', max_length=255, unique=False, )
    main_contact = models.ForeignKey('profiles.UserClass', on_delete=models.DO_NOTHING, related_name="program_object", null=True)
    phone_number = models.CharField(verbose_name='phone number', max_length=255, unique=False, )
    schedule = models.ForeignKey('profiles.Schedule', on_delete=models.DO_NOTHING, null=True)
    meals = models.IntegerField(default=0, null=True)
    address = models.CharField(verbose_name='address', max_length=255, unique=False, )
    # NOTE(review): verbose_name says 'address' but the field holds coordinates
    # (copy/paste from `address`?) — confirm.
    coordinates = models.CharField(verbose_name='address', max_length=255, unique=False, null=True)
    latitude = models.CharField(verbose_name='latitude', max_length=255, unique=False, null=True)
    longitude = models.CharField(verbose_name='longitude', max_length=255, unique=False, null=True)
    review = models.ForeignKey('applications.ApplicationReview', related_name="programs",
                               on_delete=models.DO_NOTHING, null=True)
class Courier(models.Model):
    """Courier profile (placeholder; currently only tracks a timestamp)."""
    # NOTE(review): auto_now=True updates on every save — see Restaurant.
    created_at = models.DateTimeField(auto_now=True)
class Profile(models.Model):
    """Avatar profile attached one-to-one to a BasicUser."""
    user = models.OneToOneField(BasicUser, on_delete=models.CASCADE)
    avatar = models.ImageField(upload_to='avatars', blank=True)
    def __str__(self):
        # NOTE(review): BasicUser defines no `username` field in this file,
        # so this looks like it raises AttributeError — confirm the intended
        # display field.
        return self.user.username
|
7,806 | c967aa647a97b17c9a7493559b9a1577dd95263a | # -*- coding: utf-8 -*-
import math
# 冒泡排序(Bubble Sort)
# 比较相邻的元素。如果第一个比第二个大,就交换它们两个;
# 对每一对相邻元素作同样的工作,从开始第一对到结尾的最后一对,这样在最后的元素应该会是最大的数;
# 针对所有的元素重复以上的步骤,除了最后一个;
# 重复步骤1~3,直到排序完成。
# 冒泡排序总的平均时间复杂度为:O(n^2)
def bubble_sort(input):
    """In-place bubble sort; prints a banner and returns the same list.

    Repeatedly compares adjacent pairs and swaps out-of-order ones; after
    pass k the largest k elements are in their final positions. O(n^2).
    """
    print("\nBubble Sort")
    n = len(input)
    print("length of input: %d" % n)
    for pass_num in range(n):
        # Elements beyond n - 1 - pass_num are already in place.
        for idx in range(n - 1 - pass_num):
            if input[idx] > input[idx + 1]:
                input[idx], input[idx + 1] = input[idx + 1], input[idx]
    return input
test_arr = [3, 4, 1, 6, 30, 5]
test_arr_bubble_sorted = bubble_sort(test_arr)
print(test_arr_bubble_sorted)
# 选择排序(Selection-sort)
# 选择排序(Selection-sort)是一种简单直观的排序算法。它的工作原理:首先在未排序序列中找到最小(大)元素,存放到排序序列的起始位置,
# 然后,再从剩余未排序元素中继续寻找最小(大)元素,然后放到已排序序列的末尾。以此类推,直到所有元素均排序完毕。
# 选择排序总的平均时间复杂度为:O(n^2)
def select_sort(input):
    """In-place selection sort; prints a banner and returns the same list.

    For each position, finds the minimum of the unsorted tail and swaps it
    into place. O(n^2) comparisons, O(n) swaps.
    """
    print("\nSelect Sort")
    n = len(input)
    for i in range(n):
        smallest = i
        for j in range(i + 1, n):
            if input[j] < input[smallest]:
                smallest = j
        input[i], input[smallest] = input[smallest], input[i]
    return input
test_arr = [3, 4, 1, 6, 30, 5]
test_arr_select_sorted = select_sort(test_arr)
print(test_arr_select_sorted)
# 插入排序(Insertion Sort)
# 插入排序(Insertion-Sort)的算法描述是一种简单直观的排序算法。它的工作原理是通过构建有序序列,对于未排序数据,
# 在已排序序列中从后向前扫描,找到相应位置并插入。
# 归并排序(Merge Sort)
# 首先归并排序使用了二分法,归根到底的思想还是分而治之。拿到一个长数组,将其不停的分为左边和右边两份,然后以此递归分下去。
# 然后再将她们按照两个有序数组的样子合并起来。
# 归并排序时间复杂度是o(nlogn)
def merge_sort(input):
    """Return a new sorted list via top-down merge sort. O(n log n)."""
    if len(input) <= 1:
        return input
    mid = math.floor(len(input) / 2)
    # Sort each half recursively, then merge the two sorted halves.
    return merge(merge_sort(input[:mid]), merge_sort(input[mid:]))

def merge(sorted_arr1, sorted_arr2):
    """Merge two already-sorted lists into one sorted list."""
    result = []
    i = j = 0
    while i < len(sorted_arr1) and j < len(sorted_arr2):
        if sorted_arr1[i] < sorted_arr2[j]:
            result.append(sorted_arr1[i])
            i += 1
        else:
            result.append(sorted_arr2[j])
            j += 1
    # At most one of the slices below is non-empty; append the leftovers.
    result.extend(sorted_arr1[i:])
    result.extend(sorted_arr2[j:])
    return result
test_arr = [3, 4, 1, 6, 30, 5]
print("\nMerge Sort")
test_arr_merge_sorted = merge_sort(test_arr)
print(test_arr_merge_sorted)
# 快速排序(Quick Sort)
# 快速排序使用分治法来把一个串(list)分为两个子串(sub-lists)。具体算法描述如下:
#
# 从数列中挑出一个元素,称为 “基准”(pivot);
# 重新排序数列,所有元素比基准值小的摆放在基准前面,所有元素比基准值大的摆在基准的后面(相同的数可以到任一边)。
# 在这个分区退出之后,该基准就处于数列的中间位置。这个称为分区(partition)操作;
# 递归地(recursive)把小于基准值元素的子数列和大于基准值元素的子数列排序。
# 快速排序时间复杂度是o(nlogn)
def quick_sort(li, start, end):
    """In-place quicksort of li[start:end+1] using a hole-filling partition.

    The first element is taken as the pivot; its slot becomes a "hole" that
    values are dropped into while the two cursors converge. O(n log n) on
    average.
    """
    # Zero or one element in the range: nothing to sort.
    if start >= end:
        return
    low, high = start, end
    pivot = li[low]
    while low < high:
        # Scan from the right for a value below the pivot; move it into the hole.
        while low < high and li[high] >= pivot:
            high -= 1
        li[low] = li[high]
        # Scan from the left for a value at or above the pivot; move it right.
        while low < high and li[low] < pivot:
            low += 1
        li[high] = li[low]
    # Cursors met: this is the pivot's final position.
    li[low] = pivot
    # Recurse on the partitions either side of the pivot.
    quick_sort(li, start, low - 1)
    quick_sort(li, low + 1, end)
test_arr = [3, 4, 1, 6, 30, 5]
print("\nQuick Sort")
quick_sort(test_arr, 0, len(test_arr)-1)
print(test_arr)
|
7,807 | bef16443f77b2c1e09db9950a4617703085d9f71 | import datetime
import numpy as np
import tensorflow as tf
from alphai_time_series.performance_trials.performance import Metrics
import alphai_cromulon_oracle.cromulon.evaluate as crocubot_eval
import alphai_cromulon_oracle.cromulon.train as crocubot_train
from alphai_cromulon_oracle.cromulon.helpers import TensorflowPath, TensorboardOptions
from alphai_cromulon_oracle.cromulon.model import CrocuBotModel
from alphai_feature_generation.classifier import BinDistribution
from alphai_cromulon_oracle.data.providers import TrainDataProviderForDataSource
from alphai_cromulon_oracle.helpers import printtime, execute_and_get_duration
import examples.iotools as io
from examples.benchmark.helpers import print_time_info
from examples.helpers import D_TYPE, load_default_topology
def run_timed_benchmark_time_series(series_name, tf_flags, do_training=True):
    """Optionally train, then evaluate, a Crocubot network on a time series,
    printing how long each phase took."""
    topology = load_default_topology(series_name, tf_flags)
    # First need to establish bin edges using full training set
    n_train_samples = np.minimum(tf_flags.n_training_samples_benchmark, 10000)
    bin_distribution = _create_bin_distribution(series_name, n_train_samples, topology)
    batch_size = tf_flags.batch_size
    save_path = io.build_check_point_filename(series_name, topology, tf_flags)

    @printtime(message="Training {} with do_train: {}".format(series_name, int(do_training)))
    def _do_training():
        execution_time = datetime.datetime.now()
        if do_training:
            data_provider = TrainDataProviderForDataSource(
                series_name,
                D_TYPE,
                n_train_samples,
                batch_size,
                True,
                bin_distribution.bin_edges
            )
            # Bug fix: removed `train_x = data_provider.get_batch(0)` and the
            # dead `raw_train_data = TrainDataProvider(train_x, train_y, ...)`
            # line — `TrainDataProvider` and `train_y` were undefined, so the
            # old code raised NameError before training started, and the
            # result was never used.
            tensorflow_path = TensorflowPath(save_path, tf_flags.model_save_path)
            tensorboard_options = TensorboardOptions(tf_flags.tensorboard_log_path,
                                                     tf_flags.learning_rate,
                                                     batch_size,
                                                     execution_time
                                                     )
            crocubot_train.train(topology,
                                 data_provider,
                                 tensorflow_path,
                                 tensorboard_options,
                                 tf_flags
                                 )
        else:
            # No training requested: just build the graph variables so the
            # checkpoint can be restored during evaluation.
            tf.reset_default_graph()
            model = CrocuBotModel(topology)
            model.build_layers_variables()

    train_time, _ = execute_and_get_duration(_do_training)
    print("Training complete.")
    eval_time, _ = execute_and_get_duration(evaluate_network, topology, series_name, batch_size,
                                            save_path, bin_distribution, tf_flags)
    print('Metrics:')
    print_time_info(train_time, eval_time)
def _create_bin_distribution(series_name, n_training_samples, topology):
    # Derive classification-bin edges from one full-size batch of training
    # labels (batch size == n_training_samples, so batch 0 is the whole set).
    data_provider = TrainDataProviderForDataSource(series_name, D_TYPE, n_training_samples, n_training_samples, True)
    train_data = data_provider.get_batch(0)
    return BinDistribution(train_data.labels, topology.n_classification_bins)
@printtime(message="Evaluation of Stocastic Series")
def evaluate_network(topology, series_name, batch_size, save_path, bin_dist, tf_flags):
    """Run the trained network on a held-out batch and report sample metrics."""
    n_training_samples = batch_size * 2
    data_provider = TrainDataProviderForDataSource(series_name, D_TYPE, n_training_samples, batch_size, False)
    test_features, test_labels = data_provider.get_batch(1)  # batch 1 = unseen data
    binned_outputs = crocubot_eval.eval_neural_net(test_features, topology, tf_flags, save_path)
    # Collapse the binned network outputs into point forecasts + covariance.
    estimated_means, estimated_covariance = crocubot_eval.forecast_means_and_variance(
        binned_outputs, bin_dist, tf_flags)
    test_labels = np.squeeze(test_labels)
    model_metrics = Metrics()
    model_metrics.evaluate_sample_performance(
        data_provider.data_source,
        test_labels,
        estimated_means,
        estimated_covariance
    )
|
7,808 | 8bc465a1b546907d8a9e5eee2cae672befb1ea13 | n = int(input())
b = 0          # time of the previously reached point
p = [0,0]      # previous position (journey starts at the origin)
flg = True     # stays True iff every point is reachable on schedule
for i in range(n):
    t,x,y = map(int,input().split())
    diff = abs(x - p[0]) + abs(y - p[1])  # Manhattan distance to the next point
    time = t - b                          # time budget for this leg
    # Reachable iff the distance fits in the budget AND parities match:
    # each unit step flips the parity of x+y, so any leftover time must be even.
    if(diff > time or time%2 != diff %2):
        flg = False
        break
    else:
        b = t
        p[0] = x
        p[1] = y
if flg:
    print("Yes")
else:
    print("No")
|
7,809 | f9d8280d765826b05bfa7989645e487431799f85 | from flask import Flask
from flask_script import Manager
app = Flask(__name__)
# NOTE(review): Flask-Script is unmaintained; the built-in Flask CLI is the
# modern replacement. Kept because this script starts via manager.run().
manager = Manager(app)

@app.route('/')
def index():
    # Root route: placeholder text.
    return '2018/6/1 hello python'

@app.route('/news')
def news():
    # News route: placeholder text (Chinese: "Inner Mongolia news, please browse").
    return '内蒙古新闻资讯,请选择浏览'

if __name__ == '__main__':
    manager.run()
|
7,810 | f73a316b6020908472e35a7b78959a9bda6e8e56 | # 导包
# Imports
from time import sleep
from selenium import webdriver

# NOTE(review): find_element_by_* / find_element_by_css_selector were removed
# in Selenium 4 — confirm the pinned selenium version supports them.

# Instantiate the browser
driver = webdriver.Firefox()
# Open the local test page
driver.get(r"F:\BaiduYunDownload\webdriverspace\sources\注册实例.html")
driver.maximize_window()
sleep(2)
# Locate the "Register A" link and click it (opens a new window)
driver.find_element_by_link_text("注册A网页").click()
# Get the current window handle
current_handle = driver.current_window_handle
print("当前敞口句柄:", current_handle)
# Get all window handles
handles = driver.window_handles
print("所有敞口句柄:", handles)
# Iterate the handles and switch to the newly opened window
for handle in handles:
    if current_handle != handle:
        # Switch the driver's focus to that window
        driver.switch_to.window(handle)

# Fill in the registration-A form
driver.find_element_by_css_selector("#userA").send_keys("admin")
sleep(1)
driver.find_element_by_css_selector("#passwordA").send_keys("123456")
sleep(1)
driver.find_element_by_css_selector("#telA").send_keys("18111265465")
sleep(1)
driver.find_element_by_css_selector("#emailA").send_keys("1188@qq.com")
# Take a screenshot and save it
driver.get_screenshot_as_file("../image/imag01.jpg")
sleep(2)
driver.quit()
|
7,811 | 0547751af7bbac42351476dde591d13d40fb37eb | #!/usr/bin/env python
"""
Otsu method for automatic estimation of $T$ threshold value
- assumes two maxima of grayscale histogram & searches for optimal separation
Parameters
Usage
Example
$ python <scriptname>.py --image ../img/<filename>.png
## Explain
"""
import numpy as np
import argparse
import mahotas
import cv2
from numpy.matrixlib.defmatrix import matrix
def main():
    """Threshold an image with Otsu's and Riddler-Calvard's automatic
    methods and display the original plus both binarised results."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())
    image = cv2.imread(args["image"])

    # Preprocessing: grayscale + Gaussian blur to smooth the histogram
    # before threshold estimation.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(image, (5,5), 0)
    cv2.imshow("Image", image)

    # Otsu: picks the T that best separates the two histogram modes.
    T = mahotas.thresholding.otsu(blurred)
    print("[INFO] Otsu's threshold {}".format(T))
    thresh = image.copy()
    thresh[thresh > T] = 255   # pixels above T -> white
    thresh[thresh < 255] = 0   # everything else -> black
    thresh = cv2.bitwise_not(thresh)  # invert so dark foreground becomes white
    cv2.imshow("Otsu", thresh)

    # Riddler-Calvard: alternative automatic threshold estimate.
    T = mahotas.thresholding.rc(blurred)
    print("[INFO] Riddler-Calvard: {}".format(T))
    thresh = image.copy()
    thresh[thresh > T] = 255
    thresh[thresh < 255] = 0
    thresh = cv2.bitwise_not(thresh)
    cv2.imshow("Riddler-Calvard", thresh)
    cv2.waitKey(0)

if __name__=="__main__":
    main()
7,812 | 03284f20e614a5f8f5c21939acf49490d6ffd3a3 | import json
startTime = ""
endTime = ""
controller = 0

# Scan 30 result files; in each, the first JSON line carries the run's start
# time and the 15th carries its end time (controller counts JSON lines seen).
for files in range(30):
    # Bug fix: open files with a context manager so every handle is closed
    # (the original leaked all 31 file handles).
    with open("NewResults" + str(files+1) + ".data") as file:
        for line in file:
            if line != "\n":
                j = json.loads(line)
                if controller == 0:
                    startTime = j['metrics'][0]['startTime']
                    helper = startTime.split(" ")
                    hour = helper[1].split(":")[0]
                    minute = helper[1].split(":")[1]
                    second = helper[1].split(":")[2]
                    print("startTime: " + hour + " : " + minute + " : " + second)
                elif controller == 14:
                    endTime = j['metrics'][0]['startTime']
                    helper = endTime.split(" ")
                    hour = helper[1].split(":")[0]
                    minute = helper[1].split(":")[1]
                    second = helper[1].split(":")[2]
                    print("endTime: " + hour + " : " + minute + " : " + second)
                    controller = 0
                    break
                controller += 1

# Print the timestamp of the first request in the burst log, e.g. "[HH:MM:SS]".
with open("request-file-burst-1.data", "r") as file:
    for line in file:
        data = line.split(" ")
        grossTime = data[0].split(":")
        hour = grossTime[0].split("[")[1]
        minute = grossTime[1]
        second = grossTime[2].split("]")[0]
        print(hour + " : " + minute + " : " + second)
        break
|
7,813 | a1b0e72b62abc89d5292f199ec5b6193b544e271 | DEBUG = True
# SQLAlchemy connection string (placeholder credentials).
# NOTE(review): keep real credentials out of source control — load from an
# environment variable or secrets manager instead.
SQLALCHEMY_DATABASE_URI = "postgresql://username:password@IPOrDomain/databasename"
# Track object modifications; adds overhead (Flask-SQLAlchemy recommends False).
SQLALCHEMY_TRACK_MODIFICATIONS = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 2
|
7,814 | 1c685514f53a320226402a4e4d8f3b3187fad615 | import uuid
from datetime import date
import os
import humanize
class Context:
    """Minimal stand-in for an AWS Lambda context object, for local testing."""
    def __init__(self, function_name, function_version):
        self.function_name = function_name
        self.function_version = function_version
        self.invoked_function_arn = "arn:aws:lambda:eu-north-1:000000000000:function:{}".format(self.function_name)
        self.aws_request_id = uuid.uuid1()
        self.log_group_name = "/aws/lambda/{}".format(self.function_name)
        today = date.today()
        self.log_stream_name = "{}/[{}]4459c970fa6d4c77aca62c95850fce54".format(today.strftime("%Y/%m/%d"), self.function_version)
        # memory() both sets and returns the attribute, so this assignment is
        # redundant but harmless.
        self.memory_limit_in_mb = Context.memory(self)
        pass  # no-op (left from the original)
    def memory(self):
        # Reads the cgroup v1 memory limit — Linux-only.
        # NOTE(review): cgroup v2 hosts expose memory.max instead; confirm
        # this path exists on the target environment.
        mem = int(os.popen("cat /sys/fs/cgroup/memory/memory.limit_in_bytes").read())
        self.memory_limit_in_mb = humanize.naturalsize(mem, gnu=True)
        return (self.memory_limit_in_mb)
        pass  # unreachable (after return; left from the original)
|
7,815 | cdbc7d703da69adaef593e6a505be25d78beb7ce | import numpy as np
class EdgeListError(ValueError):
    """Raised when an edge list is malformed."""
    pass

def check_edge_list(src_nodes, dst_nodes, edge_weights):
    """Checks that the input edge list is valid.

    src_nodes and dst_nodes must have equal length; edge_weights, when
    provided, must match that length too.
    """
    if len(src_nodes) != len(dst_nodes):
        raise EdgeListError("src_nodes and dst_nodes must be of same length.")
    if edge_weights is not None and len(edge_weights) != len(src_nodes):
        raise EdgeListError("src_nodes and edge_weights must be of same length.")
class AdjacencyMatrixError(ValueError):
    """Raised when an adjacency matrix is malformed."""
    pass

def check_adj_matrix(adj_matrix):
    """Checks that the input adjacency matrix is valid (2-D and square)."""
    if adj_matrix.ndim != 2:
        raise AdjacencyMatrixError("The numpy array must be of dimension 2.")
    rows, cols = adj_matrix.shape
    if rows != cols:
        raise AdjacencyMatrixError("The matrix must be squared.")
def is_symmetric(matrix):
    """Return True iff the matrix equals its own transpose."""
    transposed = matrix.T
    return np.array_equal(matrix, transposed)
def wv_to_numpy_array(wv):
    """Stack word vectors into a float32 array, ordered by integer token id.

    Vocabulary keys are expected to be string representations of integers;
    row i of the result is the vector for token id sorted(ids)[i].
    """
    token_ids = sorted(int(key) for key in wv.vocab.keys())
    vectors = [wv[str(token_id)] for token_id in token_ids]
    return np.array(vectors, dtype=np.float32)
|
7,816 | 4c5b3042a785342d6ef06fdc882e0dcf91a787c3 |
from datetime import date
import config
import datetime
import numpy
import pandas
import data_sources
from data_sources import POPULATION, convert_to_ccaa_iso
import material_line_chart
import ministry_datasources
HEADER = '''<html>
<head>
<title>{}</title>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
'''
HEADER2 = '''
google.charts.load('current', {'packages':['line', 'corechart', 'controls']});
'''
DESCRIPTIONS_CCAA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
DESCRIPTIONS_SPA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
DESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA}
def calc_accumulated_indicende_per_ccaa(report, num_days=15):
    """Accumulated incidence per CCAA: cases in the previous *num_days*
    per 100,000 inhabitants.

    report: dict with a 'dframe' whose MultiIndex has a CCAA level and a
    'fecha' (date) level -- assumed from the indexing below; TODO confirm.
    Returns {ccaa: pandas.Series of incidence indexed by date}.
    """
    ccaas = data_sources.get_ccaas_in_dset(report)
    dframe = report['dframe']
    num_cases = dframe['num_casos']
    ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)
    index = num_cases.index.to_frame(index=False)
    time_delta = numpy.timedelta64(num_days, 'D')
    accumulated_cases_by_ccaa = {}
    for ccaa in ccaas:
        # Keep only this CCAA's rows.
        mask = index[ccaa_column] == ccaa
        mask = mask.values
        num_cases_for_this_ccaa = num_cases[mask]
        this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)
        this_ccaa_dates = this_ccaa_index['fecha']
        num_accumulated_cases = []
        valid_dates = []
        for date in this_ccaa_dates:
            date0 = date - time_delta
            # Window (date0, date]: the num_days days ending at `date`.
            mask = numpy.logical_and(this_ccaa_dates > date0,
                                     this_ccaa_dates <= date)
            mask = mask.values
            # Skip windows that do not cover num_days worth of data.
            if numpy.sum(mask) < num_days:
                continue
            num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask]))
            valid_dates.append(date)
        num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates)
        # Normalize to cases per 100,000 inhabitants.
        num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5
        accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases
    return accumulated_cases_by_ccaa
def _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height):
    """Build the chart JavaScript for a (ccaa x date) dataframe.

    date_range: optional (start, end); keeps dates with start < date <= end.
    Returns the JavaScript snippet produced by material_line_chart.
    """
    table = []
    ccaas = sorted(dframe.index)
    dates = list(dframe.columns)
    if date_range is not None:
        dates = [date for date in dates if date > date_range[0] and date <= date_range[1]]
    columns = [('date', 'fecha')]
    columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
    for date in dates:
        row = [date.date()]
        for ccaa in ccaas:
            value = dframe.loc[ccaa, date]
            row.append(value)
        table.append(row)
    # (a no-op `js_function_name = js_function_name` self-assignment was removed)
    html = material_line_chart.create_chart_js(js_function_name, div_id, title,
                                               columns, table,
                                               width=width, height=height)
    return html
def _write_table_from_series(series):
html = '<table>'
for index, value in zip(series.index, series.values):
html += f'<tr><td>{index}</td><td>{value}</td></tr>\n'
html += '</table>'
return html
def is_desired_ccaa(ccaa, desired_ccaas):
    """True when no CCAA filter is given or *ccaa*'s ISO code is in it."""
    if desired_ccaas is None:
        return True
    return data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas
def _create_table_for_chart_from_dict(dict_data, desired_ccaas):
    """Build chart rows [date, v_ccaa1, v_ccaa2, ...] from {ccaa: series}.

    Dates are taken from one arbitrary entry, so all series are assumed to
    share the same index.  Returns (table, kept_ccaas, dates).
    """
    any_series = next(iter(dict_data.values()))
    kept_ccaas = [ccaa for ccaa in sorted(dict_data.keys())
                  if is_desired_ccaa(ccaa, desired_ccaas)]
    dates = list(any_series.index)
    table = [[date.date()] + [dict_data[ccaa][date] for ccaa in kept_ccaas]
             for date in dates]
    return table, kept_ccaas, dates
def _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days):
    """Whole-Spain accumulated incidence: cases in the previous *num_days*
    per 100,000 inhabitants.

    Returns (table, dates) where each table row is (date, incidence).
    """
    dframe = report['dframe']
    time_delta = numpy.timedelta64(num_days, 'D')
    # Sum cases over all CCAAs; groupby(level=1) aggregates over the other
    # index level (presumably the date level -- TODO confirm).
    num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']
    tot_pop = sum(data_sources.POPULATION.values())
    dates = numpy.array(num_cases.index)
    num_accumulated_cases = []
    valid_dates = []
    for date in dates:
        date0 = date - time_delta
        # Window (date0, date]: the num_days days ending at `date`.
        mask = numpy.logical_and(dates > date0,
                                 dates <= date)
        # Skip windows that do not cover num_days worth of data.
        if numpy.sum(mask) < num_days:
            continue
        num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5)
        # numpy datetime64 -> python datetime; astype('O')/1e9 implies the
        # values are nanoseconds since the epoch -- TODO confirm ns resolution.
        date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9)
        valid_dates.append(date)
    table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)]
    dates = valid_dates
    return table, dates
def _create_table_for_chart_from_dframe(dframe, desired_ccaas):
    """Build chart rows [date, v_ccaa1, ...] from a (ccaa x date) dataframe.

    Returns (table, kept_ccaas, dates).
    """
    kept_ccaas = [ccaa for ccaa in sorted(dframe.index)
                  if is_desired_ccaa(ccaa, desired_ccaas)]
    dates = list(dframe.columns)
    table = [[date.date()] + [dframe.loc[ccaa, date] for ccaa in kept_ccaas]
             for date in dates]
    return table, kept_ccaas, dates
def _create_table_for_chart_from_series(series):
table = [(date.date(), value) for date, value in zip(series.index, series.values)]
return table
def write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):
    """Write the Covid-19 summary HTML report to *out_path*.

    date_range: (start, end) datetimes limiting the charts.
      NOTE(review): despite the None default, date_range is indexed
      unconditionally below (slider_config), so omitting it raises -- confirm
      intended contract.
    desired_ccaas: optional list of CCAA ISO codes to restrict the report to.
    spa_report: build the whole-Spain report instead (mutually exclusive with
      desired_ccaas).
    """
    if spa_report and desired_ccaas:
        raise ValueError('choose one, either spa or ccaa report')
    if desired_ccaas and len(desired_ccaas) == 1:
        only_one_ccaa = True
        ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])
    else:
        only_one_ccaa = False
    # Most recent downloaded report and deceased dataset.
    ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()
    report = ccaa_info[-1]
    accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)
    deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),
                    key=lambda x: x['max_date'])[-1]
    if spa_report:
        accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15)
    else:
        accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulaed_incidence, desired_ccaas)
    title = 'Resumen situación Covid-19'
    if spa_report:
        title += ' España'
    elif only_one_ccaa:
        title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)
    else:
        title += ' por comunidad autónoma'
    html = HEADER.format(title)
    html += HEADER2
    # Accumulated-incidence chart (with a date range slider).
    js_function_name = 'drawAccumulatedCasesIncidence'
    columns = [('date', 'fecha')]
    if spa_report:
        columns.extend([('number', 'España')])
    else:
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])
    title = 'Incidencia acumulada por 100.000 hab. (15 días)'
    width =900
    height = 800
    rangeslider_height = 50
    # Pixel sizes for the JS side and matching CSS sizes for the divs.
    js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},
                'chart': {'height': height, 'width': width},
                'rangeslider': {'height': rangeslider_height, 'width': 600},
                }
    div_sizes = {}
    for html_element in js_sizes:
        div_sizes[html_element] = {}
        div_sizes[html_element]['height'] = f"{js_sizes[html_element]['height']}px"
        div_sizes[html_element]['width'] = f"{js_sizes[html_element]['width']}px"
    slider_config = {'column_controlled': 'fecha',
                     'min_value': dates[0],
                     'max_value': dates[-1],
                     'min_init_value': date_range[0],
                     'max_init_value': date_range[-1]}
    div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',
                                 'chart': 'accumulated_cases_chart',
                                 'rangeslider': 'accumulated_cases_rangeslider'}
    html += material_line_chart.create_chart_js_with_slider(js_function_name,
                                                            slider_config,
                                                            div_ids_accumulated_cases,
                                                            title,
                                                            columns,
                                                            accumulated_incidence_table,
                                                            sizes=js_sizes)
    js_function_names = {'hospitalized': 'drawHospitalized',
                         'icu': 'drawICU',
                         'deceased': 'drawDeceased'}
    div_ids = {'hospitalized': 'hospitalized_chart',
               'icu': 'icu_chart',
               'deceased': 'deceased_chart'
              }
    titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
              'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
              'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
             }
    # NOTE(review): the `if False:` sections below are disabled ministry
    # rolling-mean charts -- dead code kept by the author.
    if False:
        if spa_report:
            rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()
            titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',
                      'icu': 'Num. ingresos UCI. (media 7 días)',
                      'deceased': 'Num. fallecidos. (media 7 días)'
                     }
        else:
            rolling_means = ministry_datasources.get_ministry_rolling_mean()
            titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
                      'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
                      'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
                     }
    div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',
                            'chart': 'hospitalized_chart',
                            'rangeslider': 'hospitalized_rangeslider'}
    div_ids_deceased = {'dashboard': 'deceased_dashboard',
                        'chart': 'deceased_chart',
                        'rangeslider': 'deceased_rangeslider'}
    div_ids = {'hospitalized': div_ids_hospitalized,
               'deceased': div_ids_deceased,
              }
    if False:
        dframe = rolling_means['hospitalized']
        if spa_report:
            columns = [('date', 'fecha'), ('number', 'España')]
            table = _create_table_for_chart_from_series(dframe)
        else:
            populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]
            dframe = dframe.divide(populations, axis=0) * 1e5
            table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)
            columns = [('date', 'fecha')]
            columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
    key = 'hospitalized'
    # NOTE(review): this hospitalized chart reuses `table`/`columns` from the
    # incidence section unless the disabled block above runs -- confirm.
    hospitalized_slider_config = {'column_controlled': 'fecha',
                                  'min_value': dates[0],
                                  'max_value': dates[-1],
                                  'min_init_value': date_range[0],
                                  'max_init_value': datetime.datetime.now()}
    html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                            hospitalized_slider_config,
                                                            div_ids[key],
                                                            title=titles[key],
                                                            columns=columns,
                                                            data_table=table,
                                                            sizes=js_sizes)
    # Deceased chart: 7-day centered rolling mean of daily deaths.
    num_days = 7
    key = 'deceased'
    deaths_dframe = deaths['dframe']
    if spa_report:
        spa_deaths = deaths_dframe.sum(axis=0)
        deaths_rolling_mean = spa_deaths.rolling(num_days, center=True, min_periods=num_days).mean().dropna()
        table = _create_table_for_chart_from_series(deaths_rolling_mean)
        columns = [('date', 'fecha'), ('number', 'España')]
    else:
        deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True, min_periods=num_days, axis=1).mean()
        deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')
        # Normalize per 100,000 inhabitants.
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]
        deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0) * 1e5
        table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)
        columns = [('date', 'fecha')]
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
    html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                            slider_config,
                                                            div_ids[key],
                                                            title=titles[key],
                                                            columns=columns,
                                                            data_table=table,
                                                            sizes=js_sizes)
    # HTML body: header paragraphs, totals and the chart divs.
    html += ' </script>\n  </head>\n  <body>\n'
    today = datetime.datetime.now()
    html += '<p><a href="../">Menu</a></p>'
    html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'
    html += f'<p>Este informe está generado para uso personal por <a href="https://twitter.com/jblanca42">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'
    html += f'<p>El código utilizado para generarlo se encuentra en <a href="https://github.com/JoseBlanca/seguimiento_covid">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'
    if desired_ccaas:
        index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa(ccaa, desired_ccaas)]
        tot_deaths = deaths['dframe'].loc[index, :].values.sum()
    else:
        tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']
    html += f'<p>Número total de fallecidos: {tot_deaths}</p>'
    if spa_report:
        death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'
    elif desired_ccaas and len(desired_ccaas) == 1:
        death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'
    else:
        deaths_per_ccaa = deaths['dframe'].sum(axis=1)
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]
        populations = pandas.Series(populations, index=deaths_per_ccaa.index)
        death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)
        html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'
        html += _write_table_from_series(death_rate)
    if False:
        for key in ['hospitalized']:
            html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
            html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                      sizes=div_sizes)
    html += f"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\n"
    html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,
                                                              sizes=div_sizes)
    for key in ['deceased']:
        html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
        html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                  sizes=div_sizes)
    html += ' </body>\n</html>'
    out_path.open('wt').write(html)
if __name__ == '__main__':
    # Report window: from 40 days ago up to 10 days ago.
    ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)
    forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)
    first_date = datetime.datetime(2020, 9, 1)  # NOTE(review): unused here
    out_dir = config.HTML_REPORTS_DIR
    out_dir.mkdir(exist_ok=True)
    out_path = out_dir / 'situacion_covid_por_ca.html'
    write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])
|
7,817 | 907f0564d574f197c25b05a79569a8b6f260a8cd | import math
from os.path import join, relpath, dirname
from typing import List, Tuple
from common import read_input
convert_output = List[str]
class GridWalker:
    """Walks a 2D grid starting at the origin, facing north (heading pi/2).

    Directions are strings like "R5" or "L12": turn 90 degrees right/left,
    then advance the given number of blocks.
    """

    def __init__(self):
        self._current_pos = [0, 0]
        self._heading = math.pi / 2  # radians; pi/2 means facing north

    @property
    def position(self):
        return self._current_pos

    @property
    def distance_from_home(self):
        # Manhattan distance back to the origin.
        return int(abs(self._current_pos[0]) + abs(self._current_pos[1]))

    def __repr__(self):
        return (
            f"Self @ {self.position[0]},{self.position[1]} - {self.distance_from_home}"
        )

    def walk(self, direction: str):
        heading, distance = self._convert_direction(direction)
        self._heading += heading
        # round() (not int truncation) so tiny float errors in cos/sin cannot
        # turn e.g. 4.9999999 into 4.
        self._current_pos[0] += round(math.cos(self._heading) * distance)
        self._current_pos[1] += round(math.sin(self._heading) * distance)

    @staticmethod
    def _convert_direction(direction: str) -> Tuple[float, int]:
        """Converts a direction into a heading delta and a distance.

        Bug fix: parse the whole distance (direction[1:]) instead of only the
        first digit, so multi-digit steps like "R12" are handled correctly.
        """
        direction_to_heading = {"L": math.pi / 2, "R": -math.pi / 2}
        return direction_to_heading[direction[0]], int(direction[1:])
def input_converter(input_line: str) -> "convert_output":
    """Split one raw puzzle line on ', ' into direction tokens."""
    tokens = input_line.split(", ")
    return tokens
def solve_part1(converted_input: "List[convert_output]"):
    """Follow every direction on the first input line; return the final
    Manhattan distance from the origin."""
    walker = GridWalker()
    for step in converted_input[0]:
        walker.walk(step)
    return walker.distance_from_home
def solve_part2(converted_input: List[convert_output]):
    # Placeholder answer: part 2 is not implemented yet.
    return 1
if __name__ == "__main__":
    # Parse input.txt (located beside this script) and print both answers.
    raw_input = read_input(
        join(relpath(dirname(__file__)), "input.txt"), input_converter
    )
    print(f"Solution of 2016/1 - Part 1 is '{solve_part1(raw_input)}'")
    print(f"Solution of 2016/1 - Part 2 is '{solve_part2(raw_input)}'")
|
7,818 | 5b7567129d447ae2b75f4a8f9c26127f8b7553ec | #app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
#app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True;
#db = SQLAlchemy(app)
# MONGODB CREATION
#Creating a pymongo client
# NOTE(review): MongoClient, create_engine, MetaData, Table, Column, Integer,
# String and Float are not imported in this excerpt -- confirm the imports
# exist earlier in the file.
client = MongoClient('localhost', 27017)
#Getting the database instance
db = client['mydb']
print("Database created........")
#Verification
print("List of databases after creating new one")
print(client.list_database_names())
# DB CREATION AND INSTANTIATION #
#DB -- OPTION 1
# NOTE(review): `db` now names the pymongo database; the commented-out
# SQLAlchemy code above also used `db` for a different object.
engine = create_engine('sqlite:///test.db', echo = True)
meta = MetaData()
# Database Schema for Item and User #
Items = Table(
   'Items', meta,
   Column('id', Integer, primary_key = True),
   Column('product_name', String),
   Column('price', Float),
   Column('quantity', Integer)
)
# NOTE(review): Users has no primary key column -- confirm this is intended.
Users = Table(
   'Users', meta,
   Column('firstname', String),
   Column('lastname', String),
   Column('email', String),
   Column('passwd', String),
   Column('phone', Integer)
)
# Create both tables in the sqlite file if they do not already exist.
meta.create_all(engine)
#class Item(db.Model):
#    id = db.Column(db.Integer, primary_key = True)
#    product = db.Column(db.String(200))
#    price = db.Column(db.Integer)
7,819 | 772e2e0a442c1b63330e9b526b76d767646b0c7c | from PyQt5.QtWidgets import QWidget, QHBoxLayout, QGraphicsOpacityEffect, \
QPushButton
from PyQt5.QtCore import Qt
class ToolBar(QWidget):
    """
    Semi-transparent tool bar with open-file and add-text buttons.
    Window for entering parameters
    """
    def __init__(self, parent):
        # parent is the main window; its onOpenFile/onAddText slots are wired
        # to the toolbar buttons in setupWidgets().
        super().__init__(parent)
        self._main_wnd = parent
        self.setAttribute(Qt.WA_StyledBackground, True)
        self.setObjectName("options")
        self.setStyleSheet("""
            #options, #closeButton {
                border-radius: 6px;
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            QToolBar {
                background-color: rgb(0, 0, 0);
                color: #fff;
            }
            """)
        self.setupWidgets()
        # Render the whole bar at 66% opacity.
        effect = QGraphicsOpacityEffect()
        effect.setOpacity(0.66)
        self.setGraphicsEffect(effect)
        self.setMinimumWidth(220)
        self.updateWidgets()
        self.connectSignals()
        self.setAcceptDrops(True)
    def mainWnd(self):
        # Accessor for the owning main window.
        return self._main_wnd
    def setupWidgets(self):
        # Horizontal layout: [open-file][gap][add-text][stretch].
        self._layout = QHBoxLayout()
        self._layout.setContentsMargins(6, 5, 12, 12)
        self._layout.setSpacing(0)
        self._open_file = self.addButton("O", self._main_wnd.onOpenFile)
        self._layout.addSpacing(8)
        self._add_text = self.addButton("T", self._main_wnd.onAddText)
        self._layout.addStretch()
        self.setLayout(self._layout)
    def addButton(self, text, action):
        # Create a button labelled `text`, connect it to `action`, add it to
        # the layout and return it.
        button = QPushButton(text)
        button.clicked.connect(action)
        self._layout.addWidget(button)
        return button
    def connectSignals(self):
        # No extra signal wiring yet (buttons connect in addButton).
        pass
    def updateWidgets(self):
        # Nothing to refresh yet; kept as an extension hook.
        pass
|
7,820 | 243794d36a1c6861c2c3308fe6a52ec19b73df72 | """Activate coverage at python startup if appropriate.
The python site initialisation will ensure that anything we import
will be removed and not visible at the end of python startup. However
we minimise all work by putting these init actions in this separate
module and only importing what is needed when needed.
For normal python startup when coverage should not be activated the pth
file checks a single env var and does not import or call the init fn
here.
For python startup when an ancestor process has set the env indicating
that code coverage is being collected we activate coverage based on
info passed via env vars.
"""
import os
def multiprocessing_start(obj):
    """After-fork hook: activate coverage in the child process.

    `obj` is supplied by multiprocessing's after-fork machinery and ignored.
    """
    cov = init()
    if cov:
        # Ensure the child's coverage data is stopped and saved at shutdown.
        multiprocessing.util.Finalize(None, multiprocessing_finish, args=(cov,), exitpriority=1000)
def multiprocessing_finish(cov):
    """Finalizer for a worker process: stop coverage and flush its data file."""
    cov.stop()
    cov.save()
try:
    import multiprocessing.util
except ImportError:
    pass
else:
    # register_after_fork(obj, func) calls func(obj) in each forked child;
    # the hook ignores its argument, so the function is passed as both.
    multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)
def init():
    """Activate coverage in this process when an ancestor requested it.

    Reads the COV_CORE_* environment variables; returns the started Coverage
    object, or None when no data file is configured.
    """
    cov_datafile = os.environ.get('COV_CORE_DATAFILE')
    if not cov_datafile:
        # No ancestor asked for coverage collection.
        return None
    # Import lazily so normal startup pays no cost.
    import coverage
    # Determine all source roots (os.pathsep-separated in the env var).
    cov_source = os.environ.get('COV_CORE_SOURCE')
    cov_source = cov_source.split(os.pathsep) if cov_source else None
    cov_config = os.environ.get('COV_CORE_CONFIG') or True
    # Activate coverage for this process.
    cov = coverage.coverage(
        source=cov_source,
        data_suffix=True,
        config_file=cov_config,
        auto_data=True,
        data_file=cov_datafile
    )
    cov.load()
    cov.start()
    cov._warn_no_data = False
    cov._warn_unimported_source = False
    return cov
|
7,821 | d47ea763ac1a4981fc5dee67cd396ad49570f923 | #coding=utf-8
from numpy import *
#代码5-1,Logistic回归梯度上升优化算法。
def loadDataSet():
    """Parse testSet.txt.

    Each line holds two features and a class label, whitespace separated.
    Returns:
        dataMat: list of [1.0, x1, x2] rows (x0 fixed to 1.0 for the bias).
        labelMat: list of int class labels.
    """
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for raw_line in fr.readlines():
        fields = raw_line.strip().split()
        dataMat.append([1.0, float(fields[0]), float(fields[1])])
        labelMat.append(int(fields[2]))
    return dataMat, labelMat
def sigmoid(inX):
    """Logistic function 1/(1+e^-x); element-wise on numpy inputs."""
    return 1 / (1 + exp(-inX))
def gradAscent(dataMatIn, classLabels):
    """Batch gradient ascent for logistic regression.

    Args:
        dataMatIn: m x n matrix-like of samples (first column is the bias 1).
        classLabels: length-m sequence of 0/1 labels.
    Returns:
        n x 1 numpy matrix of fitted weights.
    """
    data = mat(dataMatIn)
    labels = mat(classLabels).transpose()
    num_samples, num_features = shape(data)
    step_size = 0.001       # learning rate
    num_iterations = 500    # fixed iteration budget
    weights = ones((num_features, 1))
    for _ in range(num_iterations):
        predictions = sigmoid(data * weights)
        residuals = labels - predictions
        weights = weights + step_size * data.transpose() * residuals
    return weights
######################################################################################
#代码5-2,画出数据集和Logistic回归最佳拟合直线的函数。
def plotBestFit(weights):
    """Plot the data set and the fitted Logistic-regression decision line.

    Args:
        weights: length-3 regression coefficients [w0, w1, w2].
    """
    import matplotlib.pyplot as plt
    # Parse the file into the feature matrix and label list.
    dataMat,labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    # Split points by class for the two scatter colours.
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    # Draw the scatter plots and the separating line.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
    ax.scatter(xcord2,ycord2,s=30,c='green')
    x = arange(-3.0,3.0,0.1)
    # The boundary is where sigmoid's argument is 0: w0*x0 + w1*x1 + w2*x2 = 0.
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x,y)
    plt.xlabel('X1'); plt.ylabel('X2');
    plt.show()
##############################################################################################
#代码5-3,随即梯度上升算法
def stocGradAscent0(dataMatrix, classLabels):
    """One-pass stochastic gradient ascent with a fixed learning rate.

    Args:
        dataMatrix: m x n array of samples.
        classLabels: length-m sequence of 0/1 labels.
    Returns:
        length-n weight array.
    """
    num_samples, num_features = shape(dataMatrix)
    step_size = 0.01
    weights = ones(num_features)
    for sample_idx in range(num_samples):
        # Prediction and error for this single sample.
        prediction = sigmoid(sum(dataMatrix[sample_idx] * weights))
        residual = classLabels[sample_idx] - prediction
        # Gradient step.
        weights = weights + step_size * residual * dataMatrix[sample_idx]
    return weights
##############################################################################################
#代码5-4,改进的随即梯度上升算法
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Improved stochastic gradient ascent.

    Two refinements over stocGradAscent0:
    - The learning rate decays with both loop counters, damping oscillation.
    - The sample for each update is picked at random.

    NOTE(review): unlike the textbook version, nothing is removed from
    dataIndex, so the same sample can be drawn repeatedly within an epoch.
    """
    num_samples, num_features = shape(dataMatrix)
    weights = ones(num_features)
    for epoch in range(numIter):
        dataIndex = range(num_samples)
        for step in range(num_samples):
            # Decaying learning rate (never reaches 0 thanks to the +0.01).
            alpha = 4/(1.0+step+epoch)+0.01
            randIndex = int(random.uniform(0, len(dataIndex)))
            prediction = sigmoid(sum(dataMatrix[randIndex] * weights))
            residual = classLabels[randIndex] - prediction
            weights = weights + alpha * residual * dataMatrix[randIndex]
    return weights
########################################################################################################
#代码5-5,Logistic回归分类函数
def classifyVector(inX, weights):
    """Classify one sample: 1.0 when sigmoid(inX . weights) > 0.5, else 0.0.

    Args:
        inX: feature vector of the sample under test.
        weights: trained regression coefficients.
    """
    probability = sigmoid(sum(inX * weights))
    return 1.0 if probability > 0.5 else 0.0
def colicTest():
    """Train on horseColicTraining.txt, test on horseColicTest.txt.

    Each record has 21 features plus a class label, tab separated.
    Returns the error rate on the test file.
    """
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []; trainingLabels = []
    # Parse the training file and fit weights with stocGradAscent1.
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
    # Parse the test file and measure the error rate.
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = (float(errorCount)/numTestVec)
    print('the error rata of this test is : %f' % errorRate)
    return errorRate
def multiTest():
    """Run colicTest() several times and print the average error rate
    (stocGradAscent1 is randomized, so each run differs)."""
    num_runs = 10
    total_error = 0.0
    for _ in range(num_runs):
        total_error += colicTest()
    print("after %d iterations the average error rate is : %f " % (num_runs, total_error / float(num_runs)))
|
7,822 | e0fbb5ad6d822230865e34c1216b355f700e5cec | from bisect import bisect_left as bisect
while True:
    # Coordinate-compression sweep over veneer rectangles; reads until a
    # board width of 0.
    xp, yp = set(), set()
    veneer = []
    W, H = map(int, input().split())
    if not W:
        break
    N = int(input())
    for i in range(N):
        # Collect the distinct x and y coordinates of every rectangle.
        x1, y1, x2, y2 = map(int, input().split())
        veneer.append((x1, y1, x2, y2))
        xp.add(x1)
        xp.add(x2)
        yp.add(y1)
        yp.add(y2)
    xp = list(xp)
    yp = list(yp)
    # 2-D difference array over the compressed grid.
    wa = [[0 for x in range(len(xp) + 1)] for y in range(len(yp) + 1)]
    print()
    for v in veneer:
        # NOTE(review): v is (x1, y1, x2, y2) but v[1] (y1) is bisected into
        # the x coordinates and v[2] (x2) into the y coordinates -- this looks
        # transposed; confirm the intended index order.
        xi1 = bisect(xp, v[0])
        xi2 = bisect(xp, v[1])
        yi1 = bisect(yp, v[2])
        yi2 = bisect(yp, v[3])
        print(xi1, yi1, xi2, yi2)
        wa[yi1][xi1] += 1
        wa[yi2 + 1][xi1] -=1
        wa[yi1][xi2 + 1] -=1
    # 2-D prefix sum to recover per-cell coverage counts.
    mem = [[0 for x in xp] for y in yp]
    for y, _ in enumerate(yp):
        for x, _ in enumerate(xp):
            mem[y][x] += wa[y][x]
            if y > 0:
                mem[y][x] += mem[y - 1][x]
            if x > 0:
                mem[y][x] += mem[y][x - 1]
        print(wa[y])
|
7,823 | 34009d1aa145f4f5c55d0c5f5945c3793fbc6429 | with open('vocabulary.txt', 'r') as f:
    for line in f:
        # Each line: "<answer>: <question>".
        information = line.strip().split(': ')
        # print(information[0], information[1])
        question = information[1]
        answer = information[0]
        # Ask the user and compare against the expected answer.
        my_answer = input(f'{question}:')
        if my_answer == answer:
            print('맞았습니다!')
        else:
            print(f'아쉽습니다. 정답은 {answer}입니다.')
|
7,824 | b1a6593e7b528238e7be5ea6da4d1bfee0d78067 | import serial
import mysql.connector
# Python 2 script: read sensor frames from the Raspberry Pi UART and insert
# them into a remote MySQL table.  Expected frame: ";1;<addr>;<temp>;<flow>".
ser = serial.Serial('/dev/serial0', 9600)
while True:
    data = ser.readline()
    # NOTE(review): under Python 3 readline() returns bytes and data[0] is an
    # int, so this comparison only works on Python 2.
    if data[0]==";":
        print(data)
        data = data.split(";")
        if data[1] == "1":
            fonction = data[1]
            add = data[2]
            tmp = data[3]
            debit = data[4]
            # Acknowledge reception to the sender.
            ser.write([123])
            #test affichage
            print "Save in DB"
            print "fonction :",fonction
            print "addresse :",add
            print "temperature :",tmp
            print "Debit :",debit
            # NOTE(review): credentials are hardcoded in source -- move them
            # to configuration/secrets.
            conn = mysql.connector.connect(host="mysql-ormeaux.alwaysdata.net",user="ormeaux",password="pGYw478Vy", database="ormeaux_29")
            cursor = conn.cursor()
            # NOTE(review): duplicated cursor creation; the second is redundant.
            cursor = conn.cursor()
            requete = "INSERT INTO mesures(id_bassins,temperature, debit) VALUES (%s, %s, %s)"
            valeurs = (add,tmp,debit)
            cursor.execute(requete,valeurs)
            conn.commit()
            conn.close()
|
7,825 | 0509afdce0d28cc04f4452472881fe9c5e4fbcc4 | from rest_framework import serializers
from .models import *
class MovieSerializer(serializers.Serializer):
    """Plain DRF serializer carrying a list of movie-name strings."""
    movie_name = serializers.ListField(child=serializers.CharField())
class FilmSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every field of the Movie model."""
    class Meta:
        model = Movie
        fields = '__all__'
7,826 | c02f46e8d89dd4b141c86df461ecbb8ed608b61b | #!/usr/bin/python
import gzip
import os
infiles = []
ids=[]
ages=[]
# Parse the metadata file: two header lines, then one tab-separated record
# per sample: <file> <id> <age> ...  (Python 2 script.)
with open('all_C_metadata.txt') as f:
    f.readline()
    f.readline()
    for line in f:
        infiles.append(line.split('\t')[0])
        ids.append(line.split('\t')[1])
        ages.append(line.split('\t')[2])
with open('all_C_samples/diversity.txt', 'w') as of:
    #this stuff is specific to what i used if for before - not sure if you will need it
    of.write('sample'+'\t' + 'age' + '\t' + 'd50' + '\n')
    for i in range(len(infiles)):
        infile = infiles[i]
        # Decompress next to the original (-k keeps the .gz file).
        os.system('gunzip -k %s'%infile)
        with open(infile[:-3]) as f:
            print infile
            d50_not_reached=1
            d50_clone=0
            clone_count=0
            read_count=0
            total_clones=0
            f.readline()
            # Column 1 holds each clone's read fraction.  The d50 clone is
            # the first whose cumulative fraction reaches 0.5; presumably the
            # file is sorted by abundance (descending) -- TODO confirm.
            for line in f:
                total_clones+=1
                read_count+=float(line.strip().split('\t')[1])
                clone_count+=1
                if read_count>=.5 and d50_not_reached:
                    d50_clone=clone_count
                    d50_not_reached=0
        # Remove the decompressed copy and record the d50 fraction.
        os.system('rm %s'%infile[:-3])
        of.write(ids[i] + '\t' + ages[i] + '\t' + str(d50_clone/float(total_clones))+'\n')
def d50(clones, num_Reads):
    """
    clones should be a dict of clones
    num_Reads is a property of a rep_seq object, so you can just
    pass that if you are finding the d50 of the whole repertoire.
    However, I don't think it is a property of each VJ pair, but you can pretty
    easily calculate it with something like len(Reads_split_by_VJ[the_VJ_pair] )
    This function will determine what percent of the top clones
    make up 50% of reads (i.e. do the top X% of clones make up
    50 % of reads? )
    """
    d50_amount = num_Reads/2
    # NOTE(review): under Python 2 this is integer division when num_Reads is
    # an int -- confirm truncation is intended.
    read_count=0
    # NOTE(review): assumes the dict iterates most-abundant clone first and
    # that keys are numeric ranks (used in the ratio below) -- verify.
    for i in clones:
        read_count+=clones[i].num_reads
        if read_count>=d50_amount:
            return i/float(len(clones))
|
7,827 | bf98e81c160d13b79ebe9d6f0487b57ad64d1322 | """
Author: Le Bui Ngoc Khang
Date: 12/07/1997
Program: Write a script that inputs a line of plaintext and a distance value and outputs an encrypted text using
a Caesar cipher. The script should work for any printable characters.
Solution:
Enter a message: hello world
Enter distance value: 3
khoor#zruog
"""
# Request the inputs
plainText = input("Enter a message: ")
distance = int(input("Enter distance value: "))
code = ""
for ch in plainText:
    ordvalue = ord(ch)
    cipherValue = ordvalue + distance
    # Wrap past code point 127 back to the start of the ASCII range;
    # equivalent to ordvalue + distance - 128.
    # NOTE(review): a single wrap assumes 0 <= distance <= 127 -- confirm.
    if cipherValue > 127:
        cipherValue = distance - (127 - ordvalue + 1)
    code += chr(cipherValue)
print(code)
7,828 | b164dc8183c0dc460aa20883553fc73acd1e45ec | def count_singlekey(inputDict, keyword):
# sample input
# inputDict = {
# abName1: { dna: 'atgc', protein: 'x' }
# abName2: { dna: 'ctga', protein: 'y' }
# }
countDict = {}
for abName, abInfo in inputDict.iteritems():
if countDict.has_key(abInfo[keyword]):
countDict[abInfo[keyword]][1] += 1
else:
countDict[abInfo[keyword]] = [abName, 1]
return countDict
def count_multikey(inputDict, keywords):
    # sample input
    # inputDict = {
    #    abName1: { dna: 'atgc', protein: 'x' }
    #    abName2: { dna: 'ctga', protein: 'y' }
    # }
    # Count entries grouped by the combination of several info keys; returns
    # {tuple(values in sorted-keyword order): [first_abName_seen, count]}.
    # Fixes: dict.iteritems()/has_key() do not exist on Python 3 (replaced by
    # .items()/`in`), and the caller's `keywords` list is no longer mutated
    # (sorted() instead of list.sort()).
    key_order = tuple(sorted(keywords))
    countDict = {}
    for abName, abInfo in inputDict.items():
        combinedKey = tuple(abInfo[k] for k in key_order)
        if combinedKey in countDict:
            countDict[combinedKey][1] += 1
        else:
            countDict[combinedKey] = [abName, 1]
    return countDict
|
7,829 | f6e0215f9992ceab51887aab6a19f58a5d013eb4 | from ad_api.base import Client, sp_endpoint, fill_query_params, ApiResponse
class CampaignNegativeKeywords(Client):
    @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='GET')
    def get_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        get_campaign_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse
        Gets a campaign negative keyword specified by identifier.
        path **keywordId**:*number* | Required. The identifier of an existing keyword.
        Returns:
            ApiResponse
        """
        # Splice keywordId into the endpoint path; remaining kwargs become query params.
        return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
    @sp_endpoint('/v2/sp/campaignNegativeKeywords/{}', method='DELETE')
    def delete_campaign_negative_keyword(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        delete_campaign_negative_keyword(self, keywordId, \*\*kwargs) -> ApiResponse
        Archives a campaign negative keyword.
        path **keywordId**:*number* | Required. The identifier of an existing keyword.
        Returns:
            ApiResponse
        """
        # Splice keywordId into the endpoint path; remaining kwargs become query params.
        return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
    @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended/{}', method='GET')
    def get_campaign_negative_keyword_extended(self, keywordId, **kwargs) -> ApiResponse:
        r"""
        get_campaign_negative_keyword_extended(self, keywordId, \*\*kwargs) -> ApiResponse
        Gets a campaign negative keyword that has extended data fields.
        path **keywordId**:*number* | Required. The identifier of an existing keyword.
        Returns:
            ApiResponse
        """
        # Splice keywordId into the endpoint path; remaining kwargs become query params.
        return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)
    @sp_endpoint('/v2/sp/campaignNegativeKeywords/extended', method='GET')
    def list_campaign_negative_keywords_extended(self, **kwargs) -> ApiResponse:
        r"""
        list_campaign_negative_keywords_extended(self, \*\*kwargs) -> ApiResponse
        Gets a list of campaign negative keywords that have extended data fields.
        query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0
        query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
        query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.
        query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.
        query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
        query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.
        Returns:
            ApiResponse
        """
        # All kwargs (minus the injected 'path') are forwarded as query parameters.
        return self._request(kwargs.pop('path'), params=kwargs)
@sp_endpoint('/v2/sp/campaignNegativeKeywords', method='GET')
def list_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
    r"""
    list_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse

    Gets a list of campaign negative keywords.

    query **startIndex**:*integer* | Optional. 0-indexed record offset for the result set. Default value : 0
    query **count**:*integer* | Optional. Number of records to include in the paged response. Defaults to max page size.
    query **matchTypeFilter**:*string* | Optional. Restricts results to keywords with match types within the specified comma-separated list. Available values : negativePhrase, negativeExact.
    query **keywordText**:*string* | Optional. Restricts results to keywords that match the specified text exactly.
    query **campaignIdFilter**:*string* | Optional. A comma-delimited list of campaign identifiers.
    query **keywordIdFilter**:*string* | Optional. Restricts results to keywords associated with campaigns specified by identifier in the comma-delimited list.

    Returns:
        ApiResponse
    """
    # Plain paged GET: no path parameters to substitute.
    url_path = kwargs.pop('path')
    return self._request(url_path, params=kwargs)
@sp_endpoint('/v2/sp/campaignNegativeKeywords', method='POST')
def create_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
    r"""
    create_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse:

    Creates one or more campaign negative keywords.

    body: | REQUIRED {'description': 'An array of keyword objects.'}

    | '**campaignId**': *number*, {'description': 'The identifier of the campaign to which the keyword is associated.'}
    | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ enabled ]'}
    | '**keywordText**': *string*, {'description': 'The text of the expression to match against a search query.'}
    | '**matchType**': *string*, {'description': 'The type of match.' , 'Enum': '[ negativeExact, negativePhrase ]'}

    Returns:
        ApiResponse
    """
    # POST: the 'body' kwarg is the payload; remaining kwargs are query params.
    url_path = kwargs.pop('path')
    payload = kwargs.pop('body')
    return self._request(url_path, data=payload, params=kwargs)
@sp_endpoint('/v2/sp/campaignNegativeKeywords', method='PUT')
def edit_campaign_negative_keywords(self, **kwargs) -> ApiResponse:
    r"""
    edit_campaign_negative_keywords(self, \*\*kwargs) -> ApiResponse:

    Updates one or more campaign negative keywords.

    body: | REQUIRED {'description': 'An array of campaign negative keywords with updated values.'}

    | '**keywordId**': *number*, {'description': 'The identifier of the keyword to update.'}
    | '**state**': *string*, {'description': 'The current resource state.' , 'Enum': '[ deleted ]'}

    Returns:
        ApiResponse
    """
    # PUT: the 'body' kwarg is the payload; remaining kwargs are query params.
    url_path = kwargs.pop('path')
    payload = kwargs.pop('body')
    return self._request(url_path, data=payload, params=kwargs)
|
7,830 | d99fd3dc63f6a40dde5a6230111b9f3598d3c5fd | from torchvision import datasets, transforms
import torch
def load_data(data_folder, batch_size, train, num_workers=0, **kwargs):
    """Build an ImageFolder DataLoader for *data_folder*.

    :param data_folder: root directory in torchvision ImageFolder layout.
    :param batch_size: batch size handed to the DataLoader.
    :param train: truthy -> augmenting 'train' pipeline with shuffling and
        drop_last; falsy -> deterministic 'test' pipeline.
    :param num_workers: number of DataLoader worker processes.
    :param kwargs: forwarded to get_data_loader / DataLoader.
    :return: tuple ``(data_loader, n_class)``.
    """
    transform = {
        'train': transforms.Compose(
            [transforms.Resize([256, 256]),
             transforms.RandomCrop(224),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])]),
        'test': transforms.Compose(
            [transforms.Resize([224, 224]),
             transforms.ToTensor(),
             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.229, 0.224, 0.225])])
    }
    data = datasets.ImageFolder(root=data_folder,
                                transform=transform['train' if train else 'test'])
    # Idiom fix: `bool(train)` replaces the redundant `True if train else False`.
    data_loader = get_data_loader(data, batch_size=batch_size,
                                  shuffle=bool(train),
                                  num_workers=num_workers,
                                  drop_last=bool(train), **kwargs)
    n_class = len(data.classes)
    return data_loader, n_class
def get_data_loader(dataset, batch_size, shuffle=True, drop_last=False, num_workers=0, infinite_data_loader=False, **kwargs):
    """Build a finite or infinite DataLoader over *dataset*.

    BUG FIX: the ``shuffle`` argument was previously ignored — both branches
    hard-coded ``shuffle=True`` — so callers requesting deterministic order
    (e.g. test loaders) silently received shuffled data.

    :param infinite_data_loader: when True, wrap in InfiniteDataLoader so
        iteration never raises StopIteration.
    """
    if infinite_data_loader:
        return InfiniteDataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                                  drop_last=drop_last, num_workers=num_workers, **kwargs)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                                       drop_last=drop_last, num_workers=num_workers, **kwargs)
class _InfiniteSampler(torch.utils.data.Sampler):
"""Wraps another Sampler to yield an infinite stream."""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
for batch in self.sampler:
yield batch
class InfiniteDataLoader:
    """DataLoader wrapper that yields batches endlessly.

    Sampling is random: weighted (per-sample ``weights``) or uniform.
    """

    def __init__(self, dataset, batch_size, shuffle=True, drop_last=False,
                 num_workers=0, weights=None, **kwargs):
        # Choose the per-sample sampler: weighted if weights were supplied,
        # otherwise plain uniform random sampling without replacement.
        if weights is None:
            sampler = torch.utils.data.RandomSampler(dataset, replacement=False)
        else:
            sampler = torch.utils.data.WeightedRandomSampler(
                weights, replacement=False, num_samples=batch_size)
        batch_sampler = torch.utils.data.BatchSampler(
            sampler, batch_size=batch_size, drop_last=drop_last)
        # Wrap the batch sampler so the underlying DataLoader never stops.
        self._infinite_iterator = iter(torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=_InfiniteSampler(batch_sampler)))

    def __iter__(self):
        while True:
            yield next(self._infinite_iterator)

    def __len__(self):
        return 0  # Always return 0
7,831 | 04822e735c9c27f0e0fcc9727bcc38d2da84dee6 | import logging
from django.contrib.auth import get_user_model
from django.db import models
from rest_framework import serializers
from rest_framework.test import APITestCase
from ..autodocs.docs import ApiDocumentation
from .utils import Deferred
log = logging.getLogger(__name__)
def get_serializer(endpoint, method_name, dict_key='in'):
    """
    Return the serializer class for the given endpoint and method, if any.

    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: HTTP method name.
    :param str dict_key: Key of the serializer dict, either 'in' or 'out'.
    :return: Serializer class, or None.
    """
    # When testing PATCH with no serializer of its own, fall back to PUT's.
    candidates = [method_name] if method_name != 'PATCH' else [method_name, 'PUT']
    for candidate in candidates:
        entry = endpoint.serializer_classes.get(candidate) \
            if hasattr(endpoint.serializer_classes, 'get') else None
        if candidate in endpoint.serializer_classes:
            entry = endpoint.serializer_classes[candidate]
            if isinstance(entry, dict) and dict_key in entry:
                return entry[dict_key]
    return None
def resolve_deferred(value):
    """
    Recursively replace every `Deferred` placeholder with the pk of a
    created/fetched instance of its model.

    :param any value: Any object (Deferred, dict, list, or scalar).
    """
    if isinstance(value, Deferred):
        return model_instance(value.model, value.force_create).pk
    if isinstance(value, dict):
        return {resolve_deferred(key): resolve_deferred(val)
                for key, val in value.items()}
    if isinstance(value, list):
        return [resolve_deferred(item) for item in value]
    # Scalars pass through untouched.
    return value
def model_instance(model, force_create=False):
    """
    Create (or fetch an existing) instance of *model*, filling required
    fields with generic test values.

    :param any model: Model class.
    :param bool force_create: Always create a new object instead of reusing
        an existing one.
    :return: Model instance.
    :rtype: models.Model.
    """
    if not force_create and model.objects.all().count() > 0:
        return model.objects.first()
    data = {}
    for field in model._meta.get_fields():
        # Only fill fields the user must supply.
        if field.auto_created or field.blank:
            continue
        if hasattr(field, 'choices') and len(field.choices) > 0:
            data[field.name] = field.choices[0][0]
        elif isinstance(field, models.IntegerField):
            data[field.name] = 1
        elif isinstance(field, models.ForeignKey):
            # Recursively satisfy FK dependencies.
            data[field.name] = model_instance(field.related_model)
        elif isinstance(field, models.CharField):
            data[field.name] = 'test'
    return model.objects.create(**data)
class AutoTestCase(APITestCase):
    """
    Test case for automatically exercising REST endpoints.

    Concrete ``test_*`` methods are attached to this class at module import
    time (see the bottom of this module); each one dispatches through
    ``base_test_method`` using its entry in ``REQUESTS_DATA``.
    """

    @classmethod
    def setUpClass(cls):
        """
        Create the user shared by all tests, wired in via `settings.AUTH_USER_PK`.
        """
        super(AutoTestCase, cls).setUpClass()
        model_instance(get_user_model())

    def setUp(self):
        """
        Prepare the test request: read this test's tuple from REQUESTS_DATA
        and create / fetch the objects whose pks are used in the URL.
        """
        # NOTE(review): .get() returns None for an unknown test name, which
        # would make this unpack raise TypeError — consider [] indexing.
        self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)
        path = self.endpoint.path
        if '<pk>' in path:
            obj = model_instance(self.endpoint.callback.cls.queryset.model)
            path = path.replace('<pk>', str(obj.pk))
        self.path = path
        # Let the view class customise per-test setup if it defines a hook.
        if hasattr(self.endpoint.callback.cls, 'test_setup'):
            getattr(self.endpoint.callback.cls, 'test_setup')(self)

    def base_test_method(self):
        """
        Exercise the endpoint this generated test was built for.
        """
        request_method = getattr(self.client, self.method.lower())
        if self.serializer:
            if self.request_type == 'all':
                # Request carrying the full input payload.
                data = self.prepare_request_data(self.serializer)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'only_required':
                # Request carrying only the required fields.
                data = self.prepare_request_data(self.serializer, only_required=True)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'without_required':
                # Request missing one of the required fields.
                data = self.prepare_request_data(self.serializer, only_required=True)
                data.popitem()
                response = self.send_request(request_method, self.path, data, 'json')
                self.assertTrue(400 <= response.status_code < 500)
        else:
            # Request without a payload.
            response = self.send_request(request_method, self.path)
            self.check_response_is_valid(response)

    def prepare_request_data(self, field, only_required=False):
        """
        Build request data for a serializer or a single field.

        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Field or serializer object.
        :param bool only_required: Include only required fields.
        :return: Data for the client to send.
        :rtype: list, dict.
        """
        # A serializer class rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.prepare_request_data(field())
        # Or a test value was installed via `test_helper_factory`.
        elif hasattr(field, 'test_helper_value'):
            return resolve_deferred(field.test_helper_value)
        # Or a list serializer.
        elif isinstance(field, serializers.ListSerializer):
            return [self.prepare_request_data(field.child)]
        # Or a serializer instance.
        elif isinstance(field, serializers.BaseSerializer):
            return {k: self.prepare_request_data(v) for k, v in field.get_fields().items() \
                    if (not only_required) or (only_required and v.required)}
        # Or a plain field.
        elif isinstance(field, serializers.ChoiceField):
            for val, verbose in field.choices.items():
                return val
        elif isinstance(field, serializers.PrimaryKeyRelatedField):
            return model_instance(field.queryset.model).pk
        elif isinstance(field, serializers.CharField):
            return 'test'
        elif isinstance(field, serializers.IntegerField):
            return 1

    def send_request(self, request_method, path, data=None, format_type=None):
        """
        Send the request.

        :param method request_method: Client method.
        :param str path: URL.
        :param dict data: Request payload.
        :param str format_type: Payload format.
        :return: Response.
        :rtype: `rest_framework.response.Response`.
        """
        kwargs = dict(data=data, format=format_type)
        if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
            kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)
        self.data = data
        # Log line text is runtime output and is kept verbatim.
        print_strings = ['Отправка {} на {}'.format(request_method.__name__, path)]
        if data is not None:
            print_strings.append('с данными')
        log.debug(' '.join(print_strings + ['\n']))
        return request_method(path, **kwargs)

    def check_response_is_valid(self, response):
        """
        Assert the response is successful and matches the output serializer.

        :param `rest_framework.response.Response` response: Response.
        """
        self.assertTrue(200 <= response.status_code < 400)
        response_serializer = get_serializer(self.endpoint, self.method, 'out')
        if response_serializer:
            self.check_response_data(response.data, response_serializer)

    def check_response_data(self, data, field):
        """
        Validate the data in the response.

        :param any data: `Response.data` or one of its values.
        :param any field: Serializer or field to compare the response data against.
        """
        # @TODO: Validation via serializer data is currently not possible:
        # something happens to the QuerySet that makes serializer.data raise RuntimeError.
        '''
        if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \
                'out' in self.endpoint.serializer_classes[method_name]:
            serializer = self.endpoint.serializer_classes[method_name]['out'](
                self.endpoint.callback.cls.queryset, many=True)
            self.assertEqual(response.data, serializer.data)
        '''
        # A serializer class rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.check_response_data(data, field())
        '''
        if 'results' in data and 'count' in data:
            for item in data['results']:
                self.check_response_data(item, out_fields)
        else:
            for field_name, value in data.items():
                try:
                    field_data = fields[field_name]
                except:
                    import pdb; pdb.set_trace()
                # Проверка наличия филда среди ожидаемых в ответе
                self.assertTrue(field_name in available_fields)
                available_fields.remove(field_name)
                if field_name in required_fields:
                    required_fields.remove(field_name)
                if field_data['sub_fields']:
                    if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
                        for key, item in data[field_name].items():
                            self.check_response_data(item, field_data['sub_fields'])
                    else:
                        self.check_response_data(data[field_name], field_data['sub_fields'])
                else:
                    field_instance = field_data['field_instance']
                    # Проверка значения если филд обязателен или имеется значение в ответе
                    if field_data['required'] or value is not None:
                        # Проверка типа филда
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))
                        # Проверка коррекности значения (иначе возникнет исключение)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)
            # Проверяем чтобы все обязательные поля в ответе были
            self.assertEqual(len(required_fields), 0)
        '''
# Collect every documented endpoint.
ENDPOINTS = ApiDocumentation().get_endpoints()
ENDPOINTS = [ep for ep in ENDPOINTS]

# Build the list of request tuples to generate tests for.
REQUESTS_LIST = []
for endpoint in ENDPOINTS:
    for method in endpoint.allowed_methods:
        serializer = get_serializer(endpoint, method)
        if serializer:
            # @TODO: finish testing requests that omit required data (without_required).
            # for request_type in ('all', 'only_required', 'without_required'):
            for request_type in ('all', 'only_required'):
                REQUESTS_LIST.append((endpoint, method, serializer, request_type))
        else:
            REQUESTS_LIST.append((endpoint, method, serializer, None))

REQUESTS_DATA = {}
# Attach one generated test method per request tuple; base_test_method
# looks its parameters up in REQUESTS_DATA by the method's own name.
for endpoint, method, serializer, request_type in REQUESTS_LIST:
    method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)
    REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)
    setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)
|
7,832 | e15ea7d167aad470d0a2d95a8a328b35181e4dc3 | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
#
# Simple looger for hsds
#
import asyncio
from aiohttp.web_exceptions import HTTPServiceUnavailable
from .util.domainUtil import getDomainFromRequest
# Per-process counters: requests served per method plus the last task count.
req_count = {"GET": 0, "POST": 0, "PUT": 0, "DELETE": 0, "num_tasks": 0}
# Number of log lines emitted per level.
log_count = {"DEBUG": 0, "INFO": 0, "WARN": 0, "ERROR": 0}
# the following defaults will be adjusted by the app
config = {"log_level": "DEBUG", "prefix": ""}
def debug(msg):
    """Print a DEBUG line; emitted (and counted) only at DEBUG level."""
    if config["log_level"] != "DEBUG":
        return
    print(config["prefix"] + "DEBUG> " + msg)
    log_count["DEBUG"] += 1
def info(msg):
    """Print an INFO line unless the configured level suppresses INFO."""
    if config["log_level"] in ("ERROR", "WARNING", "WARN"):
        return
    print(config["prefix"] + "INFO> " + msg)
    log_count["INFO"] += 1
def warn(msg):
    """Print a WARN line unless the level is ERROR."""
    if config.get("log_level") == "ERROR":
        return
    print(config["prefix"] + "WARN> " + msg)
    log_count["WARN"] += 1
def warning(msg):
    """stdlib-logging-style alias for warn().

    Previously a copy-paste duplicate of warn()'s body; delegating keeps the
    two spellings from drifting apart (output and WARN count are identical).
    """
    warn(msg)
def error(msg):
    """Print an ERROR line; ERROR output is never suppressed."""
    print(config["prefix"] + "ERROR> " + msg)
    log_count["ERROR"] += 1
def request(req):
    """Log an incoming request and apply admission control.

    Infra/status paths are always served; otherwise a 503 is raised when the
    node is not READY or (on SN nodes) when too many tasks are in flight.
    """
    app = req.app
    domain = getDomainFromRequest(req, validate=False)
    if domain is None:
        print("REQ> {}: {}".format(req.method, req.path))
    else:
        print("REQ> {}: {} [{}]".format(req.method, req.path, domain))
    # always service these state requests regardless of node state and task load
    # (fix: "/register" was listed twice)
    if req.path in ("/about", "/register", "/info", "/nodeinfo", "/nodestate"):
        return
    node_state = app["node_state"] if "node_state" in app else None
    if node_state != "READY":
        warning(f"returning 503 - node_state: {node_state}")
        raise HTTPServiceUnavailable()
    if req.method in ("GET", "POST", "PUT", "DELETE"):
        req_count[req.method] += 1
    # BUG FIX: asyncio.Task.all_tasks() was removed in Python 3.9 — use
    # asyncio.all_tasks(); also enumerate the task set only once.
    tasks = asyncio.all_tasks()
    num_tasks = len(tasks)
    active_tasks = len([task for task in tasks if not task.done()])
    req_count["num_tasks"] = num_tasks
    if config["log_level"] == "DEBUG":
        debug(f"num tasks: {num_tasks} active tasks: {active_tasks}")
    max_task_count = app["max_task_count"]
    if app["node_type"] == "sn" and max_task_count and active_tasks > max_task_count:
        warning(f"more than {max_task_count} tasks, returning 503")
        raise HTTPServiceUnavailable()
def response(req, resp=None, code=None, message=None):
    """Log a response line at a level derived from the status code.

    Either pass a response object (``resp``) or an explicit ``code`` (and
    optionally ``message``).
    """
    level = "INFO"
    if code is None:
        # resp must be provided when code is not.
        # NOTE(review): indentation in the source was ambiguous; the message
        # fallback is assumed to apply only when code comes from resp.
        code = resp.status
        if message is None:
            message = resp.reason
    if code > 399:
        level = "ERROR" if code >= 500 else "WARN"
    log_level = config["log_level"]
    prefix = config["prefix"]
    # Print when the configured level admits this line's level.
    if log_level in ("DEBUG", "INFO") or (log_level == "WARN" and level != "INFO") or (log_level == "ERROR" and level == "ERROR"):
        print("{}{} RSP> <{}> ({}): {}".format(prefix, level, code, message, req.path))
|
7,833 | 4ecd756b94b0cbab47a8072e9bccf26e2dd716d0 | import pytest
import numpy as np
from GSPA_DMC import SymmetrizeWfn as symm
def test_swap():
    # Smoke test: the symmetrization helpers run on a small slice of the
    # H3O walker data without raising.
    cds = np.load('h3o_data/ffinal_h3o.npy')
    dws = np.load('h3o_data/ffinal_h3o_dw.npy')
    # Keep only 10 walkers to keep the test fast.
    cds = cds[:10]
    a = symm.swap_two_atoms(cds, dws, atm_1=1, atm_2=2)
    b = symm.swap_group(cds, dws, atm_list_1=[0, 1], atm_list_2=[2, 3])
    # NOTE(review): vacuous assertion — this test only verifies the calls
    # above did not raise; consider asserting on `a` / `b` instead.
    assert True
7,834 | d218b72d1992a30ad07a1edca1caf04b7b1985f6 | from introduction import give_speech
from staring import stare_at_people
from dow_jones import visualize_dow_jones
from art_critic import give_art_critiques
from hipster import try_hipster_social_interaction
from empathy import share_feelings_with_everyone
from slapstick import perform_slapstick_humor
from ending import finish
def performance():
    """Run the full robot performance, act by act, start to finish."""
    acts = (
        give_speech,
        visualize_dow_jones,
        give_art_critiques,
        stare_at_people,
        try_hipster_social_interaction,
        share_feelings_with_everyone,
        perform_slapstick_humor,
        finish,
    )
    # Order matters: same sequence as the original show.
    for act in acts:
        act()
if __name__ == '__main__':
    # Run the show only when executed as a script, not on import.
    performance()
|
7,835 | 7edd833103e1de92e57559c8a75379c26266963b | # -*- encoding: utf-8 -*-
from openerp.tests.common import TransactionCase
from openerp.exceptions import ValidationError
class GlobalTestOpenAcademySession(TransactionCase):
    '''
    Global test for the openacademy session model.
    Tests session creation and that constraints trigger as expected.
    '''

    # Pseudo-constructor methods
    def setUp(self):
        # Define the global records used by the test methods below.
        super(GlobalTestOpenAcademySession, self).setUp()
        self.session = self.env['openacademy.session']
        self.partner_vauxoo = self.env.ref('base.res_partner_23')
        self.course_id = self.env.ref('openacademy.course3')
        self.partner_attende = self.env.ref('base.res_partner_5')

    # Generic Methods

    # Test Methods
    def test_05_instructor_is_attendee(self):
        '''
        Check raise: "A session's instructor can't be an attendee"
        '''
        with self.assertRaisesRegexp(
                ValidationError,
                "A session's instructor can't be an attendee"):
            self.session.create({
                'name': 'Session Test 1',
                'seats': 1,
                'user_id': self.partner_vauxoo.id,
                'attendee_ids': [(6, 0, [self.partner_vauxoo.id])],
                'course_id': self.course_id.id
            })

    def test_10_wkf_done(self):
        '''
        Check that the workflow works fine: draft -> confirmed -> done.
        '''
        session_test = self.session.create({
            'name': 'Session Test 2',
            'seats': 2,
            'user_id': self.partner_vauxoo.id,
            'attendee_ids': [(6, 0, [self.partner_attende.id])],
            'course_id': self.course_id.id
        })
        # Check initial state
        self.assertEqual(session_test.state, 'draft', 'Initial state should '
                         'be in draft')
        # Trigger the next transition and check the resulting state
        session_test.signal_workflow('button_confirm')
        self.assertEqual(session_test.state, 'confirmed', "Signal Confirm "
                         "don't work")
        # Trigger the next transition and check the resulting state
        session_test.signal_workflow('button_done')
        self.assertEqual(session_test.state, 'done', "Signal Done don't work")
        # self.env.cr.commit() Only for test data generated for test.
        # Please don't use
|
7,836 | 94e9e7c4c09c8c4de4c8f2649707a949d5f5f856 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.db.models import Max
from django.core.validators import RegexValidator
from django.utils import timezone
class User(AbstractUser):
    """Auth user extended with role flags and a unique (optional) email."""
    # Role flags consumed by the developer / marketing dashboards.
    is_developer = models.BooleanField('developer status', default=False)
    is_marketing = models.BooleanField('marketing status', default=False)
    email = models.EmailField(unique=True, null=True, blank=True)

    def __str__(self):
        return self.username
class Application(models.Model):
    """Registered application; pk is a generated "APP###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    app_code = models.CharField(max_length=30, blank=True, null=True)
    name = models.CharField(max_length=100, blank=True, null=True)
    is_archived = models.BooleanField(default=False)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Assign the next sequential "APP###" id, then save.

        BUG FIX: this used to call ``super().save(*kwargs)``, which unpacks
        the kwargs dict's *keys* as positional arguments (so e.g.
        ``save(using=...)`` became ``save('using')``); positional args were
        also silently dropped.
        NOTE: the read-then-write id allocation is not concurrency-safe.
        """
        if not self.id:
            id_max = Application.objects.aggregate(id_max=Max('id'))['id_max']
            # Next sequence number from the numeric suffix of the highest id.
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "APP" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Page(models.Model):
    """Page within an application; pk is a generated "PG###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=5)
    application = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='applications')
    name = models.CharField(max_length=100)
    is_archived = models.BooleanField(default=False)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Page.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "PG" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Location(models.Model):
    """Banner slot on a page; pk is a generated "LOC###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    loc_code = models.CharField(max_length=30, null=True, blank=True, unique=True)
    page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='pages')
    is_slider = models.BooleanField(default=False)
    is_active = models.BooleanField(default=False)
    name = models.CharField(max_length=100)
    # Slot dimensions in pixels.
    width = models.IntegerField()
    height = models.IntegerField()

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Location.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "LOC" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Banner(models.Model):
    """Banner creative with its image asset; pk is a generated "BN###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=5)
    name = models.CharField(max_length=100)
    caption = models.TextField()
    description = models.TextField(blank=True, null=True)
    image = models.ImageField(upload_to='images/', verbose_name='Banner', blank=True)
    height = models.IntegerField()
    width = models.IntegerField()
    is_archived = models.BooleanField(default=False)

    def __str__(self):
        return self.name

    def delete(self, *args, **kwargs):
        # Remove the image file from storage along with the row.
        self.image.delete(save=False)
        super(Banner, self).delete(*args, **kwargs)

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Banner.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "BN" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Campaign(models.Model):
    """Scheduled campaign bound to a location; pk is a generated "CMP###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    location = models.ForeignKey(Location, on_delete=models.CASCADE, related_name='locations')
    campaign_code = models.CharField(max_length=30, null=True, blank=True)
    priority = models.IntegerField(null=True, blank=True)
    date_created = models.DateField(null=True, blank=True)
    date_updated = models.DateField(null=True, blank=True)
    valid_date_start = models.DateField(null=True, blank=True)
    valid_date_end = models.DateField(null=True, blank=True)

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Campaign.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "CMP" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Installation(models.Model):
    """Placement of a banner inside a campaign; pk is a generated "INS###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    banner = models.ForeignKey(Banner, on_delete=models.CASCADE, related_name='banners', blank=True, null=True)
    campaign = models.ForeignKey(Campaign, on_delete=models.CASCADE, related_name='campaigns')
    redirect = models.URLField(null=True, blank=True)

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Installation.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "INS" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
# How a contact list is obtained: generated randomly or uploaded as CSV.
source_choices = (
    ('random', 'Generate nomor secara acak'),
    ('csv', 'Upload file .csv'),
)
class ContactSource(models.Model):
    """Origin of a contact list; pk is a generated "CONSRC###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=9)
    source = models.CharField(max_length=30, choices=source_choices)

    def __str__(self):
        return self.source

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = ContactSource.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "CONSRC" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class Contact(models.Model):
    """Named contact list (numbers stored as a pickled file); pk "CON###"."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    source = models.ForeignKey(ContactSource, on_delete=models.CASCADE, related_name='contactsources')
    name = models.CharField(max_length=100)
    numbers = models.FileField(upload_to='pickles/contact/')
    # Soft-delete flag plus timestamp.
    is_deleted = models.BooleanField(default=False)
    deleted_datetime = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = Contact.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "CON" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class GenerateContact(models.Model):
    """Parameters used to auto-generate numbers for a contact list; pk "GENCON###"."""
    id = models.CharField(primary_key=True, editable=False, max_length=9)
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact')
    first_code = models.CharField(max_length=4, validators=[RegexValidator(r'^\d{0,10}$')])
    digits = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])
    generate_numbers = models.CharField(max_length=30, validators=[RegexValidator(r'^\d{0,10}$')])

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = GenerateContact.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "GENCON" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
# Delivery state of an SMS blast.
status_choices = (
    ('complete', 'Sudah Dikirim'),
    ('uncomplete', 'Belum Dikirim'),
)
class SMSBlast(models.Model):
    """An SMS message scheduled for sending; pk is a generated "SMS###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=6)
    message_title = models.CharField(max_length=100)
    # 160 chars: classic single-SMS payload limit.
    message_text = models.CharField(max_length=160)
    send_date = models.DateField(null=True, blank=True)
    send_time = models.TimeField(null=True, blank=True)
    # Send immediately instead of at send_date/send_time.
    is_now = models.BooleanField(default=False)

    def __str__(self):
        return self.message_title

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = SMSBlast.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "SMS" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class ContactAndSMS(models.Model):
    """Join table linking a contact list to an SMS blast; pk "CONANDSMS###"."""
    id = models.CharField(primary_key=True, editable=False, max_length=12)
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='smsncon_contact')
    smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsncon_smsblast')

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = ContactAndSMS.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "CONANDSMS" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class SMSBlastJob(models.Model):
    """Provider-side job created for a blast/contact pair; pk "SMSJOB###"."""
    id = models.CharField(primary_key=True, editable=False, max_length=9)
    job_id = models.CharField(max_length=100, blank=True, null=True)
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_job')
    smsblast = models.ForeignKey(SMSBlast, on_delete=models.CASCADE, related_name='smsblast_job')

    def __str__(self):
        return self.job_id

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = SMSBlastJob.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "SMSJOB" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
class SMSStatus(models.Model):
    """Pickled delivery-status blob for a job; pk is a generated "SMSSTAT###" code."""
    id = models.CharField(primary_key=True, editable=False, max_length=10)
    job = models.ForeignKey(SMSBlastJob, on_delete=models.CASCADE, related_name='job_status')
    contact = models.ForeignKey(Contact, on_delete=models.CASCADE, related_name='contact_status')
    status = models.FileField(upload_to='pickles/status/')

    def __str__(self):
        # NOTE(review): `job_id` here is the FK attname (the related
        # SMSBlastJob pk, e.g. "SMSJOB001"), not SMSBlastJob.job_id — confirm
        # this is the intended display value.
        return self.job_id

    def save(self, *args, **kwargs):
        # BUG FIX: was `super().save(*kwargs)` — unpacked the kwargs dict's
        # KEYS as positional arguments and dropped positional args.
        if not self.id:
            id_max = SMSStatus.objects.aggregate(id_max=Max('id'))['id_max']
            seq = int(id_max[-3:]) + 1 if id_max is not None else 1
            self.id = "SMSSTAT" + "{0:03d}".format(seq)
        super().save(*args, **kwargs)
7,837 | 8c055816def1c0a19e672ab4386f9b9a345b6323 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cProfile
import re
import pstats
import os
import functools
# cProfile.run('re.compile("foo|bar")')
def do_cprofile(filename):
    """
    Decorator factory: profile the wrapped function with cProfile and dump
    the stats (sorted by internal time) to *filename*.

    :param filename: path the pstats dump is written to.
    :return: a decorator.
    """
    def wrapper(func):
        @functools.wraps(func)
        def profiled_func(*args, **kwargs):
            # Profiling switch; originally intended to come from the
            # PROFILING environment variable.
            # DO_PROF = os.getenv('PROFILING')
            DO_PROF = True
            if DO_PROF:
                profiler = cProfile.Profile()
                profiler.enable()
                result = func(*args, **kwargs)
                profiler.disable()
                # Sort by internal time and write the dump.
                pstats.Stats(profiler).sort_stats('tottime').dump_stats(filename)
            else:
                result = func(*args, **kwargs)
            return result
        return profiled_func
    return wrapper
# print(f(5))
# A sample of catch the return result
class Memoized(object):
    """
    Caching descriptor/decorator for instance methods: results are memoized
    per (instance, args).

    BUG FIX: the cache key previously used only the positional args, so
    after ``a.f(x)`` every other instance's ``b.f(x)`` returned ``a``'s
    result. The bound instance is now part of the key.
    NOTE: cached instances stay referenced by the cache for its lifetime.
    """

    def __init__(self, func):
        self.func = func
        self.results = {}

    def __get__(self, instance, cls):
        # Remember the most recent owner; __call__ uses it as `self`.
        self.instance = instance
        return self

    def __call__(self, *args):
        # Key on the instance as well as the arguments (instance must be
        # hashable; plain objects are, via identity hash).
        key = (self.instance,) + args
        try:
            return self.results[key]
        except KeyError:
            self.results[key] = self.func(self.instance, *args)
            return self.results[key]
@do_cprofile('./ff.prof')
# @Memoized
def f(n):
    # Naive doubly-recursive Fibonacci; workload for the profiling demo.
    if n < 2:
        return n
    return f(n - 2) + f(n - 1)


# Demo run at import time: profiles f and overwrites ./ff.prof per call.
# NOTE(review): recursive calls re-enter the decorated wrapper, so nested
# cProfile enable() calls may conflict — confirm this runs on the target Python.
f(5)
f(5)
|
7,838 | 9f31694d80f2dcc50a76b32aa296871694d3644d | from machine import Pin, PWM
import time
# externe LED zit op pin D1 (GPIO5)
# External LED is wired to pin D1 (GPIO5).
PinNum = 5

# PWM initialisation: 60 Hz, start fully off.
pwm1 = PWM(Pin(PinNum))
pwm1.freq(60)
pwm1.duty(0)

step = 100
for i in range(10):
    # Brighten: raise the duty cycle in steps of 100 up to (but not
    # including) 1000, half a second per step.
    while step < 1000:
        pwm1.duty(step)
        time.sleep_ms(500)
        step += 100
    # Dim back down.
    # NOTE(review): dims in steps of 200 while brightening used 100, and
    # later cycles restart from 0 rather than 100 — this asymmetry looks
    # unintended; confirm the intended fade profile.
    while step > 0:
        pwm1.duty(step)
        time.sleep_ms(500)
        step -= 200

# Release the PWM peripheral.
pwm1.deinit()
7,839 | dd95d14f35b6a92b3363d99a616678da18733a61 | import os
import redis
class Carteiro():
    """Per-user parcel status store backed by Redis ("carteiro" = mailman).

    Layout: one Redis hash per user id, mapping package code -> status.
    Methods read from `self.user_dict`, a snapshot taken at construction.
    """

    # Class-level pool shared by all instances when REDIS_URL is set;
    # otherwise the empty string is a sentinel meaning "use a local Redis()".
    if os.environ.get("REDIS_URL") != None:
        redis_pool = redis.ConnectionPool.from_url(os.environ.get("REDIS_URL"))
    else:
        redis_pool = ''

    def __init__(self, id, pacote):
        # Connect through the shared pool when configured, else the client
        # defaults (localhost:6379).
        if os.environ.get("REDIS_URL") != None:
            self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
        else:
            self.redis_bd = redis.Redis()
        self.user_id = str(id)
        # Hash field key for this package; bytes to match redis' byte keys.
        self.pacote = bytes(str(pacote), 'ascii')
        # Snapshot of the user's hash at construction time.
        self.user_dict = self.redis_bd.hgetall(self.user_id)

    def guardar_status_encomenda(self, status):
        """Store *status* for this package under the user's hash."""
        # NOTE(review): hmset() is deprecated in redis-py >= 3.0 (use
        # hset(name, mapping=...)) — confirm the pinned client version.
        if self.redis_bd.exists(self.user_id):
            self.user_dict[self.pacote] = status
            self.redis_bd.hmset(self.user_id, self.user_dict)
        else:
            novo_user_dict = {self.pacote: status}
            self.redis_bd.hmset(self.user_id, novo_user_dict)

    def ler_carta(self):
        """Return the cached status for this package, decoded as UTF-8.

        Raises AttributeError when the package is absent (get() -> None).
        """
        carta = self.user_dict.get(self.pacote)
        carta = carta.decode(encoding='UTF-8')
        return carta

    def roubar_pacote(self):
        """Remove this package; drop the whole user key if it was the last.

        Raises ValueError for an unknown package code.
        """
        if self.pacote in self.user_dict:
            if len(self.user_dict) == 1:
                self.redis_bd.delete(self.user_id)
            else:
                self.redis_bd.hdel(self.user_id, self.pacote)
            # Keep the local snapshot consistent with Redis.
            del self.user_dict[self.pacote]
        else:
            raise ValueError('codigo nao existente na base de dados')

    def checar_existencia_pacote(self):
        """Return the cached status (truthy) if the package exists, else None."""
        return self.user_dict.get(self.pacote)
7,840 | 3fe98c865632c75c0ba0e1357379590f072bf662 | ../pyline/pyline.py |
7,841 | 44d9e628e31cdb36088b969da2f6e9af1b1d3efe | from collections import Counter
from copy import deepcopy
from itertools import count
from traceback import print_exc
#https://www.websudoku.com/?level=4
class SudukoBoard:
    """9x9 Sudoku board with constraint-propagating cells.

    Each Cell tracks which digits it could still be; assigning a value
    propagates "can't be" information to every peer in the same row,
    column and block, which can cascade into further assignments.
    """
    # Block edge length and full board edge length (3 and 9).
    side=3
    sz=side*side
    class Cell:
        """One board square: candidate tracking plus peer propagation."""
        def __init__(self,board,row,col):
            # _values[d-1] is None while digit d is still possible,
            # False once excluded, True when d is the assigned value.
            self._values= [None] * SudukoBoard.sz
            self._value=None
            # The row/column/block Sets this cell belongs to.
            self.sets=[]
            self.row=row
            self.col=col
            # Count of digits still possible for this cell.
            self.open=SudukoBoard.sz
            self.board=board
        def add_set(self,set):
            self.sets.append(set)
        @property
        def value(self):
            return self._value
        @value.setter
        def value(self,value):
            # Assign a digit and propagate exclusions to all peers.
            if self._value is not None and self._value!=value:
                raise ValueError("Conflicting value for cell",self.row,self.col,self._value,value)
            if self._value != value:
                self._value=value
                self._values=[False]*SudukoBoard.sz
                self._values[value-1]=True
                self.open=0
                self.board.open-=1
                # Every peer in the same row/col/block loses this digit.
                for s in self.sets:
                    for c in s.entries:
                        if c!=self:
                            c.cantbe(value)
        def cantbe(self, value):
            """Exclude a digit; auto-assign when one candidate remains."""
            if self._values[value - 1] == True:
                raise ValueError("Conflicting cant be for cell, already set",self.row,self.col,self._value,value)
            if self._values[value-1] != False:
                self._values[value-1]=False
                self.open -=1
                # Count remaining candidates, remembering the last one seen.
                cnt=0
                nidx=None
                for idx,v in enumerate(self._values):
                    if v is None:
                        cnt+=1
                        nidx=idx
                if cnt==1:
                    # Single candidate left: assign it (recursive propagation).
                    self.value=nidx+1
        def couldbe(self, value):
            # None = still possible, False = excluded, True = assigned.
            return self._values[value - 1]
        def couldbelist(self):
            """Digits (1..9) still possible for this cell."""
            return [idx+1 for idx,x in enumerate(self._values) if x is None]
    class Set:
        """A unit of nine cells (one row, column or block)."""
        def __init__(self):
            self.entries=[]
        def add_cell(self,cell):
            self.entries.append(cell)
            cell.add_set(self)
        def update(self,entry):
            # NOTE(review): appears unused and treats values as booleans
            # ("not value"); looks like leftover from an earlier design.
            value=entry.value
            for other in self.entries:
                if other==entry:
                    continue
                if other.value == value:
                    raise Exception("Illegal value")
                else:
                    other.value=not value
    def __init__(self):
        self.initial=0
        # Number of unsolved cells remaining on the board.
        self.open=SudukoBoard.sz**2
        self.cells=[]
        self.rows=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
        self.cols=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
        self.blks=[SudukoBoard.Set() for i in range(SudukoBoard.sz)]
        s3=SudukoBoard.side*SudukoBoard.sz
        for i in range(SudukoBoard.sz**2):
            cell=SudukoBoard.Cell(self,i//SudukoBoard.sz,i%SudukoBoard.sz)
            self.cells.append(cell)
        for cell in self.cells:
            self.rows[cell.row].add_cell(cell)
            self.cols[cell.col].add_cell(cell)
            # NOTE(review): this block index combines row and column in a
            # transposed order; it partitions the board consistently, but
            # block numbering may not match the conventional layout.
            self.blks[(cell.row)//SudukoBoard.side+((cell.col)//SudukoBoard.side)*SudukoBoard.side].add_cell(cell)
    def setup(self,txt):
        """Load a puzzle from comma-separated row strings ('.' = empty)."""
        trows=txt.split(",")
        if len(trows)!=SudukoBoard.sz:
            raise Exception("Incorrect number of rows")
        cnt=0
        for ridx,trow in enumerate(trows):
            # NOTE(review): re-checks len(trows) instead of len(trow), so
            # short/long rows are not actually detected - confirm intent.
            if len(trows) != SudukoBoard.sz:
                raise Exception("Incorrect number of columns row ",ridx)
            for cidx,c in enumerate(trow):
                if c != '.':
                    v=int(c)
                    cnt+=1
                    self.set(ridx,cidx,v)
        # print("Set ",ridx+1,cidx+1, " tot ",cnt," left ",self.open,
        #       " auto ",SudukoBoard.sz**2-self.open-cnt)
        # self.print()
    def set(self,row,col,value):
        """Assign a value to the cell at (row, col), with propagation."""
        self.rows[row].entries[col].value=value
    def print(self):
        """Render the board as text with block separators."""
        for ridx,r in enumerate(self.rows):
            for cidx,c in enumerate(r.entries):
                print("." if c.value is None else c.value,end='')
                if (cidx+1)%SudukoBoard.side == 0:
                    print("|",end='')
            print()
            if (ridx+1)%SudukoBoard.side == 0:
                print("{}".format("-"*(SudukoBoard.sz+SudukoBoard.side)))
    def solve(self,depth=0,guesses=[]):
        """Solve by propagation, falling back to depth-first guessing.

        Returns (board, guesses) - the solved board (possibly a deep copy)
        and the list of (row, col, value) guesses used.
        NOTE(review): mutable default argument `guesses=[]` - only safe
        because the method never mutates it in place (always `guesses+[...]`).
        """
        for i in range(1000):
            print("Iteration ",depth,i)
            # for c in self.cells:
            #     print(c.row,c.col,c.couldbelist(),c._value,c._values)
            open=[Counter([len(c.couldbelist()) for c in self.cells])]
            print("open cells",open)
            for c in self.cells:
                if c.open!=1:
                    continue
                if c.open != len(c.couldbelist()):
                    pass
                value=c.couldbelist()
                # NOTE(review): Cell defines no set() method, and
                # couldbelist() returns a list - this line would raise
                # AttributeError if reached. It appears unreachable because
                # cantbe() auto-assigns when one candidate remains (open
                # then drops to 0), but confirm.
                c.set(value)
            if self.open >0 and not 1 in open:
                print("We have to guess depth {} and {} cells open".format(depth,self.open))
                # Try every candidate in every cell on a deep copy, ranking
                # guesses by how few cells they leave open.
                bestguess=[]
                for c in self.cells:
                    for guess in c.couldbelist():
                        other=deepcopy(self)
                        try:
                            other.set(c.row,c.col,guess)
                            bestguess.append((other.open,(c.row,c.col,guess)))
                        except ValueError as e:
                            # Contradiction: this candidate is not viable.
                            pass
                        except Exception as e:
                            print_exc()
                for open,(row,col,guess) in sorted(bestguess):
                    print("Best guess ",row,col,guess,depth)
                    other = deepcopy(self)
                    other.set(row,col,guess)
                    soln,soln_guesses = other.solve(depth + 1,guesses+[(row,col,guess)])
                    if soln.open == 0:
                        print("guess return")
                        return soln,soln_guesses
            # if self.open == 0:
            #     print("Solved with {} guesses {}".format(depth,guesses))
            # self.print()
            return self,guesses
    def leftopen(self):
        """Recount unsolved cells (sanity check against self.open)."""
        cnt=0
        for c in self.cells:
            if c.value is None:
                cnt+=1
        if cnt != self.open:
            # NOTE(review): a bare string is always truthy, so this assert
            # can never fire as written.
            assert "BAD"
        return cnt
if __name__ == "__main__":
    board=SudukoBoard()
    # Sample puzzles: nine comma-separated rows, '.' marks an empty square
    # (difficulty names follow websudoku.com levels).
    evil="..1.4..6.,...8...2.,..4..9.3.,.48..76..,5.......9,..25..47.,.8.1..2..,.5...6...,.6..9.1.."
    evil2="..9..3.14,....96...,.2....9..,..8.....1,..12784..,6.....7..,..7....4.,...93....,46.8..3.."
    medium="8.4.7.6.5,....8237.,7......1.,35...8...,....9....,...4...61,.3......7,.9571....,4.6.3.1.2"
    hard="......1..,7..4.18..,..375..4.,4.1.7....,.9..8..7.,....9.6.5,.6..129..,..45.6..2,..2......"
    easy=".7.4..2..,2..5791..,.4......6,..261.35.,631...427,.54.328..,5......3.,..6157..4,..8..6.1."
    board.setup(evil2)
    board.print()
    print()
    # Solve via propagation plus guessing; returns the solved board and
    # the list of guesses that were needed.
    soln,guesses=board.solve()
    print("Final : guesses",guesses)
    soln.print()
    pass
7,842 | 5bd2cf2ae68708d2b1dbbe0323a5f83837f7b564 | import requests
from urllib.parse import urlparse, urlencode
from json import JSONDecodeError
from requests.exceptions import HTTPError
def validate_response(response):
    """Raise HTTPError with a structured message when *response* is an error.

    The raised error carries the status code, the original exception and
    the response body (parsed JSON when possible, raw content otherwise).
    No-op for successful responses.
    """
    try:
        response.raise_for_status()
    except HTTPError as exc:
        detail = {'status_code': response.status_code, 'exception': exc}
        try:
            detail['response'] = response.json()
        except JSONDecodeError:
            # Body was not valid JSON; fall back to the raw bytes.
            detail['response'] = response.content
        raise HTTPError(detail)
class CpmsConnector:
    """Client for the CPMS (aCommerce) fulfillment platform.

    Authenticates on construction, then exposes helpers for reading and
    creating sales orders, reading stock allocations, and managing
    webhook registrations.
    """

    # Valid sales-order statuses accepted by the status-listing endpoint.
    ORDER_STATUS = ('NEW', 'IN_PROGRESS', 'COMPLETED', 'CANCELED', 'ERROR')

    def __init__(self, config):
        """Initialize with config.

        Args:
            config(dict): must supply username, api_key, api_url
        """
        self.username = config['username']
        self.api_key = config['api_key']
        self.api_url = config['api_url']
        self._token = None
        # Fetch a token immediately so every later call is authenticated.
        self._set_token()

    @property
    def _fulfillment_url(self):
        """The API base URL moved onto the "fulfillment." subdomain."""
        netloc = f'fulfillment.{urlparse(self.api_url).netloc}'
        return urlparse(self.api_url)._replace(netloc=netloc).geturl()

    def _update_headers(self, token):
        # CPMS authenticates every request through this header.
        self.headers = {
            'X-Subject-Token': token
        }

    @property
    def token(self):
        """Most recently issued auth token (read-only)."""
        return self._token

    def _set_token(self):
        """Request an auth token using the configured API key credentials.

        Stores the token and prepares self.headers; raises HTTPError (via
        validate_response) when authentication fails.
        """
        path = '/identity/token'
        payload = {
            "auth":
                {
                    "apiKeyCredentials":
                        {
                            "username": self.username,
                            "apiKey": self.api_key
                        }
                }
        }
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.post(url, json=payload)
        validate_response(r)
        token = r.json()['token']['token_id']
        self._update_headers(token)
        self._token = token

    def get_order(self, channel_id, order_id):
        """Retrieve a single sales order.

        Args:
            channel_id(str): channel id of cpms
            order_id(str): order id within that channel
        Returns:
            dict: the order payload
        """
        path = f'/channel/{channel_id}/order/{order_id}'
        url = urlparse(self._fulfillment_url)._replace(path=path).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        return r.json()

    def get_orders_status(self, channel_id=None, partner_id=None, list_id=None,
                          since=None, order_status=None):
        """Get list of order statuses of sales orders.

        Args:
            channel_id(str): channel_id of cpms
            partner_id(str): merchant/partner id of cpms
            list_id(list): list of order ids (max 10)
            since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z
            order_status(str): (NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)
        Returns:
            tuple: (orders, next_url) where next_url is the pagination
            link or None.
        Raises:
            ValueError: on invalid order_status, missing channel/partner id,
            an over-long list_id, or missing list_id/since.
        """
        if order_status and order_status not in self.ORDER_STATUS:
            raise ValueError(
                'invalid order_status eg. '
                '(NEW, IN_PROGRESS, COMPLETED, CANCELED, ERROR)'
            )
        url = urlparse(self._fulfillment_url)
        # make sure channel_id or partner_id is supplied
        if channel_id:
            path = f'/channel/{channel_id}'
        elif partner_id:
            path = f'/partner/{partner_id}'
        else:
            raise ValueError(
                'must supply either channel_id or partner_id args')
        # append sales-order-status path
        path += '/sales-order-status'
        # make sure list_id or since is supplied
        if list_id:
            if len(list_id) > 10:
                raise ValueError('list_id can\'t be more than 10 length')
            path += '/id'
            query_string = {'id': list_id}
        elif since:
            # BUG FIX: previously sent {'id': None} instead of the 'since'
            # filter, and raised the wrong error when order_status was
            # omitted (validity is already checked at the top).
            query_string = {'since': since}
            if order_status:
                query_string.update({'orderStatus': order_status})
        else:
            raise ValueError('must supply either list_id or since args')
        query_string = urlencode(query_string, doseq=True)
        url = url._replace(path=path, query=query_string).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        orders = r.json()
        next_url = r.links['next']['url'] if 'next' in r.links else None
        return orders, next_url

    def create_order(self, channel_id, order_id, payload):
        """Create an order in acommerce (CPMS).

        Args:
            channel_id(str): channel_id of cpms
            order_id(str): order_id of merchant or partner
            payload(dict): order body
        Returns:
            dict: status code and success message (raises on error)
        """
        path = f'/channel/{channel_id}/order/{order_id}'
        url = urlparse(self._fulfillment_url)._replace(path=path).geturl()
        r = requests.put(url=url, json=payload, headers=self.headers)
        validate_response(r)
        return {
            'code': r.status_code,
            'message': 'Order has been successfully created'
        }

    def get_stocks(self, channel_id, partner_id, since):
        """Get stock allocations of a partner on a channel/marketplace.

        Args:
            channel_id(str): channel_id cpms
            partner_id(str): partner/merchant id
            since(str): ISO 8601 format eg. 2015-06-18T10:30:40Z
        Returns:
            dict: {'data': [...]} plus 'url' when more pages exist.
            NOTE(review): 'url' echoes the current request URL, not the
            pagination link from r.links - confirm callers expect that.
        """
        path = f'/channel/{channel_id}/allocation/merchant/{partner_id}'
        query_string = urlencode({'since': since})
        url = urlparse(self._fulfillment_url)._replace(
            path=path, query=query_string).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        next_link = r.links['next']['url'] if 'next' in r.links else None
        return {'data': r.json(), 'url': url} \
            if next_link else {'data': r.json()}

    def _get_webhook_path(self, channel_id, partner_id):
        """Return the channel- or partner-scoped base path for webhooks."""
        if not (channel_id or partner_id):
            raise ValueError('channel_id or partner_id must be fill')
        return f'/channel/{channel_id}' \
            if channel_id else f'/partner/{partner_id}'

    def create_webhook(self, payload, channel_id=None, partner_id=None):
        """Register a webhook endpoint, scoped by channel_id or partner_id.

        Args:
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)
            payload(dict): webhook data in acommerce format
        Returns:
            dict: webhook data information
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += '/hooks'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.post(url=url, json=payload, headers=self.headers)
        validate_response(r)
        return r.json()

    def retrieve_webhook(self, webhook_id, channel_id=None, partner_id=None):
        """Retrieve one webhook's information by webhook_id.

        Must supply either channel_id or partner_id.
        Returns:
            dict: webhook data information
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += f'/hooks/{webhook_id}'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.get(url=url, headers=self.headers)
        validate_response(r)
        return r.json()

    def get_webhook(self, channel_id=None, partner_id=None):
        """List registered webhooks for a channel or partner.

        Returns:
            list: webhook data information
        """
        path = self._get_webhook_path(channel_id, partner_id)
        path += '/hooks'
        # (fixed an accidental duplicated assignment: `url = url = ...`)
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.get(url, headers=self.headers)
        validate_response(r)
        return r.json()

    def delete_webhook(self, webhook_id, channel_id=None, partner_id=None):
        """Remove a registered webhook.

        Args:
            webhook_id: registered webhook id
            channel_id(str): channel_id of acommerce (CPMS)
            partner_id(str): merchant or partner id acommerce (CPMS)
        Returns:
            dict: status code and success message (API returns HTTP 204)
        """
        path = self._get_webhook_path(channel_id, partner_id)
        # BUG FIX: the webhook id was previously omitted from the path, so
        # the DELETE targeted the /hooks collection instead of one hook.
        path += f'/hooks/{webhook_id}'
        url = urlparse(self.api_url)._replace(path=path).geturl()
        r = requests.delete(url, headers=self.headers)
        validate_response(r)
        return {
            'code': r.status_code,
            'message': 'Web Hook has been successfully deleted'
        }
|
7,843 | 15a894e6f94fc62b97d1614a4213f21331ef12a0 | import collections
# Demonstration of collections.defaultdict (Python 2 print statements).
s = [('yellow', 1), ('blue', 2), ('yellow', 3), ('blue', 4), ('red', 1)]
# defaultdict(list) creates an empty list on first access of a missing key.
d = collections.defaultdict(list)
d2 = {'test':121}
# Group the (key, value) pairs by key.
for k, v in s:
    d[k].append(v)
# Accessing a brand-new key auto-creates its list before appending.
d['test'].append('value')
print list(d.items())
print d
print d['blue']
print type(d)
print type(d2)
7,844 | de88e2d2cf165b35f247ea89300c91b3c8c07fea | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Dict, List, Tuple, TypeVar
import pytest
from nncf.data import Dataset
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters
from nncf.quantization.advanced_parameters import OverflowFix
from nncf.quantization.algorithms.bias_correction.backend import BiasCorrectionAlgoBackend
from nncf.quantization.algorithms.post_training.algorithm import PostTrainingQuantization
from tests.post_training.test_templates.helpers import ConvTestModel
from tests.post_training.test_templates.helpers import MultipleConvTestModel
from tests.post_training.test_templates.helpers import StaticDatasetMock
TModel = TypeVar("TModel")
TTensor = TypeVar("TTensor")
class TemplateTestBCAlgorithm:
    """Backend-agnostic template for BiasCorrection algorithm tests.

    Backends subclass this template and implement the abstract helpers;
    test_update_bias then quantizes small conv models with (slow) bias
    correction enabled and compares the corrected biases to references.
    """
    @staticmethod
    @abstractmethod
    def list_to_backend_type(data: List) -> TTensor:
        """
        Convert list to backend specific type
        :param data: List of data.
        :return: Converted data.
        """

    @staticmethod
    @abstractmethod
    def get_backend() -> BiasCorrectionAlgoBackend:
        """
        Get backend specific BiasCorrectionAlgoBackend
        :return BiasCorrectionAlgoBackend: Backend specific BiasCorrectionAlgoBackend
        """

    @staticmethod
    def fn_to_type(tensor):
        # Identity by default; backends override to cast dataset items.
        return tensor

    @staticmethod
    @abstractmethod
    def get_transform_fn():
        """
        Get transformation function for dataset.
        """

    def get_dataset(self, input_size: Tuple):
        """
        Return backend specific random dataset.
        :param input_size: Shape of the generated input samples.
        """
        return StaticDatasetMock(input_size, self.fn_to_type)

    @staticmethod
    @abstractmethod
    def backend_specific_model(model: TModel, tmp_dir: str):
        """
        Return backend specific model.
        """

    @staticmethod
    @abstractmethod
    def check_bias(model: TModel, ref_biases: Dict):
        """
        Checks biases values.
        """

    @staticmethod
    def map_references(ref_biases: Dict) -> Dict[str, List]:
        """
        Returns backend-specific reference.
        """
        return ref_biases

    @staticmethod
    def get_quantization_algorithm():
        # subset_size=1 keeps statistics collection fast; the overflow fix
        # is disabled so bias values stay comparable across backends.
        return PostTrainingQuantization(
            subset_size=1,
            fast_bias_correction=False,
            advanced_parameters=AdvancedQuantizationParameters(overflow_fix=OverflowFix.DISABLE),
        )

    @pytest.mark.parametrize(
        "model_cls, ref_biases",
        (
            (
                MultipleConvTestModel,
                {
                    "/conv_1/Conv": [0.6658976, -0.70563036],
                    "/conv_2/Conv": [-0.307696, -0.42806846, 0.44965455],
                    "/conv_3/Conv": [-0.0033792169, 1.0661412],
                    "/conv_4/Conv": [-0.6941606, 0.9958957, 0.6081058],
                    # Disabled latest layer due to backends differences
                    # "/conv_5/Conv": [0.07476559, -0.75797373],
                },
            ),
            (ConvTestModel, {"/conv/Conv": [0.11085186, 1.0017344]}),
        ),
    )
    def test_update_bias(self, model_cls, ref_biases, tmpdir):
        """Quantize the model with bias correction and verify bias values."""
        model = self.backend_specific_model(model_cls(), tmpdir)
        dataset = Dataset(self.get_dataset(model_cls.INPUT_SIZE), self.get_transform_fn())
        quantization_algorithm = self.get_quantization_algorithm()
        quantized_model = quantization_algorithm.apply(model, dataset=dataset)
        mapped_ref_biases = self.map_references(ref_biases)
        self.check_bias(quantized_model, mapped_ref_biases)
|
7,845 | 15c1db535beb115c45aeba433a946255f70fa86e | # -*- coding: utf-8 -*-
import base64
import logging
from decimal import Decimal
import requests
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from currencies.currencies import decimal_round
from payments.systems import base
from payments.systems.bankusd import display_amount_usd
from payments.systems.base import CommissionCalculationResult
# Display metadata consumed by the payments framework.
name = _("Neteller")
logo = "neteller.png"
# The module file name doubles as the payment-system slug.
slug = __name__.rsplit(".", 1)[-1]
currencies = ["USD"]
mt4_payment_slug = "NETELLER"
# Fee / processing time / minimum shown to users, per direction.
transfer_details = {
    "deposit": {
        "fee": "3.5% min $1",
        "time": _("Within day"),
        "min_amount": display_amount_usd(10),
    },
    "withdraw": {
        "fee": _("2.5% min $1 max $30"),
        "time": _("Within day"),
        "min_amount": display_amount_usd(10),
    }
}
# Templates used to render the deposit/withdraw forms.
templates = {
    "deposit": "payments/forms/deposit/neteller.html",
    "withdraw": "payments/forms/withdraw/electronic.html",
}
log = logging.getLogger(__name__)
class DepositForm(base.DepositForm):
    """Deposit form that charges a Neteller wallet via the transferIn API."""
    # Customer's Neteller account plus its 6-digit verification code.
    purse = forms.CharField(max_length=100, label=_("Net account"),
                            help_text=_("Your Neteller's 12-digit Account ID or email address that is "
                                        "associated with their NETELLER account"))
    secure_id = forms.IntegerField(label=_("Secure ID"), help_text=_("Your Neteller's 6-digit Secure ID"))
    # Neteller REST endpoints.
    bill_address = "https://api.neteller.com/v1/transferIn"
    get_token_url = "https://api.neteller.com/v1/oauth2/token?grant_type=client_credentials"
    # 3.5% deposit fee (the $1 minimum is enforced in _calculate_commission).
    commission_rate = Decimal("0.035")
    MIN_AMOUNT = (10, 'USD')
    @classmethod
    def is_automatic(cls, instance):
        # Deposits are charged programmatically, without manual processing.
        return True
    def get_neteller_token(self):
        """
        :return: tuple. ('accessToken', 'Auth method'). Example: ("0.AQAAAU3in", "Bearer")
        or None if can't get token.
        """
        headers = {'Content-Type': 'application/json',
                   'Cache-Control': 'no-cache',
                   'Authorization': 'Basic ' + base64.b64encode(
                       settings.NETELLER_MERCHANT_ID + ':' + settings.NETELLER_SECRET_KEY)}
        result = requests.post(self.get_token_url, headers = headers)
        if result.status_code == 200:
            result = result.json()
        else:
            return None
        if result.get("accessToken"):
            return result.get("accessToken"), result.get("tokenType")
        else:
            return None
    def make_request(self):
        """Charge the customer's Neteller account for this payment request.

        Returns None on success (marks the request payed) or an error
        message string on failure (marks the request failed).
        """
        import json
        # Neteller expects ISO currency codes; RUR is the legacy RUB alias.
        currency = {
            "RUR": "RUB"
        }.get(self.instance.currency, self.instance.currency)
        # Amount is sent in minor units (cents).
        amount = int(decimal_round(self.instance.amount) * 100)
        token_tuple = self.get_neteller_token()
        if not token_tuple:
            return "Can't get the token."
        data = {
            "paymentMethod": {
                "type": "neteller",
                "value": self.instance.purse
            },
            "transaction": {
                "merchantRefId": unicode(self.instance.pk),
                "amount": amount,
                "currency": currency
            },
            "verificationCode": unicode(self.instance.params["secure_id"]),
        }
        headers = {'Content-Type': 'application/json', 'Authorization': token_tuple[1] + " " + token_tuple[0]}
        request = requests.post(self.bill_address, data=json.dumps(data), headers=headers)
        request = request.json()
        if request.get("transaction") and request.get("transaction").get("status") == "accepted":
            self.instance.refresh_state()
            self.instance.is_payed = True
            self.instance.params["transaction"] = request.get("transaction").get("id")
            self.instance.save()
            return None
        else:
            error_message = request.get("error").get("message") if request.get("error") else \
                "Automatic payment failed."
            self.instance.is_committed = False
            self.instance.is_payed = False
            self.instance.public_comment = error_message
            self.instance.save()
            return error_message
    @classmethod
    def generate_mt4_comment(cls, payment_request):
        """Comment attached to the MT4 balance operation for this deposit."""
        return "{NETELLER}[%s]" % payment_request.pk
    def clean(self):
        # NOTE(review): amount/currency are read (and convert_currency
        # imported) but never used before delegating to the parent clean -
        # looks like leftover validation code; confirm before removing.
        from platforms.converter import convert_currency
        amount = self.cleaned_data["amount"]
        currency = self.cleaned_data["currency"]
        return super(DepositForm, self).clean()
    def confirmed_response_data(self, request):
        """Run the charge and shape the API response as (dict, http_status)."""
        error = self.make_request()
        if error:
            return {'detail': "Error: %s" % error}, 400
        else:
            return {"success": True}, None
    @classmethod
    def _calculate_commission(cls, request, full_commission=False):
        """Deposit fee: 3.5% of the amount, but at least $1."""
        commission = request.amount * cls.commission_rate
        min_comm = Decimal("1")
        commission = max(min_comm, commission)
        return CommissionCalculationResult(
            amount=request.amount,
            commission=commission,
            currency=request.currency
        )
class DetailsForm(base.DetailsForm):
    """Payment-details form; relabels the purse field for Neteller."""

    def __init__(self, *args, **kwargs):
        super(DetailsForm, self).__init__(*args, **kwargs)
        # The generic "purse" field doubles as the Neteller account id.
        purse_field = self.fields["purse"]
        purse_field.label = _("Net account")
        purse_field.help_text = _("Your Neteller's 12-digit Account ID or email address that is "
                                  "associated with their NETELLER account")
class WithdrawForm(base.WithdrawForm):
    """Withdrawal form: 2.5% fee clamped to the $1..$30 range."""

    MIN_AMOUNT = (10, 'USD')
    commission_rate = Decimal("0.025")

    @classmethod
    def _calculate_commission(cls, request, full_commission=False):
        """Return the withdrawal commission, clamped between $1 and $30."""
        fee = request.amount * cls.commission_rate
        lower, upper = Decimal("1"), Decimal("30")
        if fee < lower:
            fee = lower
        elif fee > upper:
            fee = upper
        return CommissionCalculationResult(
            amount=request.amount,
            commission=fee,
            currency=request.currency
        )
|
7,846 | 44b6ee8488869da447882457897ce87b2fdea726 | import getpass
# Simple ATM-style login prompt; getpass hides the typed password.
print('****************************')
print('***** Caixa Eletronico *****')
print('****************************')
account_typed = input("Digite sua conta: ")
password_typed = getpass.getpass("Digite sua senha: ")
|
7,847 | 89ce3d3ec9691ab8f54cc0d9d008e06c65b5f2cc | #grabbed the following from moses marsh -- https://github.com/sidetrackedmind/gimme-bus/blob/master/gimmebus/utilities.py
from datetime import datetime as dt
from math import radians, cos, sin, acos, asin, sqrt
import networkx as nx
## These functions will go in model.py for matching historical GPS
## positions to the defined route shapes
def haversine(pt1, pt2):
    """Great-circle distance in kilometres between two (lon, lat) points.

    INPUT: tuples (lon1, lat1), (lon2, lat2) in decimal degrees
    OUTPUT: distance along the Earth's surface in km
    """
    # Convert decimal degrees to radians.
    lon1, lat1 = radians(pt1[0]), radians(pt1[1])
    lon2, lat2 = radians(pt2[0]), radians(pt2[1])
    # Haversine formula.
    half_dlon = (lon2 - lon1) / 2.
    half_dlat = (lat2 - lat1) / 2.
    a = sin(half_dlat) ** 2 + cos(lat1) * cos(lat2) * sin(half_dlon) ** 2
    arc = 2 * asin(sqrt(a))
    earth_radius_km = 6371  # use 3956 for miles
    return arc * earth_radius_km
def get_closest_shape_pt(lat, lon, shape):
    """Index of the shape row whose (lon, lat) point is nearest the query."""
    distances = shape.apply(
        lambda row: haversine((row['shape_pt_lon'], row['shape_pt_lat']),
                              (lon, lat)),
        axis=1)
    return distances.argmin()
def distance_along_route(pt_1_ind, pt_2_ind, shape):
    """Distance travelled along the route between two shape-point indices."""
    start = shape.loc[pt_1_ind]['shape_dist_traveled']
    end = shape.loc[pt_2_ind]['shape_dist_traveled']
    return end - start
def distance_from_segment(pt, seg_pt_1, seg_pt_2):
    """Approximate distance (km) from *pt* to the segment seg_pt_1..seg_pt_2.

    Uses the law of cosines on the triangle's great-circle side lengths;
    when the perpendicular foot would fall outside the segment (an obtuse
    angle at an endpoint), returns the distance to the nearer endpoint.
    """
    base = haversine(seg_pt_1, seg_pt_2)
    to_start = haversine(seg_pt_1, pt)
    to_end = haversine(seg_pt_2, pt)
    cos_num_start = (to_start ** 2 + base ** 2 - to_end ** 2)
    cos_num_end = (to_end ** 2 + base ** 2 - to_start ** 2)
    if (cos_num_start < 0) or (cos_num_end < 0):
        # Closest point on the segment is one of the endpoints.
        return min(to_end, to_start)
    angle = acos(cos_num_start / (2. * to_start * base))
    # Height of the triangle = perpendicular distance to the segment.
    return to_start * sin(angle)
|
7,848 | c48d5d9e088acfed0c59e99d3227c25689d205c6 | naam = raw_input("Wat is je naam?")
getal = raw_input("Geef me een getal?")
if naam == "Barrie":
print "Welkom " * int(getal)
else:
print "Helaas, tot ziens" |
7,849 | 29c25721a4754650f0d5d63d6cc3215cb0ea1b3e | """
Bubble sort: sweep the list repeatedly, swapping adjacent elements that are
out of order - a very naive approach.
Each pass makes up to n-1 comparisons over n-1 passes: about (n-1)^2 steps.
Worst case: O(n^2), same as the average case.
Best case: O(n) holds only for the early-exit variant that stops when a pass
makes no swaps; an implementation without that check (like the one below)
performs O(n^2) comparisons even on sorted input.
Space complexity: O(1) (in-place).
"""
def bubbleSort(list):
    """Sort *list* in place, ascending, by bubbling larger items rightward."""
    # After each outer pass the largest remaining element settles into its
    # final slot, so the inner sweep shrinks by one each time.
    for unsorted_len in range(len(list) - 1, 0, -1):
        for j in range(unsorted_len):
            if list[j] > list[j + 1]:
                list[j], list[j + 1] = list[j + 1], list[j]
# Demo run (note: the name "list" shadows the builtin here).
list = [12,34,2,45,6]
bubbleSort(list)
print(list)
|
7,850 | 1a1a217b382f3c58c6c4cd3c1c3f556ae945f5a7 | from selenium import webdriver;
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
import time
# Start Chrome with a driver binary fetched by webdriver-manager.
driver = webdriver.Chrome(ChromeDriverManager().install())
# Implicit wait applies to every element lookup below.
driver.implicitly_wait(10)
driver.maximize_window()
driver.get("http://demo.automationtesting.in/Register.html")
# Menu entries for the hover chain: Interactions > Drag and Drop > Static.
interactions = driver.find_element_by_xpath("//a[@class='dropdown-toggle' and contains(text(),'Interactions ')]")
drag = driver.find_element_by_xpath("//a[@class='dropdown-toggle' and contains(text(),'Drag and Drop ')]")
static = driver.find_element_by_xpath("//ul[@class='childmenu ']//a[contains(text(),'Static ')]")
actions = ActionChains(driver)
# Hover through the nested dropdowns, then click the final entry.
actions.move_to_element(interactions).move_to_element(drag).move_to_element(static).click().perform()
time.sleep(5)
# Second demo: double-click a button on the practice page.
driver.get("http://testautomationpractice.blogspot.com/")
ele = driver.find_element_by_xpath("//*[@id='HTML10']/div[1]/button")
actions.double_click(ele).perform()
time.sleep(5)
driver.close()
|
7,851 | 22e24e8dd49367ae57d1980c4addf48d65c5e897 | '''
Created on Nov 20, 2012
@author: shriram
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
'''
Annotating only Sparse and Non Sparse Lines
'''
class Trainer:
    """Builds the HTML annotation form for sparse/non-sparse line labelling
    and reads annotated files back into nested page/column structures."""

    def html_escape(self, text):
        """Escape *text* for safe embedding in HTML attribute values."""
        extra_entities = {
            '"': "&quot;",
            "'": "&#39;"
        }
        # escape() handles &, <, > itself; the table adds the quote chars.
        return escape(text, extra_entities)

    def train(self, preprocessedxml, xmlname):
        """Write an annotation form for *preprocessedxml* to
        ../TrainingData/htmls/train<xmlname>.html, posting back to the
        local TableProcessor CGI script."""
        f = open('../TrainingData/htmls/train' + xmlname + '.html', 'w')
        f.write('<html><body><form action="http://localhost/cgi-bin/TableProcessor.py" method="post">')
        f.write('<input type="hidden" name="xmlname" value="' + xmlname + '"/>')
        # Running counters give every line/column/page a unique field name.
        line_no = 0
        page_no = 0
        col_no = 0
        for page in preprocessedxml:
            f.write('<div class="page"><input type="hidden" name="pagebegin' + str(page_no) + '" value="' + str(col_no) + '"/>')
            for col in page:
                f.write('<div class="col"><input type="hidden" name="colbegin' + str(col_no) + '" value="' + str(line_no) + '"/>')
                for tup in col:
                    # A sparse/non-sparse selector per line, defaulting to
                    # "Not Sparse", with the raw XML smuggled along in a
                    # hidden (escaped) field.
                    f.write('<div><select id="docparams" name="docparams' + str(line_no) + '">')
                    f.write('<option value="sparse">Sparse</option>')
                    f.write('<option value="nonsparse" selected="selected">Not Sparse</option>')
                    f.write("</select><input type='hidden' name='texttag" + str(line_no) + "' value='" + self.html_escape(ET.tostring(tup[1], 'utf-8', "xml")) + "'/>" + ET.tostring(tup[1]) + "</div>")
                    line_no += 1
                f.write('<input type="hidden" name="colend' + str(col_no) + '" value="' + str(line_no) + '"/><div>')
                col_no += 1
            f.write('<input type="hidden" name="pageend' + str(page_no) + '" value="' + str(col_no) + '"/> <div>')
            page_no += 1
        f.write('<input type="submit" value="Done!"/></form></body></html>')
        f.close()

    def readAnnotatedXml(self, xmlname):
        """Read a PAGE/COL-delimited annotation file into nested lists of
        [label, Element] pairs: pages -> columns -> lines."""
        f = open(xmlname)
        preprocessedxml = list()
        col = list()
        for line in f:
            if (line == "=============================== PAGE ===================================\n"):
                pagelist = list()
                preprocessedxml.append(pagelist)
            elif (line == "=============================== COL ===================================\n"):
                col = list()
                pagelist.append(col)
            else:
                # Label is everything up to the first space; the remainder
                # is the XML markup for the line.
                split_at = line.find(" ")
                label = line[:split_at]
                markup = line[split_at + 1:]
                col.append([label, ET.fromstring(markup)])
        return preprocessedxml

    def readAnnotatedxmlforTableDecomposition(self, xmlname):
        """Read a tab-separated annotation file into [label, Element] pairs,
        skipping blank lines."""
        f = open(xmlname)
        table = list()
        for line in f:
            if (line.strip() == ''):
                continue
            split_at = line.find("\t")
            label = line[:split_at]
            markup = line[split_at + 1:]
            table.append([label, ET.fromstring(markup)])
        return table
|
7,852 | f3a3746c48617754aad5ae8d0d7a0b8908c34562 |
# coding: utf-8
# In[5]:
import os
import numpy as np
import pandas as pd
from PIL import Image
import argparse
import time
import shutil
from sklearn.metrics import accuracy_score, mean_squared_error
import torch
import torch.optim
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.models as models
import matplotlib.image as mpimg
class ProtestDataset(Dataset):
    """
    dataset for training and evaluation
    """
    def __init__(self, txt_file, img_dir, transform = None):
        """
        Args:
            txt_file: Path to txt file with annotation
            img_dir: Directory with images
            transform: Optional transform to be applied on a sample.
        """
        # '-' marks missing annotations in the tsv; coerce them to 0.
        self.label_frame = pd.read_csv(txt_file, delimiter="\t").replace('-', 0)
        self.img_dir = img_dir
        self.transform = transform
    def __len__(self):
        # One sample per annotation row.
        return len(self.label_frame)
    def __getitem__(self, idx):
        # Column 0 holds the image file name relative to img_dir.
        imgpath = os.path.join(self.img_dir,
                               self.label_frame.iloc[idx, 0])
        image = pil_loader(imgpath)
        # Columns: 1 = protest flag, 2 = violence score, 3+ = visual
        # attributes - presumably ten of them (cf. FinalLayer's 12
        # outputs); confirm against the annotation file.
        protest = self.label_frame.iloc[idx, 1:2].values.astype('float')
        violence = self.label_frame.iloc[idx, 2:3].values.astype('float')
        visattr = self.label_frame.iloc[idx, 3:].values.astype('float')
        label = {'protest':protest, 'violence':violence, 'visattr':visattr}
        sample = {"image":image, "label":label}
        if self.transform:
            sample["image"] = self.transform(sample["image"])
        return sample
class ProtestDatasetEval(Dataset):
    """
    dataset for just calculating the output (does not need an annotation file)
    """
    def __init__(self, img_dir):
        """
        Args:
            img_dir: Directory with images
        """
        self.img_dir = img_dir
        # Fixed eval-time preprocessing pipeline.
        # NOTE(review): Grayscale(num_output_channels=1) (the "testtest"
        # experiment) feeds a 1-channel tensor into a 3-channel Normalize;
        # confirm this combination actually runs as intended.
        self.transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.Grayscale(num_output_channels=1), #testtest
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        # Sorted so output order is deterministic across runs.
        self.img_list = sorted(os.listdir(img_dir))
    def __len__(self):
        return len(self.img_list)
    def __getitem__(self, idx):
        imgpath = os.path.join(self.img_dir,
                               self.img_list[idx])
        image = pil_loader(imgpath)
        # we need this variable to check if the image is protest or not)
        sample = {"imgpath":imgpath, "image":image}
        sample["image"] = self.transform(sample["image"])
        return sample
class FinalLayer(nn.Module):
    """Replacement head for resnet50: 2048 features -> 12 sigmoid outputs.

    Per the index constants below, column 0 is protest, column 1 violence
    and columns 2..11 the visual attributes.
    """

    def __init__(self):
        super(FinalLayer, self).__init__()
        self.fc = nn.Linear(2048, 12)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map pooled features (N, 2048) to 12 values in [0, 1]."""
        return self.sigmoid(self.fc(x))
def pil_loader(path):
    """Open the image file at *path* and return it as an RGB PIL image."""
    with open(path, 'rb') as fp:
        # Convert inside the with-block, while the file is still open.
        return Image.open(fp).convert('RGB')
def modified_resnet():
    """Return an ImageNet-pretrained resnet50 with the custom 12-output head."""
    # Swap the stock fully-connected layer for our sigmoid multi-task head.
    net = models.resnet50(pretrained=True)
    net.fc = FinalLayer()
    return net
class AverageMeter(object):
    """Running statistics for a scalar metric: last value, sum, count, mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        # Guard against division by zero (e.g. a first update with n=0).
        if self.count != 0:
            self.avg = self.sum / self.count
class Lighting(object):
    """
    Lighting noise(AlexNet - style PCA - based noise)
    https://github.com/zhanghang1989/PyTorch-Encoding/blob/master/experiments/recognition/dataset/minc.py
    """

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, img):
        # A zero std means no jitter: hand the image back untouched.
        if self.alphastd == 0:
            return img
        # Sample one coefficient per channel: alpha ~ N(0, alphastd).
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        # Scale the eigenvectors by alpha and the eigenvalues, collapse to
        # a single RGB offset, then add it across the whole image.
        scaled = self.eigvec.type_as(img).clone()
        scaled = scaled.mul(alpha.view(1, 3).expand(3, 3))
        scaled = scaled.mul(self.eigval.view(1, 3).expand(3, 3))
        rgb = scaled.sum(1).squeeze()
        return img.add(rgb.view(3, 1, 1).expand_as(img))
# for indexing output of the model
protest_idx = Variable(torch.LongTensor([0]))
violence_idx = Variable(torch.LongTensor([1]))
visattr_idx = Variable(torch.LongTensor(range(2,12)))
best_loss = float("inf")
def calculate_loss(output, target, criterions, weights = [1, 10, 5]):
    """Calculate the weighted multi-task loss for one batch.

    `output` column 0 is the protest score, column 1 the violence score
    and columns 2-11 the visual attributes (via the module-level *_idx
    tensors).  Violence/visual-attribute outputs are zero-filled for
    non-protest samples before their losses are computed.

    Returns (losses, scores, N_protest): per-task weighted losses, a
    dict with 'protest_acc'/'violence_mse'/'visattr_acc', and the number
    of protest images in the batch.

    NOTE(review): `weights` is a mutable default argument; harmless here
    because it is never mutated, but a tuple would be safer.
    """
    # number of protest images
    N_protest = int(target['protest'].data.sum())
    batch_size = len(target['protest'])  # NOTE(review): unused
    if N_protest == 0:
        # if no protest image in target, only the protest loss is defined
        outputs = [None]
        # protest output
        outputs[0] = output.index_select(1, protest_idx)
        targets = [None]
        # protest target
        targets[0] = target['protest'].float()
        losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(1)]
        scores = {}
        scores['protest_acc'] = accuracy_score((outputs[0]).data.round(), targets[0].data)
        scores['violence_mse'] = 0
        scores['visattr_acc'] = 0
        return losses, scores, N_protest
    # used for filling 0 for non-protest images
    not_protest_mask = (1 - target['protest']).byte()
    # NOTE(review): sized 4 but only indices 0-2 are ever used
    outputs = [None] * 4
    # protest output
    outputs[0] = output.index_select(1, protest_idx)
    # violence output
    outputs[1] = output.index_select(1, violence_idx)
    outputs[1].masked_fill_(not_protest_mask, 0)
    # visual attribute output
    outputs[2] = output.index_select(1, visattr_idx)
    outputs[2].masked_fill_(not_protest_mask.repeat(1, 10),0)
    targets = [None] * 4
    targets[0] = target['protest'].float()
    targets[1] = target['violence'].float()
    targets[2] = target['visattr'].float()
    scores = {}
    # protest accuracy for this batch
    scores['protest_acc'] = accuracy_score(outputs[0].data.round(), targets[0].data)
    # violence MSE for this batch, averaged over protest images only
    scores['violence_mse'] = ((outputs[1].data - targets[1].data).pow(2)).sum() / float(N_protest)
    # mean accuracy for visual attribute for this batch
    comparison = (outputs[2].data.round() == targets[2].data)
    # non-protest rows were zero-filled on both sides above, so they would
    # all compare equal; exclude them from the correct count
    comparison.masked_fill_(not_protest_mask.repeat(1, 10).data,0)
    n_right = comparison.float().sum()
    mean_acc = n_right / float(N_protest*10)
    scores['visattr_acc'] = mean_acc
    # return weighted loss
    losses = [weights[i] * criterions[i](outputs[i], targets[i]) for i in range(len(criterions))]
    return losses, scores, N_protest
def train(train_loader, model, criterions, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    Updates per-batch meters, prints progress every args.print_freq
    batches and returns the list of per-batch total losses.
    """
    model.train()
    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_protest = AverageMeter()
    loss_v = AverageMeter()  # combined violence + visual-attribute loss
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(train_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        data_time.update(time.time() - end)
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        input_var = Variable(input)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        optimizer.zero_grad()
        # total loss = sum of the weighted per-task losses
        loss = 0
        for l in losses:
            loss += l
        # back prop
        loss.backward()
        optimizer.step()
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when there is no protest image in the batch
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}] '
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f})  '
                  'Data {data_time.val:.2f} ({data_time.avg:.2f})  '
                  'Loss {loss_val:.3f} ({loss_avg:.3f})  '
                  'Protest {protest_acc.val:.3f} ({protest_acc.avg:.3f})  '
                  'Violence {violence_mse.val:.5f} ({violence_mse.avg:.5f})  '
                  'Vis Attr {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time,
                   loss_val=loss_protest.val + loss_v.val,
                   loss_avg = loss_protest.avg + loss_v.avg,
                   protest_acc = protest_acc, violence_mse = violence_mse,
                   visattr_acc = visattr_acc))
    return loss_history
def validate(val_loader, model, criterions, epoch):
    """Run one validation pass over `val_loader`.

    Same bookkeeping as train() but without backprop.  Returns
    (average total loss, list of per-batch losses).
    """
    model.eval()
    batch_time = AverageMeter()
    data_time = AverageMeter()  # NOTE(review): unused in this function
    loss_protest = AverageMeter()
    loss_v = AverageMeter()  # combined violence + visual-attribute loss
    protest_acc = AverageMeter()
    violence_mse = AverageMeter()
    visattr_acc = AverageMeter()
    end = time.time()
    loss_history = []
    for i, sample in enumerate(val_loader):
        # measure data loading batch_time
        input, target = sample['image'], sample['label']
        if args.cuda:
            input = input.cuda()
            for k, v in target.items():
                target[k] = v.cuda()
        input_var = Variable(input)
        target_var = {}
        for k,v in target.items():
            target_var[k] = Variable(v)
        output = model(input_var)
        losses, scores, N_protest = calculate_loss(output, target_var, criterions)
        loss = 0
        for l in losses:
            loss += l
        if N_protest:
            loss_protest.update(losses[0].data, input.size(0))
            loss_v.update(loss.data - losses[0].data, N_protest)
        else:
            # when no protest images
            loss_protest.update(losses[0].data, input.size(0))
        loss_history.append(loss.data)
        protest_acc.update(scores['protest_acc'], input.size(0))
        violence_mse.update(scores['violence_mse'], N_protest)
        visattr_acc.update(scores['visattr_acc'], N_protest)
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.2f} ({batch_time.avg:.2f})  '
                  'Loss {loss_val:.3f} ({loss_avg:.3f})  '
                  'Protest Acc {protest_acc.val:.3f} ({protest_acc.avg:.3f})  '
                  'Violence MSE {violence_mse.val:.5f} ({violence_mse.avg:.5f})  '
                  'Vis Attr Acc {visattr_acc.val:.3f} ({visattr_acc.avg:.3f})'
                  .format(
                   epoch, i, len(val_loader), batch_time=batch_time,
                   loss_val =loss_protest.val + loss_v.val,
                   loss_avg = loss_protest.avg + loss_v.avg,
                   protest_acc = protest_acc,
                   violence_mse = violence_mse, visattr_acc = visattr_acc))
    # end-of-epoch summary line
    print(' * Loss {loss_avg:.3f} Protest Acc {protest_acc.avg:.3f} '
          'Violence MSE {violence_mse.avg:.5f} '
          'Vis Attr Acc {visattr_acc.avg:.3f} '
          .format(loss_avg = loss_protest.avg + loss_v.avg,
                  protest_acc = protest_acc,
                  violence_mse = violence_mse, visattr_acc = visattr_acc))
    return loss_protest.avg + loss_v.avg, loss_history
def adjust_learning_rate(optimizer, epoch):
    """Set the learning rate to args.lr decayed by a factor of 0.4 every 4 epochs."""
    lr = args.lr * (0.4 ** (epoch // 4))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save `state` to `filename`; also copy it to model_best.pth.tar when is_best."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
def main():
    """Train and validate the protest classifier.

    Builds datasets/loaders and the modified ResNet-50, optionally
    resumes from a checkpoint, then runs the train/validate loop,
    checkpointing after every epoch (the best-validation-loss model is
    additionally copied aside by save_checkpoint).
    """
    global best_loss
    loss_history_train = []
    loss_history_val = []
    data_dir = args.data_dir
    img_dir_train = os.path.join(data_dir, "train")
    img_dir_val = os.path.join(data_dir, "test")
    txt_file_train = os.path.join(data_dir, "annot_train.txt")
    txt_file_val = os.path.join(data_dir, "annot_test.txt")
    # load pretrained resnet50 with a modified last fully connected layer
    model = modified_resnet()
    # we need three different criterions for the three tasks
    criterion_protest = nn.BCELoss()
    criterion_violence = nn.MSELoss()
    criterion_visattr = nn.BCELoss()
    criterions = [criterion_protest, criterion_violence, criterion_visattr]
    if args.cuda and not torch.cuda.is_available():
        raise Exception("No GPU Found")
    if args.cuda:
        model = model.cuda()
        criterions = [criterion.cuda() for criterion in criterions]
    # we are not training the frozen layers
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(
        parameters, args.lr,
        momentum=args.momentum,
        weight_decay=args.weight_decay
    )
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            # (fix: this assignment used to appear twice)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            loss_history_train = checkpoint['loss_history_train']
            loss_history_val = checkpoint['loss_history_val']
            if args.change_lr:
                # restart with the learning rate given on the command line
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                # otherwise continue with the saved optimizer state
                optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # eigenvalues/eigenvectors for the PCA lighting-noise augmentation
    eigval = torch.Tensor([0.2175, 0.0188, 0.0045])
    eigvec = torch.Tensor([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
    train_dataset = ProtestDataset(
        txt_file = txt_file_train,
        img_dir = img_dir_train,
        transform = transforms.Compose([
            transforms.RandomResizedCrop(100),
            transforms.RandomRotation(30),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(
                brightness = 0.4,
                contrast = 0.7,
                saturation = 0.4,
            ),
            transforms.ToTensor(),
            Lighting(0.1, eigval, eigvec),
            normalize,
        ]))
    val_dataset = ProtestDataset(
        txt_file = txt_file_val,
        img_dir = img_dir_val,
        transform = transforms.Compose([
            transforms.Resize(125),
            transforms.CenterCrop(100),
            transforms.ToTensor(),
            normalize,
        ]))
    train_loader = DataLoader(
        train_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size,
        shuffle = True
    )
    val_loader = DataLoader(
        val_dataset,
        num_workers = args.workers,
        batch_size = args.batch_size)
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)
        loss_history_train_this = train(train_loader, model, criterions,
                                        optimizer, epoch)
        loss_val, loss_history_val_this = validate(val_loader, model,
                                                   criterions, epoch)
        loss_history_train.append(loss_history_train_this)
        loss_history_val.append(loss_history_val_this)
        is_best = loss_val < best_loss
        if is_best:
            print('best model!!')
        best_loss = min(loss_val, best_loss)
        # checkpoint every epoch; save_checkpoint copies the best one aside
        save_checkpoint({
            'epoch' : epoch + 1,
            'state_dict' : model.state_dict(),
            'best_loss' : best_loss,
            'optimizer' : optimizer.state_dict(),
            'loss_history_train': loss_history_train,
            'loss_history_val': loss_history_val
        }, is_best)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
type=str,
default = "",
help = "directory path to dataset",
)
parser.add_argument("--cuda",
action = "store_true",
help = "use cuda?",
)
parser.add_argument("--workers",
type = int,
default = 0,
help = "number of workers",
)
parser.add_argument("--batch_size",
type = int,
default = 8,
help = "batch size",
)
parser.add_argument("--epochs",
type = int,
default = 10,
help = "number of epochs",
)
parser.add_argument("--weight_decay",
type = float,
default = 1e-4,
help = "weight decay",
)
parser.add_argument("--lr",
type = float,
default = 0.01,
help = "learning rate",
)
parser.add_argument("--momentum",
type = float,
default = 0.9,
help = "momentum",
)
parser.add_argument("--print_freq",
type = int,
default = 10,
help = "print frequency",
)
parser.add_argument('--resume',
default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--change_lr',
action = "store_true",
help = "Use this if you want to \
change learning rate when resuming")
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
args, unknown = parser.parse_known_args()
if args.cuda:
protest_idx = protest_idx.cuda()
violence_idx = violence_idx.cuda()
visattr_idx = visattr_idx.cuda()
main()
|
7,853 | bacd0c729193f064b21ab8e01e98dfc276094458 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Taobao .Inc
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://code.taobao.org/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://code.taobao.org/.
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import *
from django import forms
from django.db.models import Count,Sum,Q
from taocode2.models import *
from taocode2.helper.utils import *
from taocode2.helper.func import wrap
from taocode2.helper import consts
from taocode2.apps.user import activity
from taocode2.apps.repos import svn
from taocode2.settings import *
import time
__author__ = 'luqi@taobao.com'
def build_prj_nav_menu(request, project, choice = None):
    """Build the navigation menu entries for a project page.

    Returns a list of {'uri', 'txt'} dicts; project owners also get an
    'admin' entry.  Entries whose uri ends with *choice* are flagged
    with 'choice': True (the first entry when no choice is given).
    """
    base = '/p/' + project.name
    pairs = [('src', 'source'), ('issues', 'issues'),
             ('wiki', 'wiki'), ('info', 'info')]
    if project.owner == request.user:
        pairs.append(('admin', 'admin'))
    navmenus = [{'uri': base + '/' + path, 'txt': label}
                for path, label in pairs]
    if choice is None:
        navmenus[0]['choice'] = True
    else:
        for menu in navmenus:
            if menu['uri'].endswith(choice):
                menu['choice'] = True
    return navmenus
def need_owner(view_func):
    """Decorator: resolve the project named in the URL onto request.rc and
    require the requester to be its owner (super users are also allowed).

    Raises Http404 for missing/disabled projects; returns 403 otherwise.
    """
    def _wrapped_view(request, *args, **kwargs):
        rc = request.rc
        rc.project = q_get(Project, name=kwargs['name'],
                           status = consts.PROJECT_ENABLE)
        rc.project_name = kwargs['name']
        if rc.project == None:
            raise Http404
        if rc.project.owner != request.user:
            # 'supper' is this project's flag for super users (sic)
            if request.user.supper is False:
                return HttpResponseForbidden()
        return view_func(request, *args, **kwargs)
    return wrap(view_func, _wrapped_view)
def can_access(prj, user):
    """Return None when *user* may view *prj*, otherwise an error response.

    Missing or disabled projects raise Http404.  Public projects are
    visible to everyone; private ones only to the owner and listed
    ProjectMember rows.
    """
    if prj is None or prj.status != consts.PROJECT_ENABLE:
        raise Http404
    if prj.is_public:
        return None
    # private project: anonymous users are rejected outright
    if user.is_authenticated() is False:
        return HttpResponseForbidden()
    if prj.owner != user:
        pm = q_get(ProjectMember, project = prj, user = user)
        if pm is None:
            return HttpResponseForbidden()
    return None
def can_write(prj, user):
    """True when *user* may write to *prj*: the project is enabled, the
    user is authenticated, and is the owner or has a ProjectMember row."""
    if prj is None or prj.status != consts.PROJECT_ENABLE:
        return False
    if user.is_authenticated() is False:
        return False
    if prj.owner == user:
        return True
    return q_get(ProjectMember, project = prj, user = user) is not None
@need_owner
@as_json
@login_required
def do_invite(request, name):
    """Invite a user (by name or email) to the project; owner only.

    Creates or re-sends a ProjectMember invitation and returns True,
    or False for bad input / self-invites / already-accepted members.
    """
    if request.method != 'POST':
        return False
    uname = request.POST.get('u', '').strip()
    if len(uname) <= 0:
        return False
    # look the invitee up by user name or email address
    user = q_get(User, Q(name=uname)|Q(email=uname))
    if user is None or user == request.user:
        return False
    rc = request.rc
    pm = q_get(ProjectMember,
               project=rc.project, user=user)
    if pm is not None:
        # existing row: re-send the invitation unless already accepted
        if pm.member_type != consts.PM_ACCEPT_INV:
            pm.member_type = consts.PM_SEND_INV
            pm.save()
        return True
    pm = ProjectMember()
    pm.project = rc.project
    pm.user = user
    pm.member_type = consts.PM_SEND_INV
    pm.save()
    return True
@login_required
@need_owner
def project_admin(request, name):
    """Project admin page: nav menus plus usage statistics (issue count,
    attachment count/size, repository disk usage)."""
    rc = request.rc
    rc.pagename = name + ' admin'
    uri = request.META['PATH_INFO']
    #rc.navmenus = [{'uri': uri, 'txt':'basic', 'choice':True},
    #               {'uri': uri + 'resources', 'txt':'resources'}]
    rc.navmenus = build_prj_nav_menu(request, rc.project, 'admin')
    res = []
    vls = q_gets(Issue, project = rc.project,
                 status__in = (consts.ISSUE_OPEN,
                               consts.ISSUE_CLOSED)).values('project').annotate(pc=Count('project'))
    res.append(['Issue Count',
                len(vls) > 0 and vls[0]['pc'] or 0])
    vls = q_gets(ProjectAttachment, project = rc.project,
                 status = consts.FILE_ENABLE).values('project').annotate(pc=Count('project'))
    res.append(['Attachemts Count',
                len(vls) > 0 and vls[0]['pc'] or 0])
    vls = q_gets(ProjectAttachment,
                 project = rc.project,
                 status = consts.FILE_ENABLE).values('project').annotate(ps=Sum('size'))
    # total attachment size in MB
    si = (len(vls) > 0 and vls[0]['ps'] or 0) / (1024*1024.0)
    res.append(['Attachemts Total Size','%.4s MB'%si])
    # du -sbh over the svn repository directory; fall back to 0.0 MB on error
    # NOTE(review): uses `settings.REPOS_ROOT` and `os.path`, but this module
    # does `from taocode2.settings import *` and never imports os/settings
    # directly — confirm these names actually resolve here.
    r,out, err = exec_cmd(['du','-sbh', os.path.join(settings.REPOS_ROOT, name)])
    res.append(['Repository Usage', r != 0 and '0.0 MB' or out.split()[0]])
    rc.res = res
    rc.licenses = map(lambda x:x[0], consts.LICENSES)
    # NOTE(review): this enabled-status check runs after all the work above;
    # consider moving it to the top of the view.
    if rc.project.status != consts.PROJECT_ENABLE:
        raise Http404
    return send_response(request, 'project/admin.html')
@login_required
@need_owner
def project_resources(request, name):
    """Project resource-usage page (owner only)."""
    rc = request.rc
    rc.pagename = 'Project resources usages'
    uri = '/p/'+name+'/admin'
    rc.navmenus = [{'uri': uri, 'txt':'basic'},
                   {'uri': uri + 'resouces',
                    'txt':'resources', 'choice':True}]
    if rc.project.status != consts.PROJECT_ENABLE:
        raise Http404
    return send_response(request, 'project/resources.html')
@as_json
def get_members(request, name):
    """JSON endpoint: list members of a project the requester can view.

    Returns (True, [member dicts]) or False when the project is missing
    or access is denied.
    """
    project = q_get(Project, name=name)
    if project is None:
        return False
    resp = can_access(project, request.user)
    if resp is not None:
        return False
    members = q_gets(ProjectMember, project=project)
    return (True, [m.json() for m in members])
def do_invite_op(request, name, op):
    """Set the requester's membership row on project *name* to *op*
    (accept/reject); logs a join activity on acceptance."""
    if request.method != 'POST':
        return False
    project = q_get(Project, Q(name=name))
    if project is None:
        return False
    pm = q_get(ProjectMember, project=project, user=request.user)
    if pm is None:
        # no pending invitation for this user
        return False
    pm.member_type = op
    pm.save()
    if op == consts.PM_ACCEPT_INV:
        activity.join_member(project, request.user, request.user)
    return True
@as_json
@login_required
def do_accept(request, name):
    # Accept a pending project-membership invitation.
    return do_invite_op(request, name,
                        consts.PM_ACCEPT_INV)
@as_json
@login_required
def do_reject(request, name):
    # Reject a pending project-membership invitation.
    return do_invite_op(request, name,
                        consts.PM_REJECT_INV)
@as_json
@login_required
def do_exit(request, name):
    """Remove the requester from project *name* and log a leave activity.

    NOTE(review): the activity is logged even when no membership row
    existed — confirm that is intended.
    """
    project = q_get(Project, name = name)
    if project is None:
        return False
    ProjectMember.objects.filter(project = project,
                                 user = request.user).delete()
    activity.leave_member(project, request.user, request.user)
    return True
@login_required
@need_owner
@as_json
def del_member(request, name):
    """Remove a member (POST field 'u' = user name) from the project; owner only."""
    if request.method != 'POST':
        return False
    uname = request.POST.get('u', '').strip()
    if len(uname) <= 0:
        return False
    rc = request.rc
    ProjectMember.objects.filter(project = rc.project,
                                 user = User.objects.filter(name=uname)).delete()
    return True
@login_required
@need_owner
@as_json
def del_prj(request, name):
    """Soft-delete a project: rename it with a __DELETED__<timestamp> suffix,
    mark it deleted and move the svn repository aside.

    Returns (True, redirect-url) on success.
    """
    if request.method != 'POST':
        return False
    # timestamped name keeps the original name free for reuse
    del_name = name + '__DELETED__%s'%time.time()
    project = request.rc.project
    old_name = project.name
    project.name = del_name
    project.status = consts.PROJECT_MARK_DELETED
    project.save()
    svn.del_repos(old_name, del_name)
    return (True, reverse('apps.user.views.view_user', args=[]))
@login_required
@need_owner
@as_json
def edit_prj(request, name):
    """Update a project's title, license and visibility; owner only.

    Expects POST fields 't' (title, required non-empty), 'l' (license)
    and 'pub' (integer string, 0 = private).  Returns True on success,
    False on bad input.
    """
    if request.method != 'POST':
        return False
    project = request.rc.project
    title = request.POST.get('t','').strip()
    if len(title) <= 0:
        return False
    license = request.POST.get('l','').strip()
    is_public = request.POST.get('pub','0').strip()
    project.title = title
    project.license = license
    try:
        project.is_public = bool(int(is_public))
    except ValueError:
        # malformed 'pub' flag: report failure instead of a 500 error
        return False
    project.save()
    return True
|
7,854 | c3719f30bcf13061134b34b0925dfa2af4535f14 | #!/usr/bin/env python
from setuptools import setup
import NagAconda
# Distribution metadata for the NagAconda Nagios plugin wrapper.
# Name and version are taken from the package itself so they stay in sync.
setup(name=NagAconda.__name__,
      version=NagAconda.__version__,
      description="NagAconda is a Python Nagios wrapper.",
      long_description=open('README').read(),
      author='Steven Schlegel',
      author_email='steven@schlegel.tech',
      license='New BSD License',
      url='https://github.com/SchlegelS0208/NagAconda',
      packages=['NagAconda'],
      tests_require=['nose>=0.11',],
      install_requires=['Sphinx'],
      test_suite = 'nose.collector',
      platforms = 'any',
      classifiers = [
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Documentation',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
        ],
      )
|
7,855 | d205c38e18b1acf8043a5976a90939b14358dc40 | #-*- coding: utf-8 -*-
espacos = ["__1__", "__2__", "__3__", "__4__"]
facil_respostas=["ouro","leao","capsula do poder","relampago de plasma"]
media_respostas=["Ares","Saga","Gemeos","Athena"]
dificil_respostas=["Shion","Aries","Saga","Gemeos"]
def inicio_game():
    # Welcome screen: raw_input just waits for Enter (Python 2 script).
    apresentacao=raw_input("Bem vindo ao quiz de preenchimento de lacunas de Saint Seiya !!! Digite a tecla enter para iniciar ")
inicio_game()
def select_level():
    # Ask for a difficulty and print the matching fill-in-the-blanks sentence
    # (Python 2: print statement, raw_input).
    nivel = raw_input('Escolha um dos seguintes niveis -> facil/ medio/ dificil: ').lower()
    facil="Quiz facil: O cavaleiro de __1__ de __2__, pertencente a quinta casa zodiacal, possui os golpes __3__ e __4__ ."
    medio="Quiz medio: Conhecido como mestre __1__ , mas de verdadeiro nome __2__ de __3__, tentou assassinar __4__ ainda bebe, e enganou todo o santuario."
    dificil="Quiz dificil: __1__de __2__, era o antigo mestre do santuario e tambem mestre de Mu de Aries, morto por __3__ de __4__ na revolta de Saga."
    niveis=["facil","medio","dificil"]
    if nivel==niveis[0]:
        print facil
    if nivel==niveis[1]:
        print medio
    if nivel==niveis[2]:
        print dificil
select_level()
|
7,856 | 2d5abcd75dcbeb1baa3f387035bdcc3b7adbfe3f | '''
8-6. 도시 이름
도시와 국가 이름을 받는 city_country() 함수를 만드세요. 이 함수는 다음과 같은 문자열을 반환해야 합니다.
'Santiago, Chile'
- 최소한 세 개의 도시-국가 쌍으로 함수를 호출하고 반환값을 출력하세요.
Output:
santiago, chile
ushuaia, argentina
longyearbyen, svalbard
'''
|
7,857 | f715628da2f1b950b8fbf8aa5b033e5299d3e224 | lc_headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15",
"authority": "leetcode.com",
}
lc_all = "https://leetcode.com/api/problems/all/"
lc_submissions = "https://leetcode.com/api/submissions/?offset=%(offset)s&limit=%(limit)s&lastkey=%(lastkey)s"
lc_graphql = "https://leetcode.com/graphql"
query_string = 'query questionData($titleSlug: String!) {\n question(titleSlug: $titleSlug) {\n questionId\n questionFrontendId\n boundTopicId\n title\n titleSlug\n content\n translatedTitle\n translatedContent\n isPaidOnly\n difficulty\n likes\n dislikes\n isLiked\n similarQuestions\n contributors {\n username\n profileUrl\n avatarUrl\n __typename\n }\n topicTags {\n name\n slug\n translatedName\n __typename\n }\n companyTagStats\n codeSnippets {\n lang\n langSlug\n code\n __typename\n }\n stats\n hints\n solution {\n id\n canSeeDetail\n paidOnly\n __typename\n }\n status\n sampleTestCase\n metaData\n judgerAvailable\n judgeType\n mysqlSchemas\n enableRunCode\n enableTestMode\n enableDebugger\n envInfo\n libraryUrl\n adminUrl\n __typename\n }\n}\n'
md_template = '''# [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
:+1: %(likes)s :thumbsdown: %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Content
%(contents)s
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**. <br>
- Likes: %(likes)s
- Dislikes: %(dislikes)s
'''
related_template = "[%(related_title)s](%(link)s) (%(related_difficulty)s) <br>"
tag_template = "[s-%(color)s.svg)](%(URL)s) "
raw_md_template = '''## [%(id)s] %(title)s (%(difficulty)s)
%(small_tags)s
👍 %(likes)s 👎 %(dislikes)s
---
## My Submission
- Language: %(lang)s
- Runtime: %(runtime)s
- Completed time: %(time)s
```%(lang)s
%(code)s
```
## Related Problems
%(related_problems)s
## What a(n) %(difficulty)s problem!
Among **%(submission)s** total submissions, **%(accepted)s** are accepted, with an acceptance rate of **%(acc_rate)s**.
- Likes: %(likes)s
- Dislikes: %(dislikes)s
'''
|
7,858 | fe5398b03d2f0cfc7c972677faa0ea3ec701469e | # Create your models here.
from django.db import models
from django.utils import timezone
from django.db import models
# Create your models here.
#필드 개수가 다르다.
class Post(models.Model):
    """A blog post with an author, title, body and publication timestamps."""
    # Author of this Post; on_delete=CASCADE deletes the post with its user.
    author = models.ForeignKey('auth.User', on_delete=models.CASCADE)
    title = models.CharField(max_length=200) # title of the blog article
    text = models.TextField() # body text with no length limit
    # Every new Post gets a creation timestamp by default.
    created_date = models.DateTimeField(
            default=timezone.now) # date and time
    # Null allowed: a post can be saved as an unpublished draft.
    published_date = models.DateTimeField(
            blank=True, null=True) # blank = form may be empty; null = DB-level NULL
    def publish(self):
        # Stamp published_date with the current time and persist.
        self.published_date = timezone.now()
        self.save()
def __str__(self):
return self.title |
7,859 | 62de629d8f28435ea8dc3dc093cac95e7cedf128 | # 6. Evaluate Classifier: you can use any metric you choose for this assignment
# (accuracy is the easiest one). Feel free to evaluate it on the same data you
# built the model on (this is not a good idea in general but for this assignment,
# it is fine). We haven't covered models and evaluation yet, so don't worry about
# creating validation sets or cross-validation.
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
from sklearn.metrics import roc_curve, auc, classification_report, confusion_matrix
# credits to https://github.com/yhat/DataGotham2013/blob/master/notebooks/8%20-%20Fitting%20and%20Evaluating%20Your%20Model.ipynb
def evaluate(model, X_te, y_te):
    '''
    Given a fitted classifier and testing features/labels, plot the
    positive-class probability histogram and print accuracy, a
    classification report and a prediction-vs-truth cross table.
    '''
    probs = model.predict_proba(X_te)
    plt.hist(probs[:,1])
    plt.xlabel('Likelihood of Significant Financial')
    plt.ylabel('Frequency')
    # We should also look at Accuracy
    print("Accuracy = " + str(model.score(X_te, y_te)))
    # Finally -- Precision & Recall
    # (fix: predict was computed twice; reuse a single prediction)
    y_hat = model.predict(X_te)
    print(classification_report(y_te, y_hat, labels=[0, 1]))
    # Rows hold predictions, columns the true labels — label them that way.
    # (fix: rownames/colnames were swapped; local also renamed from
    # `confusion_matrix`, which shadowed the sklearn.metrics import)
    cross_tab = pd.crosstab(y_hat,
                            y_te,
                            rownames=["Predicted"],
                            colnames=["Actual"])
    print(cross_tab)
def plot_roc(probs, y_te):
    '''
    Plots ROC curve.

    probs: predicted probabilities for the positive class
    y_te: true binary labels
    '''
    plt.figure()
    fpr, tpr, thresholds = roc_curve(y_te, probs)
    roc_auc = auc(fpr, tpr)
    pl.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
    # dashed diagonal = performance of a random classifier
    pl.plot([0, 1], [0, 1], 'k--')
    pl.xlim([0.0, 1.05])
    pl.ylim([0.0, 1.05])
    pl.xlabel('False Positive Rate')
    pl.ylabel('True Positive Rate')
    pl.title("ROC Curve")
    pl.legend(loc="lower right")
    pl.show()
7,860 | 90e475dfd128689dd4e1a5375ced6e4cbfb73c07 | import sys
# Card-discard simulation: deck is 1..N with the top at the left end.
# Repeatedly discard the top card, then move the next card to the bottom,
# until one card remains; print the discards in order plus the survivor.
# (fix: list.pop(0)/append was accidentally O(n^2); deque makes it O(n))
from collections import deque

N = int(input())
card = deque(range(1, N + 1))
trash = []
while len(card) > 1:
    trash.append(card.popleft())
    card.append(card.popleft())
trash.append(card[0])
print(" ".join(str(c) for c in trash))
|
7,861 | 4da1a97c2144c9aaf96e5fe6508f8b4532b082d4 | import tweepy
import time
import twitter_credentials as TC
# OAuth handshake with credentials kept in twitter_credentials.py.
auth = tweepy.OAuthHandler(TC.CONSUMER_KEY, TC.CONSUMER_SECRET)
auth.set_access_token(TC.ACCESS_TOKEN, TC.ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
count = 1
# Query to get 50 tweets with either Indiana or Weather in them
for tweet in tweepy.Cursor(api.search, q = "Indiana OR Weather").items(50):
    # print each tweet with a running index
    print(str(count) +". "+ tweet.text)
    count+=1
|
7,862 | db36c82717aa0bacffce7a3e2724ed2bb586c7fb | from solution import find_days
import pudb
def test():
    # Appears to be the "daily temperatures" problem: for each day, the
    # number of days until a strictly warmer one (0 when none) — TODO confirm
    # against solution.find_days.
    T = [1, 2, 3, 1, 0, 4]
    # pudb.set_trace()
    res = find_days(T)
    assert res == [1, 1, 3, 2, 1, 0]
|
7,863 | 3a6eaa238e78e7a818bcf6e18cc7881eadf94b07 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_googlesearch
# Purpose: Searches Google for content related to the domain in question.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 07/05/2012
# Copyright: (c) Steve Micallef 2012
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_googlesearch(SpiderFootPlugin):
    """Google:Footprint,Investigate:Some light Google scraping to identify sub-domains and links."""
    # NOTE: the docstring above is parsed by SpiderFoot (name:use-cases:summary);
    # keep its exact format.

    # Default options
    opts = {
        'fetchlinks': True,  # Should we fetch links on the base domain?
        'pages': 20  # Number of google results pages to iterate
    }

    # Option descriptions
    optdescs = {
        'fetchlinks': "Fetch links found on the target domain-name?",
        'pages': "Number of Google results pages to iterate through."
    }

    # Target
    # NOTE(review): class-level mutable attribute; re-bound per scan in setup()
    results = list()

    def setup(self, sfc, userOpts=dict()):
        # Reset state for a new scan and merge user-supplied options.
        self.sf = sfc
        self.results = list()

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["INTERNET_NAME"]

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        return ["LINKED_URL_INTERNAL", "SEARCH_ENGINE_WEB_CONTENT"]

    def handleEvent(self, event):
        # Search Google for pages on the reported domain; emit the page
        # content and (optionally) in-domain links found on those pages.
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if eventData in self.results:
            self.sf.debug("Already did a search for " + eventData + ", skipping.")
            return None
        else:
            self.results.append(eventData)

        # Sites hosted on the domain
        pages = self.sf.googleIterate("site:" + eventData,
                                      dict(limit=self.opts['pages'], useragent=self.opts['_useragent'],
                                           timeout=self.opts['_fetchtimeout']))
        if pages is None:
            self.sf.info("No results returned from Google.")
            return None

        for page in pages.keys():
            # de-duplicate pages across events
            if page in self.results:
                continue
            else:
                self.results.append(page)

            # Check if we've been asked to stop
            if self.checkForStop():
                return None

            # Submit the google results for analysis
            evt = SpiderFootEvent("SEARCH_ENGINE_WEB_CONTENT", pages[page],
                                  self.__name__, event)
            self.notifyListeners(evt)

            # We can optionally fetch links to our domain found in the search
            # results. These may not have been identified through spidering.
            if self.opts['fetchlinks']:
                links = self.sf.parseLinks(page, pages[page], eventData)
                if len(links) == 0:
                    continue

                for link in links:
                    if link in self.results:
                        continue
                    else:
                        self.results.append(link)
                    self.sf.debug("Found a link: " + link)
                    # only report links whose host ends with the target domain
                    if self.sf.urlFQDN(link).endswith(eventData):
                        if self.checkForStop():
                            return None

                        evt = SpiderFootEvent("LINKED_URL_INTERNAL", link,
                                              self.__name__, event)
                        self.notifyListeners(evt)
|
7,864 | 9aaaa744780dbd32b14e09a34976a2a0a3ce34f7 | from packages import data as DATA
from packages import plot as PLOT
from packages import universal as UNIVERSAL
from packages import currency_pair as CP
import matplotlib.pyplot as plt
import mpl_finance as mpf
from packages import db as DB
import CONSTANTS
import datetime
from matplotlib.pylab import date2num
from matplotlib.widgets import Cursor
# Load 300 kline rows (ordered by timestamp) and plot candlesticks plus
# buy/sell volume statistics on three shared-x subplots.
pgmanager=DB.PGManager(**CONSTANTS.DB_CONNECT_ARGS_LOCAL)
tablename='klines_full_vol_50'
rows=pgmanager.select('select * from '+tablename + ' where timestamp>1577808000+86400*5 order by timestamp limit 300')
a=1  # NOTE(review): unused
# accumulators per plotted epoch; several of these are never used below
alist = []  # (date, open, high, low, close) tuples for candlestick_ohlc
vols_bid = []
vols_ask = []
diff_bid_2_ask = []
diff_bid_2_ask_in_past_2_epochs = []
diff_bid_2_ask_in_past_3_epochs = []
diff_bid_2_ask_in_past_5_epochs = []  # NOTE(review): unused
diff_bid_2_ask_in_past_10_epochs = []  # NOTE(review): unused
diff_bid_2_ask_in_past_20_epochs = []  # NOTE(review): unused
avg_buys=[]  # NOTE(review): unused
avg_sells=[]  # NOTE(review): unused
avg_buy_diff_sell=[]
avg_amounts=[]
dates = []
cnt = 0  # NOTE(review): immediately rebound by the for loop
# x-axis base: matplotlib date number of the first row's timestamp
# (assumes row layout: [?, timestamp, open, high, low, close, vol,
#  vol_buy, vol_sell, ..., avg_buy, avg_sell, avg_amount] — TODO confirm)
date = date2num(datetime.datetime.fromtimestamp(rows[0][1]))
for cnt in range(20, len(rows)):
    row_previous2=rows[cnt-2]
    row_previous1 = rows[cnt - 1]
    row = rows[cnt]
    open=row[2]  # NOTE(review): shadows the builtin open()
    high=row[3]
    low=row[4]
    close=row[5]
    vol=row[6]
    vol_buy,vol_sell=row[7:9]
    avg_buy, avg_sell, avg_amount_per_trade=row[-3:]
    # one x-axis unit per row rather than the real timestamp spacing
    date = date + 1
    data = (date, open, high, low, close)
    alist.append(data)
    vols_bid.append(-vol_buy)
    vols_ask.append(vol_sell)
    diff_bid_2_ask.append(vol_buy-vol_sell)
    # rolling buy-minus-sell sums over the last 2 and 3 rows
    diff_bid_2_ask_in_past_2_epochs.append(
        vol_buy + row_previous1[7] - vol_sell-row_previous1[8])
    diff_bid_2_ask_in_past_3_epochs.append(
        vol_buy + row_previous1[7] +row_previous2[7] - vol_sell-row_previous1[8]-row_previous2[8])
    avg_buy_diff_sell.append(avg_buy-avg_sell)
    avg_amounts.append(avg_amount_per_trade*100)
    dates.append(date)
# fig, ax = plt.subplots(figsize=(32, 18))
# fig.subplots_adjust(bottom=0.5)
# mpf.candlestick_ohlc(ax, alist, width=0.5, colorup='g', colordown='r', alpha=1.0)
# plt.grid(True)
# # rotate the date tick labels
# plt.xticks(rotation=30)
# plt.title('wanda yuanxian 17')
# plt.xlabel('Date')
# plt.ylabel('Price')
# # use dates for the x-axis ticks
# ax.xaxis_date()
fig, axes = plt.subplots(3, sharex=True, figsize=(64, 30))
# top panel: candlesticks
mpf.candlestick_ohlc(axes[0], alist, width=0.5, colorup='g', colordown='r')
axes[0].set_title('BTC')
axes[0].set_ylabel('价格')
axes[0].grid(True)
axes[0].xaxis_date()
# axes[1].plot(dates, avg_buy_diff_sell,c='red',linewidth=0.5)
# axes[1].plot(dates, avg_amounts,c='green', linewidth=0.5)
# axes[1].grid(True)
# middle panel: average buy-sell price gap and scaled trade size
axes[1].plot(dates, avg_buy_diff_sell, c='orange')
axes[1].plot(dates, avg_amounts, c='blue')
axes[1].set_ylabel('成交量')
axes[1].grid(True)
# bottom panel: buy-minus-sell volume over 1/2/3-row windows
axes[2].plot(dates, diff_bid_2_ask, c='green')
axes[2].plot(dates, diff_bid_2_ask_in_past_2_epochs, c='orange')
axes[2].plot(dates, diff_bid_2_ask_in_past_3_epochs, c='blue')
axes[2].set_ylabel('成交量')
axes[2].grid(True)
# NOTE(review): duplicate set_ylabel/grid on axes[2]; the later label wins
axes[2].set_ylabel('买卖均价')
axes[2].grid(True)
plt.show()
7,865 | 5e1398ed628917a42cc465e7cc2979601f0f4fbc | #!/usr/bin/env python
#****************************************************************************
# fieldformat.py, provides non-GUI base classes for field formating
#
# TreeLine, an information storage program
# Copyright (C) 2006, Douglas W. Bell
#
# This is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License, either Version 2 or any later
# version. This program is distributed in the hope that it will be useful,
# but WITTHOUT ANY WARRANTY. See the included LICENSE file for details.
#****************************************************************************
import re
from xml.sax.saxutils import escape, unescape
from gennumber import GenNumber, GenNumberError
from gendate import GenDate, GenDateError
from gentime import GenTime, GenTimeError
from genboolean import GenBoolean, GenBooleanError
import treedoc
import globalref
_errorStr = '#####'
def xslEscape(text):
    """Encapsulate all literal text in <xsl:text> elements
    and transform/escape some non-XML entities.
    For the moment, only &nbsp; is supported"""
    nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')
    escDict = {'&nbsp;': ' '}    # escape function does '&' first
    def esc(matchObj):
        """Return escaped replacement text"""
        # bug fix: identity comparison with None (was '== None')
        if matchObj.group(1) is None:    # no tags found
            return u'<xsl:text>%s</xsl:text>' % \
                     escape(matchObj.group(3), escDict)
        if matchObj.group(1):            # leading text and tag
            return u'<xsl:text>%s</xsl:text>%s' % \
                     (escape(matchObj.group(1), escDict), matchObj.group(2))
        return matchObj.group(2)         # tag only
    return nonTagRe.sub(esc, text)
class TextFormat(object):
    """Holds format info for a normal text field.

    Base class for all field format types in this module; subclasses
    override the class-level option flags and the format/edit methods.
    """
    typeName = 'Text'
    sortSequence = 20
    # regex used by removeMarkup() to strip HTML tags
    stripTagRe = re.compile('<.*?>')
    defaultNumLines = 1
    #field format edit options:
    defaultFormat = ''
    formatMenuList = []
    htmlOption = True
    hasEditChoices = False
    autoAddChoices = False
    hasFileBrowse = False
    allowAltLinkText = False
    def __init__(self, name, attrs={}):
        """Any prefix, suffix, html info in attrs dict.

        The shared mutable default dict is only read here, never
        modified, so it is harmless.
        """
        self.name = name
        self.enName = ''         # used only by fileFormat field for i18n
        self.format = attrs.get(u'format', self.defaultFormat)
        self.prefix = attrs.get(u'prefix', '')
        self.suffix = attrs.get(u'suffix', '')
        # defaults to no html (line breaks preserved)
        self.html = attrs.get(u'html', '').startswith('y') and True or False
        self.isRequired = attrs.get(u'required', '').startswith('y') and \
                          True or False
        self.hidden = attrs.get(u'hidden', '').startswith('y') and \
                      True or False
        try:
            self.numLines = int(attrs.get(u'lines',
                                          repr(self.defaultNumLines)))
        except ValueError:
            # fall back to a single line on a bad 'lines' attribute
            self.numLines = 1
        self.initDefault = attrs.get(u'init', '')
        self.linkAltField = attrs.get(u'linkalt', '')
        self.parentLevel = 0
        self.useFileInfo = False
        self.showInDialog = True
        self.initFormat()
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        pass
    def duplicateSettings(self, otherField):
        """Assign other field's parameters to this field"""
        self.name = otherField.name
        self.enName = otherField.enName
        self.format = otherField.format
        self.prefix = otherField.prefix
        self.suffix = otherField.suffix
        self.html = otherField.html
        self.isRequired = otherField.isRequired
        self.hidden = otherField.hidden
        self.numLines = otherField.numLines
        self.initDefault = otherField.initDefault
        self.linkAltField = otherField.linkAltField
        self.parentLevel = otherField.parentLevel
        self.useFileInfo = otherField.useFileInfo
        self.showInDialog = otherField.showInDialog
    def changeType(self, newType):
        """Change this field's type to newType with default format.

        Implemented by swapping the instance's class in place; the new
        class must be defined in this module as <newType>Format.
        """
        self.__class__ = globals()[newType + 'Format']
        self.format = self.defaultFormat
        self.initFormat()
    def englishName(self):
        """Returns English name if assigned, o/w name"""
        if self.enName:
            return self.enName
        return self.name
    def sepName(self, englishOnly=False):
        """Return name enclosed with {* *} separators.

        A leading '!' marks fields that come from the file info item.
        """
        name = englishOnly and self.enName or self.name
        if not self.useFileInfo:
            return u'{*%s*}' % name
        return u'{*!%s*}' % name
    def labelName(self):
        """Return name used for labels - add * for required fields"""
        if self.isRequired:
            return '%s*' % self.name
        return self.name
    def writeXml(self):
        """Return text for xml attributes; only non-default settings
        are written out."""
        text = u' type="%s"' % self.typeName
        if self.format:
            text += u' format="%s"' % escape(self.format, treedoc.escDict)
        if self.prefix:
            text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict)
        if self.suffix:
            text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict)
        if self.html:
            text += u' html="y"'
        if self.isRequired:
            text += u' required="y"'
        if self.hidden:
            text += u' hidden="y"'
        if self.numLines > 1:
            text += u' lines="%d"' % self.numLines
        if self.initDefault:
            text += u' init="%s"' % escape(self.initDefault, treedoc.escDict)
        if self.linkAltField:
            text += u' linkalt="%s"' % escape(self.linkAltField,
                                              treedoc.escDict)
        return text
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field.

        If useFileInfo is set, the data is read from the document's
        file info item instead of the given item.
        """
        if self.useFileInfo:
            item = globalref.docRef.fileInfoItem
        storedText = item.data.get(self.name, '')
        if storedText:
            return self.formatOutput(storedText, titleMode, internal)
        return ''
    def removeMarkup(self, text):
        """Remove HTML Markup and unescape entities"""
        text = TextFormat.stripTagRe.sub('', text)
        return unescape(text)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode.

        titleMode strips all markup to plain text; otherwise non-html
        text is escaped and newlines become <br /> tags.
        """
        prefix = self.prefix
        suffix = self.suffix
        if titleMode:
            if self.html:
                storedText = self.removeMarkup(storedText)
            if globalref.docRef.formHtml:
                prefix = self.removeMarkup(prefix)
                suffix = self.removeMarkup(suffix)
        else:
            if not self.html:
                storedText = escape(storedText).replace('\n', '<br />')
            if not globalref.docRef.formHtml:
                prefix = escape(prefix)
                suffix = escape(suffix)
        return u'%s%s%s' % (prefix, storedText, suffix)
    def editText(self, item):
        """Return tuple of this field's text in edit format and bool validity,
        using edit format option"""
        storedText = item.data.get(self.name, '')
        result = self.formatEditText(storedText)
        if self.isRequired and not result[0]:
            return (result[0], False)
        return result
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        return (storedText, True)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        return (editText, editText or not self.isRequired)
    def getInitDefault(self):
        """Return initial stored value for new nodes"""
        return self.initDefault
    def setInitDefault(self, editText):
        """Set initial value from editor version using edit format option"""
        self.initDefault = self.storedText(editText)[0]
    def getEditInitDefault(self):
        """Return initial value in edit format, found in edit format option"""
        return self.formatEditText(self.initDefault)[0]
    def initDefaultChoices(self):
        """Return a list of choices for setting the init default"""
        return []
    def sortValue(self, data):
        """Return value to be compared for sorting and conditionals"""
        storedText = data.get(self.name, '')
        return storedText.lower()
    def adjustedCompareValue(self, value):
        """Return conditional comparison value with real-time adjustments,
        used for date and time types' 'now' value"""
        return value
    def xslText(self):
        """Return what we need to write into an XSL file for this type"""
        return u'<xsl:if test="normalize-space(./%s)">%s'\
               '<xsl:value-of select="./%s"/>%s</xsl:if>' % \
               (self.name, xslEscape(self.prefix), self.name,
                xslEscape(self.suffix))
    def xslTestText(self):
        """Return XSL file test for data existence"""
        return u'normalize-space(./%s)' % self.name
class LongTextFormat(TextFormat):
    """Obsolete multi-line text field format.

    Retained only so documents written by old versions still load;
    identical to TextFormat except for the default line count.
    """
    # typeName = 'LongText'
    defaultNumLines = 7
    def __init__(self, name, attrs={}):
        """Pass any format, prefix, suffix, html settings via attrs"""
        TextFormat.__init__(self, name, attrs)
class NumberFormat(TextFormat):
    """Holds format info for a number field.

    Values are stored as text and converted through GenNumber using
    the field's format pattern (e.g. '#.##').
    """
    typeName = 'Number'
    sortSequence = 10
    #field format edit options:
    defaultFormat = u'#.##'
    formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'),
                      (u'%s\t%s' % (_('Required Digit'), '0'), '0'),
                      (u'%s\t%s' % (_('Digit or Space (external)'),
                                    _('<space>')), ' '),
                      None,
                      (u'%s\t%s' % (_('Decimal Point'), '.'), '.'),
                      (u'%s\t%s' % (_('Decimal Comma'), ','), ','),
                      None,
                      (u'%s\t%s' % (_('Comma Separator'), '\,'), '\,'),
                      (u'%s\t%s' % (_('Dot Separator'), '\.'), '\.'),
                      (u'%s\t%s' % (_('Space Separator (internal)'),
                                    _('<space>')), ' '),
                      None,
                      (u'%s\t%s' % (_('Optional Sign'), '-'), '-'),
                      (u'%s\t%s' % (_('Required Sign'), '+'), '+'),
                      None,
                      (u'%s\t%s' % (_('Exponent (capital)'), 'E'), 'E'),
                      (u'%s\t%s' % (_('Exponent (small)'), 'e'), 'e')]
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode.

        Unparsable numbers render as the module error string.
        """
        try:
            text = GenNumber(storedText).numStr(self.format)
        except GenNumberError:
            text = _errorStr
        return TextFormat.formatOutput(self, text, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using self.format"""
        try:
            return (GenNumber(storedText).numStr(self.format), True)
        except GenNumberError:
            # empty text is valid (unset); anything unparsable is not
            return (storedText, not storedText)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using self.format"""
        try:
            return (repr(GenNumber().setFromStr(editText, self.format)), True)
        except GenNumberError:
            return (editText, not editText and not self.isRequired)
    def sortValue(self, data):
        """Return value to be compared for sorting and conditionals.

        Numeric value where possible so ordering is numeric, not textual.
        """
        storedText = data.get(self.name, '')
        try:
            return GenNumber(storedText).num
        except GenNumberError:
            return ''
class ChoiceFormat(TextFormat):
    """Holds format info for a field with one of several text options.

    The format string is the list of options separated by editSep
    ('/'); a doubled separator escapes a literal separator character.
    """
    typeName = 'Choice'
    sortSequence = 20
    editSep = '/'
    #field format edit options:
    defaultFormat = '1/2/3/4'
    formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None,
                      (u'%s\t%s' % (_('"/" Character'), '//'), '//'), None,
                      (u'%s\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]
    hasEditChoices = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        self.formatList = self.splitText(self.format)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode.

        Values outside the choice list render as the error string.
        """
        if storedText not in self.formatList:
            storedText = _errorStr
        return TextFormat.formatOutput(self, storedText, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        if storedText in self.formatList:
            return (storedText, True)
        return (storedText, not storedText)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        if editText in self.formatList:
            return (editText, True)
        return (editText, not editText and not self.isRequired)
    def getEditChoices(self, currentText=''):
        """Return list of choices for combo box,
        each a tuple of edit text and any annotation text"""
        return [(text, '') for text in self.formatList]
    def initDefaultChoices(self):
        """Return a list of choices for setting the init default"""
        return [text for text in self.formatList]
    def splitText(self, textStr):
        """Split textStr using editSep, double sep's become char.

        A doubled separator is first replaced by a NUL sentinel so it
        survives the split, then restored as a literal separator.
        """
        return [text.strip().replace('\0', self.editSep) for text in
                textStr.replace(self.editSep * 2, '\0').
                split(self.editSep)]
class CombinationFormat(ChoiceFormat):
    """Holds format info for a field of combinations of text options.

    Stored values are editSep-joined subsets of the choice list;
    output uses the first separator from outputSepList that does not
    appear in any choice text.
    """
    typeName = 'Combination'
    outputSepList = (',', ';', ':', '|', '/', '\\', '~')
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        ChoiceFormat.__init__(self, name, attrs)
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        ChoiceFormat.initFormat(self)
        fullFormat = ''.join(self.formatList)
        try:
            # pick an output separator that can't collide with choice text
            self.sep = [sep for sep in CombinationFormat.outputSepList
                        if sep not in fullFormat][0] + ' '
        except IndexError:
            self.sep = CombinationFormat.outputSepList[0] + ' '
    def sortedChoices(self, inText):
        """Return tuple of choices from inText sorted like format and
        True if all splits are valid and included"""
        choices = self.splitText(inText)
        sortedChoices = [text for text in self.formatList if text in choices]
        if len(choices) == len(sortedChoices):
            return (sortedChoices, True)
        else:
            return (sortedChoices, False)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode"""
        choices, valid = self.sortedChoices(storedText)
        if valid:
            result = self.sep.join(choices)
        else:
            result = _errorStr
        return TextFormat.formatOutput(self, result, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        for choice in self.splitText(storedText):
            if choice not in self.formatList:
                return (storedText, not storedText)
        return (storedText, True)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        choices, valid = self.sortedChoices(editText)
        if valid:
            return (self.editSep.join(choices), True)
        else:
            return (editText, not editText and not self.isRequired)
    def getEditChoices(self, currentText=''):
        """Return list of choices for combo box,
        each a tuple of edit text and any annotation text.

        Builds menu entries that add each unused choice to the current
        set and (after a separator) remove each selected choice.
        """
        currentChoices, valid = self.sortedChoices(currentText)
        nonChoices = [text for text in self.formatList
                      if text not in currentChoices]
        results = []
        for choice in nonChoices:   # menu entries to add a choice
            allChoices = currentChoices + [choice]
            allChoices = [text for text in self.formatList
                          if text in allChoices]
            results.append((self.editSep.join(allChoices),
                            '(%s %s)' % (_('add'), choice)))
        if currentChoices:
            results.append((None, None))   # separator
        for choice in currentChoices:   # menu entries to remove a choice
            allChoices = currentChoices[:]
            allChoices.remove(choice)
            allChoices = [text for text in self.formatList
                          if text in allChoices]
            results.append((self.editSep.join(allChoices),
                            '(%s %s)' % (_('remove'), choice)))
        return results
    def initDefaultChoices(self):
        """Return a list of choices for setting the init default"""
        return [entry[0] for entry in self.getEditChoices()]
class AutoChoiceFormat(ChoiceFormat):
    """Holds format info for a field with one of several text options.

    Unlike ChoiceFormat, the choice list starts empty and is filled
    at runtime from data values (autoAddChoices is set), so any text
    is always accepted.
    """
    typeName = 'AutoChoice'
    #field format edit options:
    defaultFormat = ''
    formatMenuList = ()
    hasEditChoices = True
    autoAddChoices = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        self.formatList = []
    def addChoice(self, choice, sort=False):
        """Add choice to edit menu list if not already there"""
        if choice and choice not in self.formatList:
            self.formatList.append(choice)
            if sort:
                self.sortChoices()
    def sortChoices(self):
        """Sort menu list choices"""
        self.formatList.sort()
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode"""
        return TextFormat.formatOutput(self, storedText, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        return (storedText, True)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        if editText:
            return (editText, True)
        return (editText, not self.isRequired)
class DateFormat(TextFormat):
    """Holds format info for a date field.

    Values convert through GenDate; a special 'Now' stored default
    produces a date stamp when a node is created.
    """
    typeName = 'Date'
    sortSequence = 5
    #field format edit options:
    defaultFormat = u'mmmm d, yyyy'
    # (stored keyword, translated display text) for the date-stamp default
    dateStampStrings = ('Now', _('Now', 'date stamp setting'))
    formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),
                      (u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),
                      None,
                      (u'%s\t%s' % (_('Month (1 or 2 digits)'), 'm'), 'm'),
                      (u'%s\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),
                      (u'%s\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),
                      (u'%s\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),
                      None,
                      (u'%s\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),
                      (u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),
                      None,
                      (u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),
                      (u'%s\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),
                      (u'%s\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]
    hasEditChoices = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode"""
        try:
            text = GenDate(storedText).dateStr(self.format)
        except GenDateError:
            text = _errorStr
        return TextFormat.formatOutput(self, text, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        format = globalref.options.strData('EditDateFormat', True)
        try:
            return (GenDate(storedText).dateStr(format), True)
        except GenDateError:
            return (storedText, not storedText)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        format = globalref.options.strData('EditDateFormat', True)
        try:
            return (repr(GenDate().setFromStr(editText, format)), True)
        except GenDateError:
            return (editText, not editText and not self.isRequired)
    def getEditChoices(self, currentText=''):
        """Return list of choices for combo box,
        each a tuple of edit text and any annotation text"""
        format = globalref.options.strData('EditDateFormat', True)
        today = GenDate().dateStr(format)
        yesterday = (GenDate() - 1).dateStr(format)
        tomorrow = (GenDate() + 1).dateStr(format)
        return [(today, '(%s)' % _('today')),
                (yesterday, '(%s)' % _('yesterday')),
                (tomorrow, '(%s)' % _('tomorrow'))]
    def getInitDefault(self):
        """Return initial stored value for new nodes.

        The 'Now' keyword expands to the current date.
        """
        if self.initDefault in DateFormat.dateStampStrings:
            return GenDate().dateStr()
        return TextFormat.getInitDefault(self)
    def setInitDefault(self, editText):
        """Set initial value from editor version using edit format option"""
        if editText in DateFormat.dateStampStrings:
            self.initDefault = DateFormat.dateStampStrings[0]
        else:
            TextFormat.setInitDefault(self, editText)
    def getEditInitDefault(self):
        """Return initial value in edit format, found in edit format option"""
        if self.initDefault in DateFormat.dateStampStrings:
            return DateFormat.dateStampStrings[1]
        return TextFormat.getEditInitDefault(self)
    def initDefaultChoices(self):
        """Return a list of choices for setting the init default"""
        choices = [entry[0] for entry in self.getEditChoices()]
        choices.insert(0, DateFormat.dateStampStrings[1])
        return choices
    def adjustedCompareValue(self, value):
        """Return conditional comparison value with real-time adjustments,
        used for date and time types' 'now' value"""
        if value.startswith('now'):
            return repr(GenDate())
        return value
class TimeFormat(TextFormat):
    """Holds format info for a time field.

    Values convert through GenTime; a special 'Now' stored default
    produces a time stamp when a node is created.
    """
    typeName = 'Time'
    sortSequence = 6
    #field format edit options:
    defaultFormat = u'h:MM:SS aa'
    # (stored keyword, translated display text) for the time-stamp default
    timeStampStrings = ('Now', _('Now', 'time stamp setting'))
    formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),
                       'H'),
                      (u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),
                      (u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),
                       'h'),
                      (u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),
                      None,
                      (u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),
                      (u'%s\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),
                      None,
                      (u'%s\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),
                      (u'%s\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),
                      (u'%s\t%s' % (_('Fractional Seconds'), 's'), 's'),
                      None,
                      (u'%s\t%s' % (_('AM/PM'), 'AA'), 'AA'),
                      (u'%s\t%s' % (_('am/pm'), 'aa'),'aa')]
    hasEditChoices = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode"""
        try:
            text = GenTime(storedText).timeStr(self.format)
        except GenTimeError:
            text = _errorStr
        return TextFormat.formatOutput(self, text, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        format = globalref.options.strData('EditTimeFormat', True)
        try:
            return (GenTime(storedText).timeStr(format), True)
        except GenTimeError:
            return (storedText, not storedText)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option.

        NOTE(review): unlike DateFormat.storedText, parsing here does
        not pass the edit format option to GenTime — confirm intended.
        """
        try:
            return (repr(GenTime(editText)), True)
        except GenTimeError:
            return (editText, not editText and not self.isRequired)
    def getEditChoices(self, currentText=''):
        """Return list of choices for combo box,
        each a tuple of edit text and annotated text"""
        format = globalref.options.strData('EditTimeFormat', True)
        now = GenTime().timeStr(format)
        choices = [(now, '(%s)' % _('now'))]
        # offer a selection of fixed whole hours as well
        for hr in (6, 9, 12, 15, 18, 21, 0):
            time = GenTime((hr, 0)).timeStr(format)
            choices.append((time, ''))
        return choices
    def getInitDefault(self):
        """Return initial stored value for new nodes"""
        if self.initDefault in TimeFormat.timeStampStrings:
            return GenTime().timeStr()
        return TextFormat.getInitDefault(self)
    def setInitDefault(self, editText):
        """Set initial value from editor version using edit format option"""
        if editText in TimeFormat.timeStampStrings:
            self.initDefault = TimeFormat.timeStampStrings[0]
        else:
            TextFormat.setInitDefault(self, editText)
    def getEditInitDefault(self):
        """Return initial value in edit format, found in edit format option"""
        if self.initDefault in TimeFormat.timeStampStrings:
            return TimeFormat.timeStampStrings[1]
        return TextFormat.getEditInitDefault(self)
    def initDefaultChoices(self):
        """Return a list of choices for setting the init default"""
        choices = [entry[0] for entry in self.getEditChoices()]
        choices.insert(0, TimeFormat.timeStampStrings[1])
        return choices
    def adjustedCompareValue(self, value):
        """Return conditional comparison value with real-time adjustments,
        used for date and time types' 'now' value"""
        if value.startswith('now'):
            return repr(GenTime())
        return value
class BooleanFormat(ChoiceFormat):
    """Holds format info for a bool field.

    The format string holds the true/false display pair (e.g.
    'yes/no'); values outside that pair fall back to GenBoolean
    parsing before being rejected.
    """
    typeName = 'Boolean'
    sortSequence = 1
    #field format edit options:
    defaultFormat = _('yes/no')
    formatMenuList = [(_('true/false'), _('true/false')),
                      (_('T/F'), _('T/F')), None,
                      (_('yes/no'), _('yes/no')),
                      (_('Y/N'), _('Y/N')), None,
                      ('1/0', '1/0')]
    hasEditChoices = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        ChoiceFormat.__init__(self, name, attrs)
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped if not in titleMode"""
        if storedText not in self.formatList:
            try:
                storedText = GenBoolean(storedText).boolStr(self.format)
            except GenBooleanError:
                storedText = _errorStr
        return TextFormat.formatOutput(self, storedText, titleMode, internal)
    def formatEditText(self, storedText):
        """Return tuple of text in edit format and bool validity,
        using edit format option"""
        if storedText in self.formatList:
            return (storedText, True)
        try:
            return (GenBoolean(storedText).boolStr(self.format), True)
        except GenBooleanError:
            return (storedText, not storedText)
    def storedText(self, editText):
        """Return tuple of stored text from edited text and bool validity,
        using edit format option"""
        try:
            return (repr(GenBoolean(editText)), True)
        except GenBooleanError:
            if editText in self.formatList:
                return (editText, True)
            return (editText, not editText and not self.isRequired)
    def sortValue(self, data):
        """Return value to be compared for sorting and conditionals.

        Normalized boolean repr so equivalent spellings sort together.
        """
        storedText = data.get(self.name, '')
        try:
            return repr(GenBoolean(storedText))
        except GenBooleanError:
            return ''
class UniqueIDFormat(TextFormat):
    """An unique ID automatically generated for new nodes.

    The format string acts as a counter: optional prefix, a run of
    digits, optional suffix (e.g. 'id0100').
    """
    typeName = 'UniqueID'
    sortSequence = 10
    # captures (prefix)(digit run)(suffix) from the format/stored text
    formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')
    #field format edit options:
    defaultFormat = u'0001'
    formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None,
                      (u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'),
                      (u'%s\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def nextValue(self, increment=True):
        """Return the next value for a new node,
        increment format if increment is True.

        An unparsable format is reset to the default and retried.
        """
        try:
            prefix, numText, suffix = UniqueIDFormat.formatRe.\
                                      match(self.format).groups()
        except AttributeError:
            self.format = UniqueIDFormat.defaultFormat
            return self.nextValue(increment)
        value = self.format
        if increment:
            # preserve zero-padding width of the numeric part
            pattern = u'%%s%%0.%dd%%s' % len(numText)
            num = int(numText) + 1
            self.format = pattern % (prefix, num, suffix)
        return value
    def sortValue(self, data):
        """Return value to be compared for sorting and conditionals.

        Numeric portion of the ID, or 0 if it can't be parsed.
        """
        storedText = data.get(self.name, '')
        try:
            return int(UniqueIDFormat.formatRe.match(storedText).group(2))
        except AttributeError:
            return 0
class URLFormat(TextFormat):
    """Holds format info for a field with a URL path.

    Base class for the other link-type formats, which differ mainly
    in the URLMethod scheme prepended to bare paths.
    """
    typeName = 'URL'
    sortSequence = 8
    htmlOption = False
    allowAltLinkText = True
    # matches an explicit scheme ('http:', 'mailto:', ...) or an anchor '#'
    hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')
    URLMethod = u'http://'
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        self.html = True
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field.

        Optional alternate link text comes from the field named by
        linkAltField on the same node format.
        """
        if self.useFileInfo:
            item = globalref.docRef.fileInfoItem
        altText = ''
        if self.linkAltField:
            field = item.nodeFormat().findField(self.linkAltField)
            if field:
                altText = field.outputText(item, titleMode, internal)
        storedText = item.data.get(self.name, '')
        if storedText:
            return self.formatOutput(storedText, titleMode, altText, internal)
        return ''
    def formatOutput(self, storedText, titleMode, altText='', internal=False):
        """Return formatted text, properly escaped and with
        a link reference if not in titleMode.

        Each newline-separated path becomes its own anchor; paths
        without an explicit scheme get URLMethod prepended.
        """
        if titleMode:
            return TextFormat.formatOutput(self, storedText, titleMode,
                                           internal)
        paths = storedText.split('\n')
        results = []
        for url in paths:
            path = url
            if not URLFormat.hasMethodRe.match(path):
                path = u'%s%s' % (self.URLMethod, path)
            path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict),
                                             altText or url)
            results.append(TextFormat.formatOutput(self, path, titleMode,
                                                   internal))
        return u'<br />'.join(results)
    def xslText(self):
        """Return what we need to write into an XSL file for this type"""
        return u'<xsl:for-each select = "./%s">%s<xsl:choose>'\
               '<xsl:when test="contains(., \':\')"><a href="{.}">'\
               '<xsl:value-of select="."/></a></xsl:when><xsl:otherwise>'\
               '<a href="%s{.}"><xsl:value-of select="."/></a>'\
               '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \
               (self.name, xslEscape(self.prefix), self.URLMethod,
                xslEscape(self.suffix))
class PathFormat(URLFormat):
    """Link-type format for a local file path (file:/// scheme)"""
    typeName = 'Path'
    URLMethod = u'file:///'
    hasFileBrowse = True
    def __init__(self, name, attrs={}):
        """Pass any format, prefix, suffix, html settings via attrs"""
        URLFormat.__init__(self, name, attrs)
class EmailFormat(URLFormat):
    """Link-type format for an email address (mailto: scheme)"""
    typeName = 'Email'
    URLMethod = u'mailto:'
    def __init__(self, name, attrs={}):
        """Pass any format, prefix, suffix, html settings via attrs"""
        URLFormat.__init__(self, name, attrs)
class InternalLinkFormat(URLFormat):
    """Link-type format for an in-document anchor ('#' prefix)"""
    typeName = 'InternalLink'
    URLMethod = u'#'
    def __init__(self, name, attrs={}):
        """Pass any format, prefix, suffix, html settings via attrs"""
        URLFormat.__init__(self, name, attrs)
class ExecuteLinkFormat(URLFormat):
    """Holds format info for an executable field.

    Renders exec: links only for internal views; external output
    falls back to plain text formatting.
    """
    typeName = 'ExecuteLink'
    URLMethod = u'exec:'
    hasFileBrowse = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        URLFormat.__init__(self, name, attrs)
    def formatOutput(self, storedText, titleMode, altText='', internal=False):
        """Return formatted text, properly escaped and with
        a link reference if not in titleMode"""
        if titleMode or not internal:
            return TextFormat.formatOutput(self, storedText, titleMode,
                                           internal)
        paths = storedText.split('\n')
        results = []
        for url in paths:
            # add prefix/suffix within the executable path:
            url = TextFormat.formatOutput(self, url, titleMode, internal)
            path = url
            if not URLFormat.hasMethodRe.match(path):
                path = u'%s%s' % (self.URLMethod, path)
            results.append(u'<a href="%s">%s</a>' %
                           (escape(path, treedoc.escDict), altText or url))
        return u'<br />'.join(results)
    def xslText(self):
        """Return what we need to write into an XSL file for this type.

        Uses the plain-text XSL form, not URLFormat's link form.
        """
        return TextFormat.xslText(self)
class PictureFormat(TextFormat):
    """Holds format info for a field with a link to a picture"""
    typeName = 'Picture'
    sortSequence = 8
    htmlOption = False
    hasFileBrowse = True
    def __init__(self, name, attrs={}):
        """Any format, prefix, suffix, html info in attrs dict"""
        TextFormat.__init__(self, name, attrs)
    def initFormat(self):
        """Called by base init, after class change or format text change"""
        self.html = True
    def formatOutput(self, storedText, titleMode, internal=False):
        """Return formatted text, properly escaped and with
        a link to the picture if not in titleMode.

        Each newline-separated path becomes its own <img> tag.
        """
        if titleMode:
            return TextFormat.formatOutput(self, storedText, titleMode,
                                           internal)
        paths = storedText.split('\n')
        results = ['<img src="%s">' % escape(url, treedoc.escDict) for url
                   in paths]
        return u'<br />'.join(results)
class ParentFormat(TextFormat):
    """Placeholder format for references to specific parents.

    parentLevel gives the number of levels to climb before looking
    up the named field.
    """
    typeName = 'Parent'
    def __init__(self, name, parentLevel=1):
        TextFormat.__init__(self, name, {})
        self.parentLevel = parentLevel
    def sepName(self, englishOnly=False):
        """Return name enclosed with {* *} separators.

        One leading '*' per parent level.
        """
        name = englishOnly and self.enName or self.name
        return u'{*%s%s*}' % (self.parentLevel * '*', name)
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field"""
        for num in range(self.parentLevel):
            item = item.parent
            if not item:
                return ''
        field = item.nodeFormat().findField(self.name)
        if not field:
            return ''
        return field.outputText(item, titleMode, internal)
    def xslText(self):
        """Return what we need to write into an XSL file for this type"""
        return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../',
                                                   self.name)
    def xslTestText(self):
        """Return XSL file test for data existence"""
        return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)
class AncestorFormat(TextFormat):
    """Placeholder format for references to any parent with data.

    Climbs the tree until some ancestor's node format defines the
    named field.
    """
    typeName = 'Ancestor'
    def __init__(self, name):
        TextFormat.__init__(self, name, {})
        self.parentLevel = 1000   # effectively "any ancestor"
    def sepName(self, englishOnly=False):
        """Return name enclosed with {*? *} separators"""
        name = englishOnly and self.enName or self.name
        return u'{*?%s*}' % (name)
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field"""
        field = None
        while not field:
            item = item.parent
            if item:
                field = item.nodeFormat().findField(self.name)
            else:
                return ''
        return field.outputText(item, titleMode, internal)
    def xslText(self):
        """Return what we need to write into an XSL file for this type"""
        return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name
    def xslTestText(self):
        """Return XSL file test for data existence"""
        return u'normalize-space(ancestor::*/%s)' % self.name
class ChildFormat(TextFormat):
    """Placeholder format for references to a sequence of child data.

    Collects the named field's output from every direct child and
    joins the non-empty results with the document's child separator.
    """
    typeName = 'Child'
    def __init__(self, name):
        TextFormat.__init__(self, name, {})
        self.parentLevel = -1   # negative level marks child references
    def sepName(self, englishOnly=False):
        """Return name enclosed with {*& *} separators"""
        name = englishOnly and self.enName or self.name
        return u'{*&%s*}' % (name)
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field"""
        result = []
        for child in item.childList:
            field = child.nodeFormat().findField(self.name)
            if field:
                text = field.outputText(child, titleMode, internal)
                if text:
                    result.append(text)
        return globalref.docRef.childFieldSep.join(result)
    def xslText(self):
        """Return what we need to write into an XSL file for this type"""
        return u'<xsl:value-of select="child::*/%s"/>' % self.name
    def xslTestText(self):
        """Return XSL file test for data existence"""
        return u'normalize-space(child::*/%s)' % self.name
class CountFormat(TextFormat):
    """Placeholder format for a count of children at the given level"""
    typeName = 'Count'
    def __init__(self, name, level):
        TextFormat.__init__(self, name, {})
        # negative parentLevel encodes a descendant level
        self.parentLevel = -level
    def sepName(self, englishOnly=False):
        """Return name enclosed with {*# *} separators"""
        name = (englishOnly and self.enName) or self.name
        return u'{*#%s*}' % name
    def outputText(self, item, titleMode, internal=False):
        """Return formatted text for this field"""
        count = len(item.descendLevelList(-self.parentLevel))
        return repr(count)
|
7,866 | 3ffcab4b36c6ca05f1e667c628ebb873ebdc0d25 | # -*- coding: utf-8 -*-
import serial
import time
import argparse
def write_command(serial, comm, verbose = False, dt = None):
    """Encode a command string and send it over the given serial port."""
    # Optionally echo the outgoing command (with a timestamp when profiling).
    if verbose and comm != "":
        if dt is None:
            message = "{} \t\t-> {}".format(comm, serial.port)
        else:
            message = "{} \t\t-> {} at {:2.3f} ms".format(comm, serial.port, dt)
        print(message)
    serial.write(comm.encode())
def read_buffer(serial):
    """Read everything waiting in the serial buffer and decode it to str."""
    return serial.read_all().decode()
def read_and_print(serial):
    """Fetch the device's pending serial response and print it if non-empty."""
    response = read_buffer(serial)
    if response:
        print(response)
def runcommands(cs, ts, ps, serials, verbose = False, profiling = False):
    """ Runs a series of commands at certain specified times

    cs: list of command strings
    ts: list of send times (seconds, relative to the start of the run)
    ps: list of port names; each indexes into `serials`
    serials: dict mapping port name -> open serial connection
    verbose: echo commands sent and responses received
    profiling: include the actual elapsed time when echoing a command
    """
    if len(ts) == len(cs):
        i = 0
        t0 = time.time()
        dt = time.time() - t0 # elapsed time
        while i < len(cs):
            ser = serials[ps[i]]
            comm = cs[i]
            t = ts[i]
            # Busy-wait (polling for responses if verbose) until the elapsed
            # time passes this command's scheduled send time (0.5 ms slack).
            while (dt - t) < 0.0005:
                dt = time.time() - t0
                if verbose: read_and_print(ser)
            if profiling:
                write_command(ser, comm, verbose, dt)
            else:
                write_command(ser, comm, verbose)
            i += 1
    else:
        print('Error: Lists are not equally long. ')
def load_csv(f):
    """Parse a command schedule from an open CSV file.

    Each line is "time,command[,port]".  Returns three parallel lists:
    times (float), command strings, and port names.  A missing or blank
    port field reuses the port of the previous line (so the very first
    line must name a port, otherwise IndexError is raised).
    """
    delimiter = ','
    ts = []
    cs = []
    ps = []
    for line in f.readlines():
        values = line.strip("\n").split(delimiter)
        ts.append(float(values[0]))
        cs.append(values[1])
        # Bug fix: the original tested `len(values) <= 3`, which is also
        # true when a third field *is* present (len == 3), contradicting
        # the intent "if there isn't a third field".
        if len(values) < 3:   # no port column on this line
            values.append("") # treat it as blank
        port = values[2].strip(" ")  # take all spaces out
        if port == "":
            ps.append(ps[-1]) # use previous one if it's empty
        else:
            ps.append(port)
    return ts, cs, ps
# Command-line interface -----------------------------------------------------
parser = argparse.ArgumentParser(
    description='sends a series of commands over the serial port')
parser.add_argument('filename', type=str,
                    help='CSV file with columns for time, commands and ports')
parser.add_argument('-r', '--reps', required=False, default=1, type=int,
                    help='Number of command sequence repetitions (default: %(default)s)')
parser.add_argument('-bd', '--baudrate', required=False, default=38400, type=int,
                    help='Baudrate (default: %(default)s)')
parser.add_argument('-v', '--verbose', required=False, action='store_true',
                    help='Print Commands as they are sent (default: %(default)s)')
parser.add_argument('-p', '--profiling', required=False, action='store_true',
                    help='Show profiling information if verbose (default: %(default)s).')

args = parser.parse_args()

# Unpack the parsed options into the names used by the rest of the script,
# e.g. equivalent to running: test.csv -r 2 -bd 38400 -v -p
fname = args.filename
reps = args.reps
baudrate = args.baudrate
verbose = args.verbose
profiling = args.profiling
# Load the schedule, expand it for the requested repetitions, then open every
# referenced serial port and stream the commands.  Cleanup runs in `finally`
# blocks so the ports and the input file are released even on error.
try:
    f = open(fname, 'r')
    ts, cs, ps = load_csv(f)
    # Repeat all lists the specified number of times
    ts_rep = [] # offset each rep's times
    for r in range(reps):
        for t in ts:
            # shift repetition r by r times the schedule's last timestamp
            ts_rep.append(t + ts[-1]*r)
    cs_rep = cs*reps
    ps_reps = ps*reps
    # Try to open the serial port connections and run the commands
    try:
        # Get list of unique portnames
        ports = list(set(ps))
        serials = {} # serial connections
        for port in ports:
            ser = serial.Serial(port = port,
                                baudrate=baudrate,
                                write_timeout=0,
                                bytesize=serial.EIGHTBITS,
                                stopbits=serial.STOPBITS_ONE,
                                parity=serial.PARITY_NONE)
            serials[port] = ser
        runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)
    finally:
        # give in-flight bytes a moment to drain before closing the ports
        time.sleep(0.5)
        for ser in serials.values():
            ser.close()
finally:
    f.close()
7,867 | f29ad02f3781c7a7d2a1f0c97626dd5c7ea2417e | """
CP1404 Practical
unreliable car test
"""
from unreliable_car import UnreliableCar
def main():
    """Drive a reliable and an unreliable car side by side and show results."""
    reliable = UnreliableCar("good car", 100, 80)
    unreliable = UnreliableCar("bad car", 100, 10)
    for distance in range(10):
        print("try to drive {} km".format(distance))
        print("{:10} drove {:2}km".format(reliable.name, reliable.drive(distance)))
        print("{:10} drove {:2}km".format(unreliable.name, unreliable.drive(distance)))
    print(reliable)
    print(unreliable)


if __name__ == '__main__':
    main()
|
7,868 | 656927013d9a0254e2bc4cdf05b7cfd5947feb05 | from .proxies import Proxies
from .roles import Roles
from .products import Products
from .resourcefiles import ResourceFiles
class Apigee(object):
    """Provides easy access to all Apigee endpoint classes.

    Args:
        org_name (str): Apigee organization name
        username (str): Apigee account username
        password (str): Apigee account password
        environment (str, optional): Environment forwarded to the
            resource-files endpoint (e.g. 'test' or 'prod').
    """

    def __init__(self, org_name, username, password, environment=None):
        self.proxies = Proxies(org_name, username, password)
        self.roles = Roles(org_name, username, password)
        self.products = Products(org_name, username, password)
        # Bug fix: `environment` was referenced here but never defined, so
        # every construction raised NameError.  It is now an optional
        # keyword parameter (backward compatible, defaults to None).
        self.resourcefiles = ResourceFiles(org_name, username, password, environment)
|
7,869 | b459919e779063247c176e127368c687c903cf0f | from checkio.home.long_repeat import long_repeat
def test_long_repeat():
    # table-driven variant of the two original checks
    cases = [("sdsffffse", 4, "First"), ("ddvvrwwwrggg", 3, "Second")]
    for text, expected, label in cases:
        assert long_repeat(text) == expected, label


def test_fails_1():
    assert long_repeat("") == 0, "Empty String"


def test_fails_2():
    assert long_repeat("aa") == 2
|
7,870 | f546eb40ee8a7308ded62532731561029e5ec335 | import requests
import os
from slugify import slugify as PipSlugify
import shutil
# will install any valid .deb package
def install_debian_package_binary(package_path):
    """Install a local .deb with dpkg, then pull in missing dependencies."""
    dpkg_command = "sudo dpkg -i {package_path}".format(package_path=package_path)
    os.system(dpkg_command)
    # dpkg does not resolve dependencies; apt-get -f installs what is missing
    os.system("sudo apt-get install -f")
def download_install_deb(package_path, package_url):
    """Fetch a .deb from package_url, install it, then delete the download."""
    download_file(package_path, package_url)
    install_debian_package_binary(package_path)
    # the installed package no longer needs its downloaded archive
    remove_file(package_path)
def install_apt_packages(packages):
    """Install one or more apt packages.

    Args:
        packages: a single package-name string, or an iterable of names.
    """
    # Bug fix: `basestring` only exists in Python 2; under Python 3 this
    # line raised NameError.  `str` covers the single-package case on both.
    if not isinstance(packages, str):
        packages = " ".join(packages)
    os.system("sudo apt-get install -y {packages}".format(packages=packages))
# download a file available at source_url to target_path on the file system.
def download_file(target_path, source_url):
    """Stream the file at source_url to target_path on the file system.

    Returns True on success, False on any network or filesystem error.
    NOTE(review): HTTP error statuses (e.g. 404) still "succeed" and write
    the error body to disk; consider r.raise_for_status() — confirm intent.
    """
    try:
        # NOTE the stream=True parameter: the body is fetched in chunks
        # instead of being loaded into memory at once.
        r = requests.get(source_url, stream=True)
        with open(target_path, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()
        return True
    except (requests.RequestException, OSError):
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only network and file errors should map to False.
        return False
def write_file(path, data, mode='w'):
    """Write data to path, backing up any existing file first.

    When mode is 'a' the file is appended in place; otherwise an existing
    file is renamed to "<path>.bak" before the new contents are written.
    """
    # Bug fix: the original compared with `mode is not 'a'`.  Identity
    # comparison against a string literal is implementation-dependent (and
    # a SyntaxWarning on modern Python); use inequality instead.
    if os.path.exists(path) and mode != 'a':
        pathBAK = path + ".bak"
        os.rename(path, pathBAK)
    with open(path, mode) as handle:
        handle.write(data)
def remove_file(path, replace_with_backup=False):
    """Remove path, keeping a "<path>.bak" copy of its contents.

    With replace_with_backup=True the backup is immediately renamed back to
    path, so the file survives with the same contents and no .bak remains.
    """
    backup_path = path + ".bak"
    if os.path.exists(path):
        # Bug fix: the backup copy used to run unconditionally, crashing
        # with FileNotFoundError when `path` did not exist.
        shutil.copy(path, backup_path)
        os.remove(path)
    # replace existing with backup
    if replace_with_backup and os.path.exists(backup_path):
        # Bug fix: the arguments were reversed (os.rename(path, backup_path)),
        # which tried to rename the file that had just been deleted.
        os.rename(backup_path, path)
# abstract the library choice/implementation of slugify from the installer
def slugify(*args, **kwargs):
    """Slugify text; thin wrapper isolating the choice of slugify library."""
    return PipSlugify(*args, **kwargs)
def copy_and_backup_original(from_path, to_path):
    """Copy the directory tree at from_path to to_path, preserving any
    existing destination as "<to_path>.bak"."""
    backup_path = to_path + ".bak"
    if os.path.exists(to_path):
        os.rename(to_path, backup_path)
    shutil.copytree(from_path, to_path)
|
7,871 | 57c911c9a10f9d116f1b7099c5202377e16050f1 | from typing import *
class Solution:
    def isValidSudoku(self, board: List[List[str]]) -> bool:
        """Return True if no row, column, or 3x3 box repeats a digit.

        Only filled cells are checked ('.' means empty); the board does not
        have to be solvable.
        """
        # Improvement: one pass with three families of "seen" sets replaces
        # the original O(n) re-scan of the whole row and column per cell.
        rows = [set() for _ in range(9)]
        cols = [set() for _ in range(9)]
        boxes = [set() for _ in range(9)]
        for i in range(9):
            for j in range(9):
                val = board[i][j]
                if val == ".":
                    continue
                box = (i // 3) * 3 + j // 3
                if val in rows[i] or val in cols[j] or val in boxes[box]:
                    return False
                rows[i].add(val)
                cols[j].add(val)
                boxes[box].add(val)
        return True
|
7,872 | 85f5f9370896eac17dc72bbbf8d2dd1d7adc3a5b | """
"""
import cPickle as pickle
def convert_cpu_stats_to_num_array(cpuStats):
    """
    Given a list of statistics (tuples[timestamp, total_cpu, kernel_cpu, vm, rss])
    Return five numpy arrays, one per column.
    """
    print("Converting cpus stats into numpy array")
    # Edge case: zip(*[]) would raise on an empty log; return empty columns.
    if not cpuStats:
        return tuple(np.array([]) for _ in range(5))
    # Resolves the original TODO: corner-turn with zip(*...) instead of the
    # five-append loop (which also needed the gc.disable()/enable() hack).
    # Also fixes the Python-2-only bare `print` statement.
    timestamps, total_cpu, kernel_cpu, vm, rss = zip(*cpuStats)
    return (np.array(timestamps), np.array(total_cpu), np.array(kernel_cpu),
            np.array(vm), np.array(rss))
def plot_cpu_mem_usage_from_file(cpufile, figfile, stt=None, x_max=None, time_label=None):
    """
    Plot CPU and memory usage from a cpu log file
    parameters:
        cpufile: the full path of the cpu log file (string)
        figfile: the full path of the plot file (string)
        stt: start time stamp in seconds (Integer,
             None if let it done automatically)
        x_max: the duration of the time axis in seconds (Integer,
               None automatically set)
        time_label: full path to the application activity log (string);
                    if set, the plot draws vertical activity markers
                    (experimental feature)
    """
    reList = []
    if os.path.exists(cpufile):
        try:
            pkl_file = open(cpufile, 'rb')
            print('Loading CPU stats object from file %s' % cpufile)
            cpuStatsList = pickle.load(pkl_file)
            pkl_file.close()
            if cpuStatsList is None:
                raise Exception("The CPU stats object is None when reading from the file")
            reList += cpuStatsList
        # Bug fix: "except Exception, e" is Python-2-only syntax; the
        # "as e" form works on Python 2.6+ and Python 3.
        except Exception as e:
            ex = str(e)
            import traceback
            print('Fail to load the CPU stats from file %s: %s' % (cpufile, ex))
            traceback.print_exc()
            raise e
    else:
        print('Cannot locate the CPU stats file %s' % cpufile)
    fig = pl.figure()
    # Bug fix: the original call passed (fig, x_max, reList, stt, ...), which
    # sent x_max as the stats list.  The signature is
    # plot_cpu_mem_usage(fig, cpuStats, x_max, stt, ...).
    plot_cpu_mem_usage(fig, reList, x_max, stt, standalone=True, time_label=time_label)
    fig.savefig(figfile)
    pl.close(fig)
def plot_cpu_mem_usage(fig, cpuStats, x_max = None, stt = None,
                       standalone = False, time_label = None):
    """Plot CPU % (left axis) and memory usage (right axis) onto `fig`.

    cpuStats: list of (timestamp, total_cpu, kernel_cpu, vm, rss) tuples
    x_max: right edge of the time axis in seconds (None: use data extent)
    stt: start timestamp subtracted from all samples (None: first sample)
    standalone: use the whole figure rather than the top half
    time_label: optional activity-log path; entries become vertical markers
    """
    if standalone:
        ax1 = fig.add_subplot(111)
    else:
        ax1 = fig.add_subplot(211)
    ax1.set_xlabel('Time (seconds)', fontsize = 9)
    ax1.set_ylabel('CPU usage (% of Wall Clock time)', fontsize = 9)
    ax1.set_title('CPU and Memory usage', fontsize=10)
    ax1.tick_params(axis='both', which='major', labelsize=8)
    ax1.tick_params(axis='both', which='minor', labelsize=6)
    # get the data in numpy array
    ta, tc, kc, vm, rss = convert_cpu_stats_to_num_array(cpuStats)
    if stt is None:
        # Bug fix: this used to be `stt = ta`, which made the following
        # `ta -= stt` zero out every timestamp (and later passed an array
        # where a scalar offset is expected).  Anchor to the first sample.
        stt = ta[0]
    ta -= stt
    st = int(ta[0])
    ed = int(ta[-1])
    if x_max is None:
        x_max = ed
    elif ed > x_max:
        x_max = ed
    # create x-axis (whole integer seconds) between st and ed
    # x = np.r_[st:ed + 1]
    x = ta.astype(np.int64)
    # plot the total cpu
    ax1.plot(x, tc, color = 'g', linestyle = '-', label = 'total cpu')
    # plot the kernel cpu
    ax1.plot(x, kc, color = 'r', linestyle = '--', label = 'kernel cpu')
    # plot the virtual mem on a second (right-hand) axis in MB
    ax2 = ax1.twinx()
    ax2.set_ylabel('Memory usage (MB)', fontsize = 9)
    ax2.tick_params(axis='y', which='major', labelsize=8)
    ax2.tick_params(axis='y', which='minor', labelsize=6)
    ax2.plot(x, vm / 1024.0 ** 2, color = 'b', linestyle = ':', label = 'virtual memory')
    # plot the rss
    ax2.plot(x, rss / 1024.0 ** 2, color = 'k', linestyle = '-.', label = 'resident memory')
    mmm = max(tc)
    ax1.set_ylim([0, 1.5 * mmm])
    ax1.set_xlim([0, x_max]) # align the time axis to accommodate cpu/memory
    # Experimental: draw a vertical marker per activity-log line; each line
    # looks like "2014-08-17 04:44:24\tmajor cycle 3".
    if time_label:
        import datetime
        with open(time_label) as f:
            c = 0
            for line in f:
                fs = line.split('\t')
                aa = fs[0].replace(' ', ',').replace('-',',').replace(':',',')
                aaa = aa.split(',')
                tstamp = (datetime.datetime(int(aaa[0]),int(aaa[1]),int(aaa[2]),int(aaa[3]),int(aaa[4]),int(aaa[5])) - datetime.datetime(1970,1,1)).total_seconds()
                tstamp -= stt
                # alternate marker styles so adjacent labels stay readable
                if (c % 2 == 0):
                    delt = 0
                    co = 'k'
                    ls = 'dotted'
                else:
                    delt = 50
                    co = 'm'
                    ls = 'dashed'
                ax1.vlines(tstamp, 0, 1.5 * mmm, colors = co, linestyles=ls)
                ax1.text(tstamp - 25, 1 * mmm + delt, fs[1], fontsize = 7)
                c += 1
    ax1.legend(loc='upper left', shadow=True, prop={'size':8})
    ax2.legend(loc='upper right', shadow=True, prop={'size':8})
|
7,873 | 63a9060e9933cc37b7039833be5f071cc7bf45bf | #import getCanditatemap() from E_18_hacksub
import operator, pdb, collections, string
# Character frequency order (most to least common), space first; 69 chars.
# Order taken from https://mdickens.me/typing/theory-of-letter-frequency.html
ETAOIN = """ etaoinsrhldcumgyfpwb.,vk0-'x)(1j2:q"/5!?z346879%[]*=+|_;\>$#^&@<~{}`"""
length = 128
def getCanditatemap():
    """Return a dict mapping every 7-bit ASCII character to a zero count."""
    return {chr(code): 0 for code in range(length)}
def getLettercount(mess):
    """Count occurrences in mess of each character in the candidate map."""
    counts = getCanditatemap()
    for ch in mess:
        if ch in counts:
            counts[ch] += 1
    return counts
def getFreqOrder(mess):
    """Return all candidate characters ordered from most to least frequent
    in `mess`; ties are broken in reverse ETAOIN order (characters absent
    from ETAOIN sort last within a tie, since str.find returns -1)."""
    #get a dictionary of each letter and its frequency count
    lettertofreq = getLettercount(mess)
    # second, make a dictionary of each frequency count to each letter(s) with that frequency
    freqtochar = {}
    for i in range(length):
        i=chr(i)
        if lettertofreq[i] not in freqtochar: # look for frequencies not present
            freqtochar[lettertofreq[i]] = [i] # add if not present, else append
        else:
            freqtochar[lettertofreq[i]].append(i)
    #reverse ETAOIN order, for each list of letters (per frequency)
    for freq in freqtochar:
        freqtochar[freq].sort(key=ETAOIN.find, reverse=True)
        freqtochar[freq] = ''.join(freqtochar[freq]) # convert to string
    # sort them in order of frequency (highest count first)
    #freqpairs = sorted(freqtochar.items(), key=operator.itemgetter(0), reverse=True)
    freqpairs = collections.OrderedDict(sorted(freqtochar.items(), reverse=True))
    # extracts the values and joins them together
    freqorder = []
    #print freqtochar
    values = freqpairs.values() # grabs the values only
    for freqpair in values:
        #print freqpair
        #pdb.set_trace()
        freqorder.append(freqpair)
    return ''.join(freqorder)
def englishFreqMatch(message):
    """Score how English-like the character frequencies of `message` are.

    One point for each of the 16 most common English characters found among
    the message's 16 most frequent, plus one for each of the 16 least common
    found among its 16 least frequent (maximum score 32).
    """
    matchscore = 0
    # lower-case first: only relative frequency matters, not letter case
    freqOrder = getFreqOrder(message.lower())
    # Bug fix: the original iterated over `(ETAOIN[:16] or ETAOIN[-16:])`.
    # `or` between two non-empty strings always yields the first operand,
    # so the least-common-character comparison was dead code.
    for commonChar in ETAOIN[:16]:
        if commonChar in freqOrder[:16]:
            matchscore += 1
    for rareChar in ETAOIN[-16:]:
        if rareChar in freqOrder[-16:]:
            matchscore += 1
    return matchscore
|
7,874 | e766bba4dec0d37858f1f24083c238763d694109 | from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
import random
import itertools
doc = """
Public good game section (Rounds and feedback).
"""
class Constants(BaseConstants):
    """App-level oTree constants: routing, group structure, and savings cap."""
    name_in_url = 'public_goods'
    players_per_group = 2
    num_rounds = 2
    results_template = 'public_goods/Results_c.html'
    """Amount allocated to each player"""
    # Maximum a player may save per round; also caps Player.savings below.
    max_savings = c(5)
    multiplier = 1
class Subsession(BaseSubsession):
    def vars_for_admin_report(self):
        """Return avg/min/max savings for the oTree admin report page."""
        # Only players who actually submitted a savings value are counted.
        savings_session = [p.savings for p in self.get_players() if p.savings != None]
        if savings_session:
            return {
                'avg_saving': sum(savings_session)/len(savings_session),
                'min_saving': min(savings_session),
                'max_saving': max(savings_session),
            }
        else:
            return {
                'avg_saving': '(no data)',
                'min_saving': '(no data)',
                'max_saving': '(no data)',
            }
    def creating_session(self):
        """Seed each group's community goal and (round 1 only) endowments."""
        # self.Constants.endowment = self.session.config['endowment']
        # treatments = itertools.cycle(['control', 't1', 't2','t3'])
        # The per-player endowment comes from the session config, as currency.
        endowment = c(self.session.config['endowment'])
        for g in self.get_groups():
            g.com_goal = self.session.config['community_goal_decimal']
        if self.round_number == 1:
            for g in self.get_groups():
                # treatment = next(treatments)
                for p in g.get_players():
                    # p.participant.vars['treat'] = treatment
                    # p.treat = p.participant.vars['treat']
                    # participant.vars persists the endowment across rounds
                    p.participant.vars['endowment'] = endowment
                    p.endowment = p.participant.vars['endowment']
        # if self.round_number > 1:
        #     for p in self.get_players():
        #         p.treat = p.participant.vars['treat']
class Group(BaseGroup):
    # Fraction of the group's maximum possible savings needed for a payout.
    com_goal = models.FloatField(min=0, max=1)
    total_savings = models.CurrencyField(initial=0)
    average_savings = models.CurrencyField()
    individual_savings_share = models.FloatField()
    min_round = models.IntegerField(initial=1, doc="go back to x last round. E.g. 1 for last round")
    def set_payoffs(self):
        """Aggregate group savings and compute each player's reward.

        If the group's savings share reaches com_goal, pooled savings are
        split evenly back on top of the remaining endowment; with
        com_goal == 0 players simply keep what they did not save.
        NOTE(review): when com_goal > 0 but the goal is missed, neither
        branch runs, so rewards and endowments are never updated for the
        round — confirm this is intended.
        """
        people_in_treatment = self.get_players()
        people_in_treatment_num = len(people_in_treatment)
        self.total_savings = sum([p.savings for p in people_in_treatment])
        self.individual_savings_share = self.total_savings / (people_in_treatment_num * self.session.config['endowment'])
        self.average_savings = self.total_savings / people_in_treatment_num
        if self.com_goal > 0:
            if self.individual_savings_share >= self.com_goal:
                for p in people_in_treatment:
                    # endowment carries over between rounds via participant.vars
                    p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
                    p.financial_reward = (p.participant.vars['endowment']).to_real_world_currency(self.session) + (self.total_savings / Constants.players_per_group).to_real_world_currency(self.session)
                    p.endowment = p.participant.vars['endowment']
                    if self.round_number > self.min_round:
                        p.last_savings = p.in_round(self.round_number - self.min_round).savings
        else:
            for p in self.get_players():
                p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
                p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session)
                p.endowment = p.participant.vars['endowment']
                if self.round_number > self.min_round:
                    p.last_savings = p.in_round(self.round_number - self.min_round).savings
#
#def set_payoffs(self):
# for treatment_name in ['control', 'D', 'DTI']:
# people_in_treatment = self.get_players_by_treatment(treatment_name)
# people_in_treatment_num = len(people_in_treatment)
# total_savings = sum([p.savings for p in people_in_treatment])
# individual_savings_share = total_savings / (people_in_treatment_num * self.session.config['endowment'])
# average_savings = total_savings / people_in_treatment_num
#
# if self.com_goal > 0:
# if individual_savings_share >= self.com_goal:
# for p in people_in_treatment:
# p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
# p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session) + (self.total_savings / Constants.players_per_group).to_real_world_currency(self.session)
# p.endowment = p.participant.vars['endowment']
# if self.round_number > self.min_round:
# p.last_savings = p.in_round(self.round_number - self.min_round).savings
# else:
# for p in self.get_players_by_treatment(treatment_name):
# p.participant.vars['endowment'] = p.participant.vars['endowment'] - p.savings
# p.financial_reward = p.participant.vars['endowment'].to_real_world_currency(self.session)
# p.endowment = p.participant.vars['endowment']
# if self.round_number > self.min_round:
# p.last_savings = p.in_round(self.round_number - self.min_round).savings
#
class Player(BasePlayer):
    """Per-round player state: treatment, endowment, savings, and reward."""
    treatment = models.CharField(doc="Treatment of each player")
    endowment = models.CurrencyField(
        min=0,
        doc="endowment by each player"
    )
    # Bug fix: a dangling "peers =" assignment with no right-hand side made
    # this module a SyntaxError.  The line was removed; re-add it with a
    # proper models.*Field definition if peer data is actually needed.
    savings = models.CurrencyField(min=0, max=Constants.max_savings, doc="Savings by each player",choices=[c(0), c(2), c(4)])
    financial_reward = models.FloatField(min=0)
    last_savings = models.CurrencyField(initial=0)
|
7,875 | 5ee1d8ef7ec4b191e0789ceb9c6dd2d58af526a0 | # -*- coding: utf-8 -*-
import pytest
from bravado.client import ResourceDecorator
from bravado.client import SwaggerClient
def test_resource_exists(petstore_client):
    """A spec tag must surface on the client as a ResourceDecorator."""
    resource = petstore_client.pet
    assert type(resource) is ResourceDecorator
def test_resource_not_found(petstore_client):
    """Accessing an unknown resource raises AttributeError naming it."""
    with pytest.raises(AttributeError) as excinfo:
        petstore_client.foo
    message = str(excinfo.value)
    assert 'foo not found' in message
@pytest.fixture
def client_tags_with_spaces():
    """Client built from a minimal spec whose only tag contains a space."""
    spec = {
        'swagger': '2.0',
        'info': {'version': '', 'title': 'API'},
        'paths': {
            '/ping': {
                'get': {
                    'operationId': 'ping',
                    'responses': {'200': {'description': 'ping'}},
                    'tags': ['my tag'],
                }
            }
        },
    }
    return SwaggerClient.from_spec(spec)
def test_get_resource(client_tags_with_spaces):
    """_get_resource must resolve a tag name containing a space."""
    resource = client_tags_with_spaces._get_resource('my tag')
    assert type(resource) is ResourceDecorator
|
7,876 | 07b6ded9b4841bdba62d481664a399f0b125fcbf | import pandas as pd;
import time;
import matplotlib.pyplot as plt;
import matplotlib.cm as cm
import matplotlib.patches as mpatch;
import numpy as np;
import sys;
sys.path.append("/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test")
import bettersankey as bsk;
datapath = "/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/"

# Load the 2015 ATUS activity and respondent tables and join on the case id.
print("reading...")
acttable = pd.read_csv(datapath + "atusact_2015/atusact_2015.dat")
infotable = pd.read_csv(datapath + "atusresp_2015/atusresp_2015.dat")
print("joining...")
jointable = pd.merge(acttable, infotable, on='TUCASEID')

#tiermode='TRTIER2'
tiermode = 'TRCODE'

# Build one row per consecutive activity pair (origin -> dest) within a case.
trans = pd.DataFrame()
print("processing...")
trans['case'] = jointable['TUCASEID']
trans['caseshift'] = jointable['TUCASEID'].shift(-1)
trans['step'] = jointable['TUACTIVITY_N']
trans['day'] = jointable['TUDIARYDAY']
trans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x/60.0))
trans['origin'] = jointable[tiermode]
trans['dest'] = jointable[tiermode].shift(-1)
# "child present" flags for the origin activity and the (shifted) destination
trans['corigin'] = jointable.apply((lambda x: (x['TUCC5'] == 1) or (x['TUCC5B'] == 1) or (x['TUCC7'] == 1) or (x['TUCC8'] == 1)), axis=1)
trans['cdest'] = trans['corigin'].shift(-1)
# Drop the final row (no successor) and pairs that straddle two cases.
trans = trans[trans.caseshift.notnull()]
trans['caseshift'] = trans['caseshift'].apply(lambda x: int(x))
trans['dest'] = trans['dest'].apply(lambda x: int(x))
trans = trans[trans.case == trans.caseshift]
trans.drop('caseshift', axis=1, inplace=True)
trans.to_csv(datapath + "transitions.csv")
print(len(set(trans['dest'])))

s = trans.groupby(['origin', 'dest']).size()
# s.to_csv(datapath + "transitioncounts.csv")

print("plotting...")
# Bug fix: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement and returns the same ndarray.
v = s.unstack().to_numpy()
v[np.isnan(v)] = 0.0
logv = np.log10(v)
logv[np.isneginf(logv)] = 0.0
print("Max value:", np.max(v), " (", np.max(logv), ")")
plt.pcolormesh(logv, cmap='Blues')
plt.colorbar()
plt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])
plt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0], rotation=45)
plt.show()
exit()
7,877 | df518fd719b7eafffd8fee92c926d4d24b65ce18 | import os
import json
import pathlib
from gql import gql, Client
from gql.transport.aiohttp import AIOHTTPTransport
# Select your transport with a defined url endpoint
# Synchronous GraphQL client against the public NBA Top Shot endpoint.
transport = AIOHTTPTransport(url="https://public-api.nbatopshot.com/graphql")
# Create a GraphQL client using the defined transport
client = Client(transport=transport, fetch_schema_from_transport=True)
# Set the directory name (output folder for one JSON file per set)
DIR = "graphqlData"
# Set Query counter
count = 0
# Hard-coded Top Shot set UUIDs, grouped by series.
setsId_s1 = [
    "28eddc41-6a11-4ff8-8ec6-15041aa01e80",
    "c561f66b-5bd8-451c-8686-156073c3fb69",
    "a3a4f935-4d05-4166-b8c4-ce8e52eb3ca3",
    "7b797690-5b53-45a7-b972-bd2d5152654a",
    "12a8288a-addc-4e5c-8af7-b3ba6e5161d4",
    "a494c64e-9e93-418c-8934-f331ee47a39b",
    "feead270-621c-4cde-baac-2f6834e9e594",
    "d2378dc1-1168-410b-893d-e084170a402e",
    "a156f083-e902-49d3-a113-bd61702c336a",
    "d4712d31-9030-40de-b1a6-1fb9964348f3",
    "5f85e04f-798f-434c-89d4-2b0a575bd652",
    "252e83ac-b3a4-407e-82d2-138beb60b5b9",
    "9c8202c7-698b-4f44-b029-b70ddc49e9dc",
    "dd7c595c-5a1b-4f43-8493-db0a2bbcc5aa",
    "3a0ae6ce-f22e-4d98-b1fe-906f859df983",
    "4e166b27-3099-44c3-9de3-cac2b9751692",
    "18b2d80e-d38d-4678-9b7f-c2bfb223259e",
    "2dbc545a-25a5-4208-8e89-bbb6c3e3a364",
    "2ab08547-9f62-4ff4-aff9-1bdc0de8fa3e",
    "320cae53-d585-4e74-8a66-404fa3543c19",
    "814c5183-596f-41d7-9135-c6b29faa9c6d",
    "b73fe6f1-ae28-468b-a4b3-4adb68e7d6bc",
    "827f9328-03aa-4cb5-97cd-7b5f2c2386fd"
]
setsId_s2 = [
    "757f23fd-f7ae-465c-a006-f09dcfd5dbd5",
    "496d52b8-8d6c-4071-8672-d18551f86a3e",
    "208ae30a-a4fe-42d4-9e51-e6fd1ad2a7a9",
    "122b048d-585e-4c63-8275-c23949576fd6",
    "708a6f60-5c93-406e-854f-50dd6734c0dd",
    "f493db4a-a775-4d6e-be8a-56fae509a92d",
    "0a528e81-5bb0-4bf8-a7f9-6dbd183528ce",
    "737f9997-8817-4a74-9c13-88b99c37d118",
    "b2605806-0d47-439f-ba72-784897470bb0",
    "33a4a3a3-a32c-4925-a4e8-7d24e56b105e",
    "54bc2e0d-91e9-4f4c-9361-a8d7eeefe91e",
    "ad8e85a4-2240-4604-95f6-be826966d988"
]
setsIdList = [setsId_s1, setsId_s2]
# Make a directory if not exist
pathlib.Path(DIR).mkdir(parents=True, exist_ok=True)
print("--------Query Topshot GraphQL Endpoint--------")
# For every known set id: query the set's metadata and plays, then dump the
# raw response as "<set name> S<series>.json" under DIR.
for setsId in setsIdList:
    for setId in setsId:
        count += 1
        query = gql(
            """
            {
                getSet (input: {setID: "%s"}) {
                    set {
                        id
                        flowId
                        flowName
                        flowSeriesNumber
                        plays {
                            id
                            description
                            flowID
                            stats {
                                playerName
                                playCategory
                                primaryPosition
                            }
                        }
                    }
                }
            }
            """ % setId
        )
        # Execute the query on the transport
        result = client.execute(query)
        # Configure json filename and save path from the set's name + series
        setName = result["getSet"]["set"]["flowName"]
        setSeries = result["getSet"]["set"]["flowSeriesNumber"]
        setName += " S" + str(setSeries) + ".json"
        path = os.path.join(DIR, setName)
        # Write files to save path
        with open(path, 'w') as outfile:
            json.dump(result, outfile, indent=4)
        print(f"Finished writing file: {setName}")
print()
print(f"Total query: {count}")
print("--------Querying COMPLETED.--------")
print()
|
7,878 | ccc2a976d06e2fa6c91b25c4f95a8f0da32e9b5e |
"""
Author: Yudong Qiu
Functions for solving unrestricted Hartree-Fock
"""
import numpy as np
from qc_python import basis_integrals
from qc_python.common import chemical_elements, calc_nuclear_repulsion
def solve_unrestricted_hartree_fock(elems, coords, basis_set, charge=0, spinmult=1, maxiter=150, enable_DIIS=True, verbose=False):
    """Solve the unrestricted Hartree-Fock (UHF) SCF equations.

    elems: element symbols; coords: matching coordinates; basis_set: basis
    name understood by `basis_integrals`.  charge/spinmult select the
    electronic state; maxiter bounds the SCF loop; enable_DIIS turns on
    Pulay convergence acceleration.

    Returns a dict with nuclear/electronic/total energies and, per spin,
    orbital energies, MO coefficients, and density matrices.
    Raises RuntimeError on impossible charge/spin or SCF non-convergence.
    """
    # Compute the number of electrons in the system
    n_electron = sum(chemical_elements.index(e) for e in elems) - charge
    if verbose:
        print("This system has a total of %d electrons" % n_electron)
    n_single_e = spinmult - 1
    if (n_electron+n_single_e) % 2 != 0:
        raise RuntimeError("The specified charge %d and spinmult %d is impossible!" % (charge, spinmult))
    # number of alpha and beta orbitals
    n_a = int((n_electron + n_single_e) / 2)
    n_b = n_electron - n_a
    # compute nuclear repulsion energy
    E_nuc = calc_nuclear_repulsion(elems, coords)
    # compute one-electron integral matrices
    Smat, Tmat, Vmat = basis_integrals.build_one_e_matrices(elems, coords, basis_set)
    Hmat = Tmat + Vmat
    # check if we have enough basis functions to hold all the electrons
    if n_a > len(Smat):
        raise RuntimeError("Number of basis functions is smaller than number of alpha orbitals")
    # build two-electron integral tensor g = (pq|rs)
    G_ao = basis_integrals.build_two_electron_tensor(elems, coords, basis_set)
    if verbose:
        print("One Electron Integrals Calculated:")
        print("\nOverlap matrix S")
        print(Smat)
        print("\nKinetic energy matrix T")
        print(Tmat)
        print("\nNuclear attraction matrix V")
        print(Vmat)
        print("\nCore Hamiltonian matrix H = T + V")
        print(Hmat)
        print("\nTwo-electron Integrals G")
        print(G_ao)
    # Solve the FC = ESC equation by converting it to Ft C' = E C'
    # Diagonalize overlap matrix and form S^(-1/2) matrix.
    # Bug fix: np.linalg.eig was used here, which may return complex and/or
    # unsorted eigenpairs for a (symmetric) overlap matrix and then break
    # the fractional power below; eigh guarantees real output.
    Seigval, Seigvec = np.linalg.eigh(Smat)
    Shalf = np.diag(Seigval**-0.5)
    Shalf = np.dot(Seigvec, np.dot(Shalf, Seigvec.T))
    # intial guess density
    Dmat_a = np.zeros_like(Smat)
    Dmat_b = np.zeros_like(Smat)
    E_hf = 0
    converged = False
    # DIIS bookkeeping: keep the last n_err_mat Fock/error matrices and
    # start extrapolating once diis_start_n of them are collected.
    if enable_DIIS is True:
        n_err_mat = 6
        diis_start_n = 4
        diis_err_mats = []
        diis_fmats_a = []
        diis_fmats_b = []
        if verbose:
            print(" *** DIIS Enabled ***")
    if verbose:
        print("\n *** SCF Iterations *** ")
        print("Iter         HF Energy        delta E          RMS |D|")
        print("-------------------------------------------------------")
    for i in range(maxiter):
        # Coulomb part is shared; each spin gets its own exchange term.
        Fmat = Hmat + np.einsum("rs,pqrs->pq",(Dmat_a+Dmat_b),G_ao)
        Fmat_a = Fmat - np.einsum("rs,prqs->pq",Dmat_a,G_ao)
        Fmat_b = Fmat - np.einsum("rs,prqs->pq",Dmat_b,G_ao)
        if enable_DIIS and i > 0:
            diis_fmats_a.append(Fmat_a)
            diis_fmats_a = diis_fmats_a[-n_err_mat:]
            diis_fmats_b.append(Fmat_b)
            diis_fmats_b = diis_fmats_b[-n_err_mat:]
            # DIIS error = [F, D S] commutator, summed over both spins
            FDS_a = np.einsum("pi,ij,jq->pq",Fmat_a,Dmat_a,Smat)
            SDF_a = np.einsum("pi,ij,jq->pq",Smat,Dmat_a,Fmat_a)
            FDS_b = np.einsum("pi,ij,jq->pq",Fmat_b,Dmat_b,Smat)
            SDF_b = np.einsum("pi,ij,jq->pq",Smat,Dmat_b,Fmat_b)
            diis_err_mats.append(FDS_a-SDF_a+FDS_b-SDF_b)
            diis_err_mats = diis_err_mats[-n_err_mat:]
            n_diis = len(diis_err_mats)
            if n_diis >= diis_start_n:
                Fmat_a = DIIS_extrapolate_F(diis_err_mats, diis_fmats_a)
                Fmat_b = DIIS_extrapolate_F(diis_err_mats, diis_fmats_b)
        # solve the alpha HF equation F_a C_a = e_a S C_a
        Feigval_a, Cmat_a = solve_FCeSC(Fmat_a, Shalf)
        C_occ = Cmat_a[:, :n_a]
        Dmat_a_new = np.dot(C_occ, C_occ.T)
        # solve the beta HF equation F_b C_b = e_b S C_b
        Feigval_b, Cmat_b = solve_FCeSC(Fmat_b, Shalf)
        C_occ = Cmat_b[:, :n_b]
        Dmat_b_new = np.dot(C_occ, C_occ.T)
        E_hf_new = 0.5 * (np.einsum("pq,pq", Dmat_a, Hmat+Fmat_a) + np.einsum("pq,pq", Dmat_b, Hmat+Fmat_b))
        dE = E_hf_new - E_hf
        D_rms = np.sqrt(np.mean((Dmat_a_new-Dmat_a)**2)) + np.sqrt(np.mean((Dmat_b_new-Dmat_b)**2))
        # update E_hf and Dmat
        E_hf = E_hf_new
        Dmat_a = Dmat_a_new
        Dmat_b = Dmat_b_new
        # print iteration information
        if verbose is True:
            print(" %-4d %17.10f %14.4e %14.4e" %(i, E_hf, dE, D_rms))
        # check convergence on both the energy and the density change
        if abs(dE) < 1.0E-10 and abs(D_rms) < 1.0E-8:
            converged = True
            break
    if not converged:
        print("SCF didn't converge in %d iterations!" % maxiter)
        raise RuntimeError
    E_total = E_nuc + E_hf
    if verbose:
        print("\nSCF converged!")
        print("\nOrbital Energies (Eh) and coefficients for Alpha electrons")
        print('E:  '+''.join(["%17.7f"%e for e in Feigval_a]))
        print('-' * (17 * len(Feigval_a) + 4))
        for i,row in enumerate(Cmat_a):
            print('c%-3d'%i + ''.join(["%17.7f"%c for c in row]))
        print("\nOrbital Energies (Eh) and coefficients for Beta electrons")
        print('E:  '+''.join(["%17.7f"%e for e in Feigval_b]))
        print('-' * (17 * len(Feigval_b) + 4))
        for i,row in enumerate(Cmat_b):
            print('c%-3d'%i + ''.join(["%17.7f"%c for c in row]))
        print("\nNuclear Repulsion Energy =  %17.10f Eh" % E_nuc)
        print("Total Electronic Energy  =  %17.10f Eh" % E_hf)
        print("Final Total Energy       =  %17.10f Eh" % E_total)
    return {"E_nuc":E_nuc, "E_hf": E_hf, "E_total":E_total, "E_orbs_a": Feigval_a, "Cmat_a": Cmat_a, "Dmat_a": Dmat_a,
            "E_orbs_b": Feigval_b, "Cmat_b": Cmat_b, "Dmat_b": Dmat_b}
def solve_FCeSC(Fmat, Shalf):
    """Solve FC = eSC via symmetric orthogonalization.

    Transforms F with S^(-1/2), diagonalizes, and back-transforms the
    eigenvectors.  Returns (ascending eigenvalues, coefficient matrix).
    """
    F_ortho = np.dot(Shalf, np.dot(Fmat, Shalf))
    orbital_energies, orbital_vectors = np.linalg.eigh(F_ortho)
    # keep eigenpairs sorted by energy (eigh already returns ascending,
    # but sort explicitly to make the contract unmistakable)
    order = orbital_energies.argsort()
    orbital_energies = orbital_energies[order]
    orbital_vectors = orbital_vectors[:, order]
    return orbital_energies, np.dot(Shalf, orbital_vectors)
def DIIS_extrapolate_F(diis_err_mats, diis_fmats):
    """Build the DIIS-extrapolated Fock matrix.

    Solves the Pulay B-matrix equations (with a Lagrange-multiplier row
    enforcing sum of coefficients = 1) and returns the coefficient-weighted
    combination of the stored Fock matrices.
    """
    n_diis = len(diis_err_mats)
    assert n_diis == len(diis_fmats), 'Number of Fock matrices should equal to number of error matrices'
    # B[i, j] = <e_i | e_j>; the extra row/column holds the constraint.
    Bmat = -np.ones([n_diis + 1, n_diis + 1])
    for row in range(n_diis):
        for col in range(row, n_diis):
            overlap = np.dot(diis_err_mats[row].ravel(), diis_err_mats[col].ravel())
            Bmat[row, col] = Bmat[col, row] = overlap
    Bmat[-1, -1] = 0
    # Solve Bmat * C = [0, ..., 0, -1]
    rhs = np.zeros(n_diis + 1)
    rhs[-1] = -1
    coeffs = np.linalg.solve(Bmat, rhs)
    # Weighted sum of stored Fock matrices (last entry is the multiplier).
    new_Fmat = np.zeros_like(diis_fmats[-1])
    for k in range(n_diis):
        new_Fmat = new_Fmat + coeffs[k] * diis_fmats[k]
    return new_Fmat
7,879 | 0ceb9eac46e3182821e65a1ae3a69d842db51e62 |
# Channel status codes (names suggest a connect/open/exit life cycle —
# confirm exact transitions against the code that consumes them).
STATUS_DISCONNECT = 0
STATUS_CONNECTED = 1
STATUS_OPEN_CH_REQUEST = 2
STATUS_OPENED = 3
STATUS_EXITING = 4
STATUS_EXITTED = 5
# Content type of the streamed payload.
CONTENT_TYPE_IMAGE = 0
CONTENT_TYPE_VIDEO = 1
# Generic operation result codes.
STATUS_OK = 0
STATUS_ERROR = 1
class Point(object):
    """A simple 2-D point with ``x`` and ``y`` coordinates."""

    def __init__(self, x=0, y=0):
        """Store the coordinates; both default to the origin (0, 0)."""
        self.x, self.y = x, y
class ObjectDetectionResult(object):
    """One detection result: class id, confidence, bounding box and label text."""

    def __init__(self, ltx=0, lty=0, rbx=0, rby=0, text=None):
        """Create a detection with a box from (ltx, lty) to (rbx, rby)."""
        self.object_class = 0
        self.confidence = 0
        # Bounding box corners: left-top and right-bottom.
        self.lt = Point(ltx, lty)
        self.rb = Point(rbx, rby)
        self.result_text = text

    def IsRectInvalid(self):
        """Return True when any corner is negative or the corners are swapped."""
        corners = (self.lt.x, self.lt.y, self.rb.x, self.rb.y)
        if any(coord < 0 for coord in corners):
            return True
        return self.lt.x > self.rb.x or self.lt.y > self.rb.y
|
7,880 | f7283750923e1e430ff1f648878bbb9a0c73d2c4 | from settings import *
# Discord help text (French), formatted with the bot's command prefix.
# The string is user-facing output and must not be altered or translated.
helpMessage = '''
**Vocal / Musique**
`{0}join`
Va rejoindre le salon vocale dans laquelle vous êtes.
`{0}leave`
Va partir du salon vocale dans laquelle vous êtes.
`{0}play [YouTube Url]` *ou* `{0}play [musique ou video à rechercher]`
Commencera à jouer l'audio de la vidéo / chanson fournie.
`{0}pause`
Mettra en pause le flux audio actuel.
`{0}resume`
Va reprendre le flux audio actuel.
`{0}stop`
Arrêter et terminer le flux audio.
~~**=========================================**~~
**Administrateur**
`{0}invite`
Envoie un message personnel avec le lien d'invitation du bot. (Ne fonctionnera que pour le propriétaire du bot.)
`{0}shutdown`
Va faire la déconnexion et l'arrêt du bot. (Ne fonctionnera que pour le propriétaire du bot.)
`{0}status [status here]`
Définira le statut de jeu du bot. Ne fonctionnera que pour le propriétaire du bot. (Ne fonctionnera que pour le propriétaire du bot.)
~~**=========================================**~~
**Mini-Games**
`{0}joke`
Postes une blague aléatoire Chuck Norris.
`{0}8ball`
Pose n'importe quelle question à 8-Ball.
`{0}coinflip`
Va retourner une pièce et afficher le résultat.
`{0}roll [# of dice] D[# of sides] Example: !roll 3 D6`
Va lancer les dés spécifiés et poster le résultat.
`{0}slots`
Va poster un résultat de machine à sous.
~~**=========================================**~~
**Random Commandes**
`{0}cat`
Va poster une image de chat aléatoire ou gif.
`{0}catfact (ACTUELLEMENT HORS DE COMMANDE INDISPONIBLE)`
Va poster un fait de chat au hasard.
`{0}catgif`
Va poster un gif de chat aléatoire.
`{0}dog`
Va poster une image de chien aléatoire.
`{0}rabbit`
Va poster une image de lapin aléatoire.
`{0}face`
Poste un visage random depuis une DB de +270 visages
~~**=========================================**~~
**Jeux**
`{0}hots [hotslogs player ID]` - Example: !hots 3141592
Publiera le MMR du joueur pour le match rapide et la ligue des héros.
`{0}gwent [Nom de la Carte]` - Example: !gwent Geralt
Va poster la description de la carte et l'image de la carte gwent. A une longueur de recherche maximale de 10 caractères.'''.format(config.COMMANDPREFIX)
7,881 | 5e14eeaa3c79bfdd564f3bfd1575c9bbf1a3773d | """Command generator for running a script against a BigQuery cluster.
Contains the method to compile the BigQuery specific script execution command
based on generic arguments (sql script, output destination) and BigQuery
specific arguments (flag values).
"""
__author__ = 'p3rf@google.com'
from absl import flags
# BigQuery location (project + dataset) of the table the queries run against.
# Both flags are mandatory; absl aborts at startup if either is missing.
flags.DEFINE_string('bq_project_id', None, 'Project Id which contains the query'
                    ' dataset and table.')
flags.DEFINE_string('bq_dataset_id', None, 'Dataset Id which contains the query'
                    ' table.')
flags.mark_flags_as_required(['bq_project_id', 'bq_dataset_id'])
FLAGS = flags.FLAGS
def generate_provider_specific_cmd_list(script, driver, output, error):
    """Method to compile the BigQuery specific script execution command.

    Arguments:
      script: SQL script which contains the query.
      driver: Driver that contains the BigQuery specific script executor.
      output: Output log file.
      error: Error log file.

    Returns:
      Command list to execute the supplied script.
    """
    # The driver expects: project, dataset, script, then the two log targets.
    return [driver, FLAGS.bq_project_id, FLAGS.bq_dataset_id,
            script, output, error]
|
7,882 | e877f16e604682488d85142174ce4f3f6cee3f18 | from sys import argv
from pyspark import SparkContext
import json
import re
import math
# NOTE(review): ``_datetime`` is the private C implementation module; the
# conventional import is ``from datetime import datetime``.
from _datetime import datetime
start_time = datetime.now()
# Command-line arguments: review JSON, output model file, stopword list.
input_file = argv[1]
model_file = argv[2]
stopwords = argv[3]
sc = SparkContext(appName='inf553')
# One JSON review object per input line.
lines = sc.textFile(input_file).map(lambda x: json.loads(x))
# Stopwords as (word, 1) pairs so they can be removed with subtractByKey.
stopwords = sc.textFile(stopwords).map(lambda x: (x, 1))
def tf_idf(words, total_docs=None, doc_freq=None):
    """Score a document's words by TF-IDF and return the 200 best.

    TF is the augmented frequency (count / max count in this document) and
    IDF is log2(total_docs / number of documents containing the word).

    :param words: list of tokens for one document (may contain duplicates).
    :param total_docs: corpus size; defaults to the module-level ``N``.
    :param doc_freq: mapping word -> document frequency; defaults to the
        module-level ``n_dict``.
    :return: list of (word, score) pairs, highest score first, at most 200.
    """
    if total_docs is None:
        total_docs = N
    if doc_freq is None:
        doc_freq = n_dict
    word_dict = {}
    for w in words:
        word_dict[w] = word_dict.get(w, 0) + 1
    max_freq = max(word_dict.values())
    # Iterate the unique words only.  The original looped over ``words`` and
    # re-applied the TF-IDF transform once per occurrence, so any word
    # appearing more than once had its score corrupted.
    for w in word_dict:
        word_dict[w] = (word_dict[w] / max_freq) * math.log(total_docs / doc_freq[w], 2)
    ranked = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)
    return ranked[:200]
# Tokenize every business's concatenated review text: strip quoting artefacts
# and escape sequences, blank out punctuation/digits, lower-case, then split.
b_text = lines.map(lambda x: (x['business_id'], x['text']))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], str(x[1]).replace('!\'', '')))\
    .map(lambda x: (x[0], x[1].replace('.\'', ''))) \
    .map(lambda x: (x[0], x[1].replace(', \'', ''))) \
    .map(lambda x: (x[0], x[1].replace('\\n',''))) \
    .map(lambda x: (x[0], x[1].replace('\\\'',"'")))\
    .map(lambda x: (x[0], re.sub('[{}+=~*%#$@(\-/[,.!?&:;\]0-9)"]', ' ', str(x[1]).lower()))) \
    .mapValues(lambda x: x.split())
total_words_num = b_text.flatMap(lambda x: x[1]).count()
# Words whose total count is below 0.0001% of all word occurrences are "rare".
rare_words = b_text.flatMap(lambda x: x[1])\
    .map(lambda x: (x, 1))\
    .reduceByKey(lambda x, y: x+y)\
    .filter(lambda x: x[1] < total_words_num * 0.000001)\
    .map(lambda x: (x[0], 1))
# (word, business_id) pairs with rare words and stopwords filtered out.
b_unset_words = b_text.flatMap(lambda x: [(word, x[0]) for word in x[1]])\
    .subtractByKey(rare_words)\
    .subtractByKey(stopwords)
# Document frequency: number of distinct businesses mentioning each word.
n = b_unset_words.groupByKey()\
    .map(lambda x: (x[0], len(set(x[1]))))
n_dict = dict(n.collect())
# NOTE(review): this also rebinds ``b_text`` to an integer count; presumably
# only ``N = ...`` was intended — confirm before relying on b_text below.
N = b_text = lines.map(lambda x: (x['business_id'])).distinct().count()
# Business profile: the top-200 TF-IDF words per business.
b_profile = b_unset_words.map(lambda x: (x[1], x[0]))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], tf_idf(x[1]))) \
    .map(lambda x: (x[0], [word[0] for word in x[1]]))
# Assign each distinct profile word a stable integer index.
words_list = b_profile.flatMap(lambda x: x[1]).distinct().collect()
words = dict([(word, ind) for ind, word in enumerate(words_list)])
# Re-express each business profile as a list of word indices.
b_profile2 = b_profile.map(lambda x: (x[0], [words[word_ind] for word_ind in x[1]]))
b_profile_dict = dict(b_profile2.collect())
def user_prof(b_list, profile_dict=None):
    """Union the word-index profiles of the given businesses into one user profile.

    :param b_list: business ids reviewed by one user.
    :param profile_dict: mapping business id -> list of word indices;
        defaults to the module-level ``b_profile_dict``.
    :return: de-duplicated list of word indices (order unspecified).
    """
    if profile_dict is None:
        profile_dict = b_profile_dict
    u_profile_words = []
    for b in b_list:
        u_profile_words.extend(profile_dict[b])
    return list(set(u_profile_words))
# User profile: union of the profiles of all businesses the user reviewed.
user_profile = lines.map(lambda x: (x['user_id'], x['business_id']))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], user_prof(x[1])))
# Persist the model: one JSON record per user profile and per business
# profile.  ``with`` guarantees the file is flushed and closed (the original
# opened the file and never closed it).
with open(model_file, "w") as f:
    for user, u_vector in dict(user_profile.collect()).items():
        f.write(json.dumps({"id": user, "type": "user", "vector": u_vector}))
        f.write('\n')
    for business, b_vector in b_profile_dict.items():
        f.write(json.dumps({"id": business, "type": "business", "vector": b_vector}))
        f.write('\n')
end_time = datetime.now()
duration = end_time - start_time
print("Duration:", duration)
|
7,883 | b849a2902c8596daa2c6da4de7b9d1c07b34d136 | # Generate some object patterns as save as JSON format
import json
import math
import random
from obstacle import *
def main(map):
    """Generate 34 random obstacles on the map and dump them to testDump.json."""
    # NOTE(review): the parameter shadows the ``map`` builtin; consider renaming.
    obs = []
    for x in range(1,35):
        # NOTE(review): the positional first argument (x) is drawn from the
        # height range while y uses the width range — confirm the axes are
        # not swapped.
        obs.append(Obstacle(random.randint(0,map.getHeight()), y=random.randint(0,map.getWidth()), radius=20).toJsonObject())
    jsonOb={'map': {'obstacle': obs}}
    print jsonOb
    F = open('testDump.json', 'w')
    json.dump(jsonOb, F, indent=4, separators=(',', ': '))
    F.close()
if __name__ == '__main__':
    # NOTE(review): main() requires a map argument but is called without one;
    # running this script directly raises TypeError.
    main()
7,884 | b9a75f4e106efade3a1ebdcfe66413107d7eccd0 | from distutils.core import setup
# Package metadata for dcnn_visualizer.
# NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
# migrating to setuptools.  Several metadata fields are still empty.
setup(
    name='dcnn_visualizer',
    version='',
    packages=['dcnn_visualizer', 'dcnn_visualizer.backward_functions'],
    url='',
    license='',
    author='Aiga SUZUKI',
    author_email='tochikuji@gmail.com',
    description='', requires=['numpy', 'chainer', 'chainercv']
)
|
7,885 | 1cdd315eec6792a8588dc2e6a221bc024be47078 | import pygame
import textwrap
import client.Button as Btn
from client.ClickableImage import ClickableImage as ClickImg
from client.CreateDisplay import CreateDisplay
import client.LiverpoolButtons as RuleSetsButtons_LP
import client.HandAndFootButtons as RuleSetsButtons_HF
import client.HandManagement as HandManagement
from client.UICardWrapper import UICardWrapper
import client.UIConstants as UIC
from common.Card import Card
class HandView:
    """This class handles player's cards and enables actions.

    Actions are primarily performed using buttons; since these need to be somewhat
    customized by game, the buttons are in ***.py (*** is Liverpool or HandAndFoot)
    and are imported as RuleSetsButtons.
    Management of displaying the hand's cards is not game specific, and methods that
    help with that are in HandManagement.py.
    Player can arrange their own hand, and prepare to play cards during other
    players' turns.
    """
    def __init__(self, controller, display, ruleset):
        """Wire up the controller/display and initialize per-ruleset state."""
        self.controller = controller
        self.display = display
        self.ruleset = ruleset
        self.Meld_Threshold = controller._state.rules.Meld_Threshold
        self.deal_size = controller._state.rules.Deal_Size
        self.help_text = controller._state.rules.help_text
        if ruleset == 'Liverpool':
            # One prepare-cards button per required set plus per required run.
            self.buttons_per_player = self.Meld_Threshold[0][0] + self.Meld_Threshold[0][1]
            self.RuleSetsButtons = RuleSetsButtons_LP
        elif ruleset == 'HandAndFoot':
            self.RuleSetsButtons = RuleSetsButtons_HF
        self.hand_scaling = (UIC.scale, UIC.Card_Spacing)
        self.current_hand = []
        self.last_hand = []
        self.hand_info = []  # will contain UICardWrapped elements of current_hand
        self.prepared_cards = []  # will contain list of prepared cards from controller
        self.discards = []
        self.discard_confirm = False
        # num_wilds is HandAndFoot specific, only non-zero if prepare_card_btn in
        # HandAndFootButtons.py is triggered.
        self.num_wilds = 0
        self.wild_cards = []
        self.selected_list = []
        self.round_index = 0
        self.round_advance = False
        self.num_players = 1
        # In Liverpool and other Shared_Board games: prepare cards buttons must be
        # updated each round.
        self.need_updated_buttons = True
        self.ready_color_idx = 2
        self.not_ready_color_idx = 6
        #
        # if someone joins between rounds, then they won't know the correct meld
        # requirement until the round begins.
        # (self.controller._state.round = -1 until play commences).
        # In HandAndFoot: Correct meld requirement will be written in lower right
        # corner once play commences.
        # In Liverpool: Will see correct buttons once round commences.
        self.RuleSetsButtons.CreateButtons(self)
    def update(self, player_index=0, num_players=1, visible_scards = []):
        """This updates the view of the hand, between rounds it displays a message. """
        # NOTE(review): mutable default for visible_scards — harmless only if it
        # is never mutated; confirm and prefer ``None`` as the default.
        self.visible_scards = visible_scards
        self.controller._state.player_index = player_index
        if self.num_players > num_players and self.controller._state.rules.Shared_Board \
                and not self.need_updated_buttons:
            # A player has left the game after the round has begun -- make
            # adjustments so game can continue.
            self.playerLeftGame(num_players)
        self.num_players = num_players
        if self.controller._state.round == -1:
            # Between rounds: show the help text instead of a hand.
            self.mesgBetweenRounds(self.help_text)
            if self.round_advance:
                self.round_index = self.round_index + 1
                if self.round_index < len(self.Meld_Threshold):
                    self.help_text[0] = 'This is the round of ' + str(self.Meld_Threshold[self.round_index]) + ' ! '
                    self.need_updated_buttons = True  # used for Liverpool.
                else:
                    self.help_text = ['Game has concluded. Scores for each round can be found in command window.']
                self.round_advance = False
        else:
            if not self.round_index == self.controller._state.round:
                # Need this to true up round_index if a player joins mid-game.
                skipped_rounds = self.controller._state.round - self.round_index
                for idx in range(skipped_rounds):
                    #todo: How to score latecomers should be moved to ruleset.
                    score = 0
                    self.controller.lateJoinScores(score)
                self.round_index = self.controller._state.round
            self.round_advance = True
            # reset outline colors on ready buttons to what they need to be at the
            # start of the "between rounds" state.
            self.ready_color_idx = 2
            self.not_ready_color_idx = 6
        self.last_hand = self.current_hand
        self.current_hand = self.controller.getHand()
        if len(self.current_hand) == 0:
            self.hand_info = []
        elif not self.last_hand == self.current_hand:
            # Only re-wrap the hand when it actually changed.
            self.hand_info = HandManagement.WrapHand(self, self.current_hand, self.hand_info)
        HandManagement.ShowHolding(self, self.hand_info)  # displays hand
        self.RuleSetsButtons.ButtonDisplay(self)
    def nextEventWildsOnBoard(self):
        """This runs instead of most of nextEvent when Shared_Board is True and there are ambiguous wild cards.

        It is looking for key strokes to designate ambiguous wild cards in runs.
        The mouse is ignored until you designate all the wilds (turn phase goes back to play)."""
        if self.controller._state.rules.Shared_Board and self.num_wilds > 0:
            for self.event in pygame.event.get():
                if self.event.type == pygame.QUIT:
                    # The window crashed, we should handle this
                    print("pygame crash, AAAHHH")
                    pygame.quit()
                    quit()
                else:
                    # in Shared_Board games, check if there are wilds that need to
                    # be updated.  All other events are ignored until play is
                    # finished.
                    HandManagement.wildsHiLoGetInput(self)
    def nextEvent(self):
        """This submits the next user input to the controller.

        In games with Shared_Board = False (e.g. HandAndFoot) key strokes don't do anything
        unless designating values for prepared wild cards, at which time the mouse is ignored
        unless you want to clear the prepared cards.
        In games with Shared_Board = True wilds on board might change designation upon other cards being played.
        IF designation cannot be handled automatically (= if wild can be at the beginning or end of a run) then
        it must be designated before play is completed.
        This is done in nextEventWildsOnBoard. All other events are ignored until num_wilds == 0 OR play is canceled."""
        if self.controller._state.rules.Shared_Board:
            self.num_wilds = len(self.controller.unassigned_wilds_dict.keys())
            if self.num_wilds > 0:
                self.nextEventWildsOnBoard()
        for self.event in pygame.event.get():
            if self.event.type == pygame.QUIT:
                # The window crashed, we should handle this
                print("pygame crash, AAAHHH")
                pygame.quit()
                quit()
            if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
                wild_instructions = 'Use the keyboard to designate your prepared wild cards \r\n '
                wild_instructions = wild_instructions + '(use 0 for 10 and J, Q, or K for facecards).'
                self.controller.note = wild_instructions
            pos = pygame.mouse.get_pos()
            if self.event.type == pygame.MOUSEBUTTONDOWN:
                self.RuleSetsButtons.ClickedButton(self, pos)
                for element in self.hand_info:
                    # cannot select prepared cards, so not included in logic below.
                    if element.img_clickable.isOver(pos):
                        if element.status == 1:
                            element.status = 0
                            element.img_clickable.changeOutline(0)
                        elif element.status == 0:
                            element.status = 1
                            element.img_clickable.changeOutline(2)
            elif self.event.type == pygame.MOUSEMOTION:
                self.RuleSetsButtons.MouseHiLight(self, pos)
                HandManagement.MouseHiLight(self.hand_info, pos)
            elif self.event.type == pygame.KEYDOWN:
                if self.controller._state.rules.Buy_Option:
                    if self.controller.buying_opportunity:
                        if self.event.key == pygame.K_y:
                            self.controller.wantTopCard(True)
                            self.controller.note = 'You have signaled you want to buy the card.'
                        elif self.event.key == pygame.K_n:
                            self.controller.wantTopCard(False)
                            self.controller.note = 'You have signaled you do not want to buy the card.'
                if not self.controller._state.rules.Shared_Board and self.num_wilds > 0:
                    HandManagement.ManuallyAssign(self)
    def gatherSelected(self):
        """ gathers selected cards
        in order to take action on selected cards (either discarding them or preparing them)
        """
        self.selected_list = []
        for element in self.hand_info:
            if element.status == 1:
                self.selected_list.append(element)
        return self.selected_list
    def discardConfirmation(self, confirmed, wrapped_discards):
        """ Confirm a user is sure about a discard and then perform it once confirmed."""
        discards = []
        for element in wrapped_discards:
            discards.append(element.card)
        # If the selection changed since the last call, ask again.
        if self.discards != discards:
            confirmed = False
            self.discards = discards
        if not confirmed:
            self.controller.note = "Please confirm - discard " + "{0}".format(self.discards)
            return True  # ask for confirmation
        else:
            # confirmed is True, performing discard and removing discarded wrapped
            # cards from hand_info.
            if self.discard_confirm:
                controller_response = self.controller.discard(self.discards)
                if controller_response:
                    for element in wrapped_discards:
                        self.hand_info.remove(element)
            return False  # now that this is done, we don't have anything waiting on confirmation
    def mesgBetweenRounds(self, message):
        """print message where cards usually displayed until Ready button is clicked for next round."""
        font = UIC.Medium_Text
        y_offset = (UIC.Disp_Height * (1 - (UIC.Hand_Row_Fraction * 0.8)))
        for message_string in message:
            text_surface = font.render(message_string, True, UIC.Black)
            text_rect = text_surface.get_rect()
            text_rect.center = ((UIC.Disp_Width * 0.5), y_offset)
            y_offset = y_offset + UIC.Medium_Text_Feed
            self.display.blit(text_surface, text_rect)
    def labelMedium(self, labelstr, x_offset, y_offset):
        """Render ``labelstr`` centered at (x_offset, y_offset) in medium text."""
        font = UIC.Medium_Text
        text_surface = font.render(labelstr, True, UIC.Bright_Blue)
        text_rect = text_surface.get_rect()
        text_rect.center = (x_offset, y_offset)
        self.display.blit(text_surface, text_rect)
    def playerLeftGame(self, num_players):
        """Adjust state after a player disconnects from a Shared_Board game."""
        # a player has disconnected a game with a Shared_Board = True. Must make
        # adjustments to (i) card group dictionaries, (ii) prepared cards &
        # (iii) buttons locations.
        self.controller.resetProcessedCards(self.visible_scards)
        self.controller.clearPreparedCards()  # so that prepared cards won't be mistakenly played on wrong group.
        self.hand_info = HandManagement.ClearPreparedCardsInHandView(self.hand_info)
        self.controller.note = "A player has left the game, all prepared cards are automatically cleared."
        # reset set/run button locations:
        if num_players > 1:
            players_sp_w = UIC.Disp_Width / num_players
        else:
            players_sp_w = UIC.Disp_Width
        for idx in range(num_players):
            for button in self.assign_cards_btns[idx]:
                button.x = 10 + (players_sp_w * idx)
|
7,886 | dc97703d39e7db29e0ba333c2797f4be6d015fd7 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 21:26:03 2018
@author: Brandon
"""os.getcwd()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'os' is not definimport os
>>> os.getcwd()
'C:\\Users\\Brandon\\AppData\\Local\\Programs\\Python\\Python36-32'
>>> os.chdir()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Required argument 'path' (pos 1) not found
>>> os.chdir()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Required argument 'path' (pos 1) not found
>>>
>>> os.chdir("C:\\Users\\Brandon\Documents")
>>> os.getcwd()
'C:\\Users\\Brandon\\Documents'
>>> os.makedirs()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: makedirs() missing 1 required positional argument: 'name'
>>> os.makedirs("yu")
>>> os.chdir("\\yu")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FileNotFoundError: [WinError 2] The system cannot find the file specified: '\\yu'
>>> os.chdir(".\\yu")
>>> os.getcwd()
'C:\\Users\\Brandon\\Documents\\yu'
>>> os.path.getsize(yu)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'yu' is not defined
>>> os.path.getsize()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: getsize() missing 1 required positional argument: 'filename'
>>> os.path.getsize()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: getsize() missing 1 required positional argument: 'filename'
>>> os.path.exists()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: exists() missing 1 required positional argument: 'path'
>>> os.path.exits("C:\\Users\\Brandon\\Documents\\yu")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'ntpath' has no attribute 'exits'
>>> os.path.exists("C:\\Users\\Brandon\\Documents\\yu")
True
>>>
|
7,887 | 94ca18088664393fdfdc68bfb8bcad8b78e9e36a | # bot.py
import os
import shutil
import discord
import youtube_dl
from discord.ext import commands
import urllib.parse
import urllib.request
import re
import dotenv
from pathlib import Path # Python 3.6+ only
from dotenv import load_dotenv
# Load the Discord bot token from a local .env file.
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
client = discord.Client()
# All commands are invoked with a '.' prefix (e.g. ``.play``).
botCommand = commands.Bot(command_prefix='.')
token = os.getenv("DISCORD_TOKEN")
# NOTE(review): ``players`` is never used elsewhere in this file.
players = {}
@botCommand.event
async def on_ready():
    """Log a line once the bot has connected to Discord."""
    print(
        f'{client.user} is connected to the following guild:\n'
    )
@botCommand.command(pass_context=True, aliases=['y'])
async def youtube(ctx, *, search):
    """Search YouTube for ``search`` and post the first result's URL."""
    query_string = urllib.parse.urlencode({
        'search_query': search
    })
    htm_content = urllib.request.urlopen(
        'http://www.youtube.com/results?' + query_string
    )
    print(r'/watch\?v=(.{11})')
    # Video ids are the 11 characters following '/watch?v=' in the results page.
    search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode('utf-8'))
    await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
# Module-level voice client shared by the playback commands.
voice = None
q_num = 0
@botCommand.command(pass_context=True, aliases=['p', 'play'])
async def plays(ctx, *, url):
    """Download the requested song with youtube_dl and play it in voice chat.

    ``url`` may be a direct link or free-text that is first resolved through a
    YouTube search.  If a download is already in progress (song.mp3 locked),
    the request is queued in ./Queue instead.
    """
    server = ctx.message.guild
    global voice
    channel = ctx.message.author.voice.channel
    # Free-text query: resolve it to the first YouTube search result.
    if not str(url).startswith('http'):
        query_string = urllib.parse.urlencode({
            'search_query': url
        })
        htm_content = urllib.request.urlopen(
            'http://www.youtube.com/results?' + query_string
        )
        print(r'/watch\?v=(.{11})')
        search_results = re.findall(r'/watch\?v=(.{11})', htm_content.read().decode('utf-8'))
        url = 'http://www.youtube.com/watch?v=' + search_results[0]
    if voice:
        print("ok")
    else:
        voice = await channel.connect()
        await ctx.send(f"Joined {channel}")
    # if voice is None:
    #     voice = await channel.connect()
    # song_there = os.path.isfile("song.mp3")
    def check_queue():
        """Playback-finished callback: promote the next queued file to song.mp3."""
        print('Test')
        Queue_infile = os.path.isdir("./Queue")
        if Queue_infile is True:
            DIR = os.path.abspath(os.path.realpath("Queue"))
            length = len(os.listdir(DIR))
            still_q = length - 1
            try:
                first_file = os.listdir(DIR)[0]
            except:
                # NOTE(review): bare except; an IndexError is the expected case here.
                print("No more queue\n")
                queues.clear()
                return
            main_location = os.path.dirname(os.path.realpath(__file__))
            song_path = os.path.abspath(os.path.realpath("Queue") + "\\" + first_file)
            if length != 0:
                print("Song done , playing next queue\n")
                print(f"song still in queue: {still_q}")
                song_there = os.path.isfile("song.mp3")
                if song_there:
                    os.remove("song.mp3")
                shutil.move(song_path, main_location)
                for file in os.listdir("./"):
                    if file.endswith(".mp3"):
                        os.rename(file, 'song.mp3')
                voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
                voice.source = discord.PCMVolumeTransformer(voice.source)
                voice.source.volume = 0.07
            else:
                queues.clear()
                return
        else:
            queues.clear()
            print("No song founds")
    def add_queue():
        """Download the requested url into ./Queue under the next free slot."""
        print("Test")
        Queue_infile = os.path.isdir("./Queue")
        if Queue_infile is False:
            os.mkdir("Queue")
        DIR = os.path.abspath(os.path.realpath("Queue"))
        q_num = len(os.listdir(DIR))
        q_num += 1
        # NOTE(review): the local flag ``add_queue`` shadows this function's name.
        add_queue = True
        while add_queue:
            if q_num in queues:
                q_num += 1
            else:
                add_queue = False
        queues[q_num] = q_num
        # NOTE(review): "\s" in this f-string is not an escape Windows needs;
        # presumably "\\song{q_num}" was intended — confirm on non-Windows too.
        queue_path = os.path.abspath(os.path.realpath("Queue") + f"\song{q_num}.%(ext)s")
        ydl_opts = {
            'format': 'bestaudio/best',
            'quiet': True,
            'outtmpl': queue_path,
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'mp3',
                'preferredquality': '192'
            }],
        }
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            print("Downloading audio now\n")
            ydl.download([url])
        print("Song added to queue\n")
    song_there = os.path.isfile("song.mp3")
    try:
        if song_there:
            os.remove("song.mp3")
            queues.clear()
            print("remove old song file")
    except PermissionError:
        # song.mp3 is still being played/written: queue the request instead.
        add_queue()
        await ctx.send("Adding song to the queue")
        return
    Queue_infile = os.path.isdir("./Queue")
    try:
        Queue_folder = "./Queue"
        if Queue_infile is True:
            print("Removed old Queue folder")
            shutil.rmtree(Queue_folder)
    except:
        # NOTE(review): bare except silently ignores all removal failures.
        print("No old queue folder")
    await ctx.send("Getting everything ready now")
    # voice = get(client.voice_clients, guild=ctx.guild)
    ydl_opts = {
        'format': 'bestaudio/best',
        'quiet': True,
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'mp3',
            'preferredquality': '192'
        }],
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        print("Downloading audio now\n")
        ydl.download([url])
    for file in os.listdir("./"):
        if file.endswith(".mp3"):
            name = file
            print(f"renamed file : {file}\n")
            os.rename(file, "song.mp3")
    voice.play(discord.FFmpegPCMAudio('song.mp3'), after=lambda e: check_queue())
    voice.source = discord.PCMVolumeTransformer(voice.source)
    voice.source.volume = 0.07
    nname = name.rsplit("-", 1)
    await ctx.send(f"Playing :notes: `{nname[0]}` :notes:")
    print("Playing\n")
# Maps queue slot number -> slot number; tracks queued downloads for plays().
queues = {}
@botCommand.command(pass_context=True)
async def ping(ctx):
    """Trivial liveness check; replies 'test'."""
    await ctx.send('test')
@botCommand.command(pass_context=True)
async def join(ctx):
    """Join the voice channel of the command's author."""
    global vc
    channel = ctx.message.author.voice.channel
    # NOTE(review): channel.connect() is a coroutine — this first call is never
    # awaited, so ``vc`` holds a coroutine object, and the channel is then
    # connected a second time below.  Confirm and collapse to one awaited call.
    vc = channel.connect()
    await channel.connect()
@botCommand.event
async def on_message(message):
    """Reply to a few hard-coded chat messages, then dispatch bot commands."""
    # Never react to the bot's own messages (prevents reply loops).
    if message.author == client.user:
        return
    msg1 = '<@333863300892721152> davis kok pepe ya'
    if message.content == 'command list':
        await message.channel.send('- davis mah\n- davis\n- .plays + youtubeURL')
    if message.content == 'davis mah':
        for x in range(3):
            await message.channel.send('davis mah paling jago')
    if message.content == 'davis':
        response = msg1
        for x in range(3):
            await message.channel.send(response)
    if message.content == 'bel sama jessica':
        response = 'jessica lah , https://imgur.com/TrtyIVa'
        await message.channel.send(response)
    if message.content == 'ig jessica':
        response = 'https://www.instagram.com/h.yojeong/'
        await message.channel.send(response)
    # Required when overriding on_message, otherwise @command handlers never run.
    await botCommand.process_commands(message)
# Start the bot's event loop (blocks until shutdown).
botCommand.run(token)
|
7,888 | 4545ce36c4d3df50e263d3323c04c53acb2b50e0 | #!/usr/bin/env python3
import csv
import math
def load_data_from_file(filename):
    """Read a two-column CSV of (time, position) samples.

    The first row is treated as a header and skipped.

    :param filename: The file from which you want to load data
    :return: Tuple (time, position) of two parallel lists of floats
    """
    time = []
    position = []
    with open(filename, 'r') as original:
        reader = csv.reader(original)
        next(reader, None)  # skip the header row
        # Stream row by row instead of materializing the whole file and
        # indexing it with range(1, len(...)).
        for row in reader:
            time.append(float(row[0]))
            position.append(float(row[1]))
    return time, position
def greater_than_index(numlist, singnum):
    """Return the index of the first element of ``numlist`` that is >= ``singnum``.

    :param numlist: The list of numbers to scan
    :param singnum: The value to compare the list elements against
    :return: Index of the first element >= ``singnum``, or None when no
        element qualifies
    """
    # enumerate() replaces the original list.index() rescan (an O(n) search
    # per hit) and the dead ``except ValueError`` handler: nothing in the
    # loop could raise ValueError, so a miss always fell through to None.
    for idx, elem in enumerate(numlist):
        if elem >= singnum:
            return idx
    return None
def less_than_index(numlist, singnum):
    """Return the index of the first element of ``numlist`` that is <= ``singnum``.

    (The original docstring was copy-pasted from greater_than_index and
    wrongly said "greater than"; this function tests <=.)

    :param numlist: The list of numbers to scan
    :param singnum: The value to compare the list elements against
    :return: Index of the first element <= ``singnum``, or None when no
        element qualifies
    """
    # Same cleanup as greater_than_index: enumerate() instead of an O(n)
    # list.index() rescan, and the unreachable ``except ValueError`` removed.
    for idx, elem in enumerate(numlist):
        if elem <= singnum:
            return idx
    return None
def ini_max_fin(pos1):
    """Return the initial, maximum and final values of a position trace."""
    return pos1[0], max(pos1), pos1[-1]
def char_ests(time_c, pos_c, c_initial, c_max, c_final):
    """Estimate the step-response characteristics of the waveform.

    :param time_c: A list of time values (monotonically increasing)
    :param pos_c: A list of position values, parallel to ``time_c``
    :param c_initial: The initial position value of our waveform
    :param c_max: The maximum position value of our waveform
    :param c_final: The final (steady-state) value of our waveform
    :return: Rise time (t_r), Peak time (t_p), % Overshoot (p_os_fix),
        Settling time (t_s)
    """

    def _first_ge(values, threshold):
        """Index of the first element >= threshold, or None."""
        for idx, val in enumerate(values):
            if val >= threshold:
                return idx
        return None

    def _first_le(values, threshold):
        """Index of the first element <= threshold, or None."""
        for idx, val in enumerate(values):
            if val <= threshold:
                return idx
        return None

    maxdex = pos_c.index(c_max)
    # NOTE(review): thresholds use (c_final + c_initial) * 0.1 / 0.9, kept from
    # the original; the textbook form is c_initial + frac * (c_final - c_initial),
    # which agrees only when c_initial == 0.  Confirm intent before changing.
    tr_10 = _first_ge(pos_c, (c_final + c_initial) * 0.1)
    tr_90 = _first_ge(pos_c, (c_final + c_initial) * 0.9)
    # Fixed: rise time is t(90%) - t(10%); the original subtracted in the
    # opposite order and always reported a non-positive rise time.
    t_r = time_c[tr_90] - time_c[tr_10]
    t_p = time_c[maxdex]  # Peak time
    # Adjusted %OS eq
    p_os_fix = ((c_max - c_final) / (c_final - c_initial)) * 100
    # Settling time: scan the trace backwards for the last sample outside the
    # +/- 2% band around the final value.
    two_perc = (c_final - c_initial) * 0.02
    rev_time = list(reversed(time_c))
    rev_pos = list(reversed(pos_c))
    candidates = [_first_le(rev_pos, c_final - two_perc),
                  _first_ge(rev_pos, c_final + two_perc)]
    # Fixed: when the response never crosses one band edge the original passed
    # None to min() and raised TypeError; treat "never outside" as settled at
    # the first sample.
    candidates = [c for c in candidates if c is not None]
    t_s = rev_time[min(candidates)] if candidates else time_c[0]
    return t_r, t_p, p_os_fix, t_s
def get_system_params(perc_os, settle_t):
    """Back out second-order system parameters from response characteristics.

    :param perc_os: The Overshoot Percentage value from which to calculate things
    :param settle_t: The settling time from which to calculate things
    :return: The mass (m_spr), spring (k_spr), and damping constants (c_spr)
    """
    log_os = math.log(perc_os / 100)
    # Damping ratio from the standard percent-overshoot relation.
    zeta = -log_os / math.sqrt(math.pi ** 2 + log_os ** 2)
    # Natural frequency from the 2% settling-time approximation t_s = 4 / (zeta * wn).
    omega = 4 / (zeta * settle_t)
    m_spr = 1  # Told to assume mass is always 1 (unit)
    k_spr = omega ** 2
    c_spr = 2 * zeta * omega
    return m_spr, k_spr, c_spr
def analyze_data(filename):
    """Analyze a step-response CSV and return its characteristics.

    :param filename: A name for the csv file to run the resulting operations
    :return: A dictionary (sorted by key) with the waveform characteristics
        and the inferred second-order system parameters
    """
    times, positions = load_data_from_file(filename)
    c_i, c_m, c_f = ini_max_fin(positions)
    t_rise, t_peak, percos, t_set = char_ests(times, positions, c_i, c_m, c_f)
    mass, spring, damping = get_system_params(percos, t_set)
    results = {'c_initial': c_i, 'c_max': c_m, 'c_final': c_f,
               'rise_time': t_rise, 'peak_time': t_peak,
               'perc_overshoot': percos, 'settling_time': t_set,
               'system_mass': mass, 'system_spring': spring,
               'system_damping': damping}
    # Emit entries in key order (Python dicts preserve insertion order).
    return {key: results[key] for key in sorted(results)}
if __name__ == '__main__':
    # Run the analysis against the first sample file; the remaining data sets
    # can be enabled by uncommenting the lines below.
    print(analyze_data('data1.csv'))
    # print(analyze_data('data2.csv'))
    # print(analyze_data('data3.csv'))
    # print(analyze_data('data4.csv'))
|
7,889 | 3472dc0c9d00c10ab0690c052e70fbf6a4bdb13d | """Utilities for AnalysisModules."""
import inspect
from mongoengine import QuerySet
from numpy import percentile
from .modules import AnalysisModule
def get_primary_module(package):
    """Extract AnalysisModule primary module from package."""

    def _is_primary(candidate):
        """True for AnalysisModule subclasses defined inside ``package``."""
        # Ensure the class is defined within the package we are inspecting
        # (and not in 'base').
        defined_here = package.__name__ in candidate.__module__
        return issubclass(candidate, AnalysisModule) and defined_here

    members = inspect.getmembers(package, inspect.isclass)
    return next(candidate for _, candidate in members if _is_primary(candidate))
def scrub_object(obj):
    """Remove protected fields from object (dict or list)."""
    if isinstance(obj, list):
        return [scrub_object(entry) for entry in obj]
    if not isinstance(obj, dict):
        # Scalars (and anything else) pass through untouched.
        return obj
    # Drop keys with a leading underscore and recurse into the values.
    return {field: scrub_object(value)
            for field, value in obj.items()
            if not field.startswith('_')}
def jsonify(mongo_doc):
    """Convert Mongo document to JSON for serialization."""
    # A QuerySet (or plain list) is serialized element by element.
    if isinstance(mongo_doc, (QuerySet, list,)):
        return [jsonify(item) for item in mongo_doc]
    raw_dict = mongo_doc.to_mongo().to_dict()
    return scrub_object(raw_dict)
def boxplot(values):
    """Calculate percentiles needed for a boxplot."""
    low, q1, median, q3, high = percentile(values, [0, 25, 50, 75, 100])
    # NOTE(review): 'mean_val' actually carries the median (50th percentile);
    # the key name is kept as-is for API compatibility.
    return {'min_val': low,
            'q1_val': q1,
            'mean_val': median,
            'q3_val': q3,
            'max_val': high}
def scrub_category_val(category_val):
    """Make sure that category val is a string with positive length."""
    text = category_val if isinstance(category_val, str) else str(category_val)
    # Normalize any spelling of NaN, and map empty strings to 'NaN' as well.
    if not text or text.lower() == 'nan':
        return 'NaN'
    return text
def collate_samples(tool_name, fields, samples):
    """Group a set of ToolResult fields from a set of samples by sample name."""
    collated = {}
    for sample in samples:
        tool_result = sample[tool_name]
        # Keep only the requested fields of this sample's tool result.
        collated[sample['name']] = {field: tool_result[field] for field in fields}
    return collated
def categories_from_metadata(samples, min_size=2):
    """
    Create dict of categories and their values from sample metadata.

    Parameters
    ----------
    samples : list
        List of sample models.
    min_size: int
        Minimum number of values required for a given metadata item to
        be included in returned categories.

    Returns
    -------
    dict
        Dictionary of form {<category_name>: [category_value[, category_value]]}
    """
    collected = {}
    # Accumulate the distinct (scrubbed) values seen for every metadata key.
    for sample in samples:
        for prop, raw_value in sample['metadata'].items():
            cleaned = scrub_category_val(raw_value)
            collected.setdefault(prop, set()).add(cleaned)
    # Keep only categories with at least min_size distinct values.
    return {name: list(values)
            for name, values in collected.items()
            if len(values) >= min_size}
|
7,890 | 19e387cb731dad21e5ee50b0a9812df984c13f3b | import openpyxl as opx
import pyperclip
from openpyxl import Workbook
from openpyxl.styles import PatternFill
# Write-only workbook: faster for the initial sequential dump of rows
# (cells cannot be styled later without re-opening the file).
wb = Workbook(write_only=True)
ws = wb.create_sheet()
def parseSeq(lines, seqName):
    '''Turn numbered sequence-file lines into rows of single characters,
    each row prefixed with the sequence name.'''
    seqRows = []
    for line in lines:
        # Split on single spaces and discard the empty strings produced by
        # runs of spaces.
        tokens = [tok for tok in line.split(' ') if tok]
        # Drop the position number at the beginning of the line.
        del tokens[0]
        row = [seqName]
        # Flatten every remaining token into individual characters.
        for token in tokens:
            row.extend(token)
        seqRows.append(row)
    return seqRows
# Interactive setup: number of sequences to compare and the output file name.
seqs = int(input('How many DNA sequences do you want to compare? '))
saveFile = input('What do you want to name the spreadsheet? ')
'''masterList contains each sequence, and each sequence is
broken into rows'''
masterList = []
'''reads files so they can be parsed'''
# Each sequence is read from <name>.txt in the working directory.
for i in range(seqs):
    print('What is the name of DNA sequence',i+1,end='? ')
    name = input('')
    file = open(name+'.txt')
    info = file.readlines()
    masterList.append(parseSeq(info,name))
    file.close()
'''sequence that contains the most rows is used for following loop'''
elems = []
for i in range(len(masterList)): elems.append(len(masterList[i]))
bigElem = elems.index(max(elems))
'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''
# Interleave the sequences row-by-row; shorter sequences contribute blank
# rows, and a blank spacer row separates each section.
for row in range(len(masterList[bigElem])):
    for seq in range(len(masterList)):
        try:
            ws.append(masterList[seq][row])
        except IndexError:
            ws.append([])
    ws.append([])
wb.save(saveFile+'.xlsx')
'''color match'''
match = input('Do you want to color match your sequence (y/n)? ')
if match == 'y':
    # Re-open the saved workbook in normal mode: write-only workbooks cannot
    # be styled cell-by-cell after the fact.
    wb = opx.load_workbook(saveFile+'.xlsx')
    sheet = wb['Sheet']
    ws = wb.active
    # ARGB color codes for the fills.
    red = 'FFFF0000'
    green = '0000FF00'
    blue = 'FF0000FF'
    greenFill = PatternFill(start_color=green,
                            end_color=green,
                            fill_type='solid')
    redFill = PatternFill(start_color=red,
                          end_color=red,
                          fill_type='solid')
    blueFill = PatternFill(start_color=blue,
                           end_color=blue,
                           fill_type='solid')
    # Legend in column BK.
    ws['BK1'] = 'Matched'
    ws['BK1'].fill = greenFill
    ws['BK2'] = 'Unmatched'
    ws['BK2'].fill = blueFill
    lastRow = sheet.max_row + 1
    # Each section holds one row per sequence plus one blank spacer row.
    end = int(lastRow / (seqs+1))
    for section in range(end):
        startSec = (seqs+1)*section + 1
        endSec = (seqs+1)*section + (seqs+1)
        # Columns 2-61 hold the (up to 60) base-pair characters.
        for col in range(2,62):
            bp = []
            for row in range(startSec,endSec):
                cell = sheet.cell(row=row,column=col).value
                bp.append(cell)
            # All sequences agree at this position -> green, else blue.
            if bp.count(bp[0]) == seqs:
                for row in range(startSec,endSec):
                    sheet.cell(row=row,column=col).fill = greenFill
            else:
                for row in range(startSec,endSec):
                    sheet.cell(row=row,column=col).fill = blueFill
    wb.save(saveFile+'.xlsx')
|
7,891 | c76fd9b196b50e6fcced7e56517c0cd8ab30e24e | from . import preprocess
from . import utils
import random
import pickle
import feather
import time
import datetime
import sys
import os
import numpy as np
import pandas as pd
import json
from ...main import api
from flask import request
from flask_restplus import Resource, fields
import warnings
warnings.simplefilter("ignore")
# Swagger/request-body models for the prediction endpoints.
predict_fields = api.model('Prediction Data', {
})
predict_accounts = api.model('Prediction Data By Employee', {
})
# Response item: attrition probability plus the predicted attrition date.
prediction = api.model('Prediction', {'attritionproba': fields.Float(
    example=0.345), 'attritiondate': fields.String(example='2020-10-06T00:00:00.000Z')})
predictionByEmployee = api.model('Prediction By Employee', {})
model = api.model(
    'Predictions', {'predictions': fields.List(fields.Nested(prediction))})
modelByEmployee = api.model(
    'Predictions By Employee', {'predictions': fields.List(fields.Nested(predictionByEmployee))})
# Shared query-string parameter: the reference date for the prediction.
parser = api.parser()
parser.add_argument('predictdate', location='args', default=datetime.date.today().strftime("%Y-%m-%d"), help='Predict date', required=True)
@api.route("/predict")
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route("/predict/<string:companyid>/<string:accountid>")
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args['predictdate'])
@api.route("/predict/<string:companyid>")
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()['accountids'], args['predictdate'])
# Directory containing this module (kept for path resolution).
package_directory = os.path.dirname(os.path.abspath(__file__))
def predict_class(local_model, df):
    """Return per-row attrition probabilities for *df*.

    Loads the pickled classifier at *local_model* and returns the positive
    class probability. When no model file exists, falls back to pseudo-random
    probabilities in [0, 0.1) so the API stays usable without a trained model.
    """
    if os.path.isfile(local_model):
        # Use a context manager so the model file handle is closed promptly
        # (the original pickle.load(open(...)) leaked it).
        with open(local_model, 'rb') as model_file:
            model = pickle.load(model_file)
        result = pd.Series(model.predict_proba(df)[:, 1])
    else:
        result = pd.Series(random.sample(
            range(1000), df.shape[0])).divide(10000)
    return result
def predict_reg(local_model, df):
    """Return per-row predicted days-until-attrition for *df*.

    Loads the pickled regressor at *local_model*; predictions are cast to int
    and clipped at zero. When no model file exists, random stub values in
    [100, 1000) are returned.
    """
    if os.path.isfile(local_model):
        # Context manager avoids leaking the model file handle.
        with open(local_model, 'rb') as model_file:
            model = pickle.load(model_file)
        result = pd.Series(model.predict(df)).apply(int).clip(lower=0)
    else:
        result = pd.Series(random.sample(range(100, 1000), df.shape[0]))
    return result
def getPrediction(data, predictdate=np.datetime64('today')):
    """Predict attrition probability and date for raw instances in *data*.

    *data* must be a dict with 'instances', 'columns' and 'companyid';
    otherwise an empty stub prediction is returned.

    NOTE(review): the default is evaluated once at import time, so 'today'
    is the process start date, not the call date -- confirm intent.
    NOTE(review): sys.stdout is redirected to a log file and closed on the
    success path but never restored, leaving the process without a usable
    stdout afterwards.
    """
    request_json = data
    if request_json and 'instances' in request_json and 'companyid' in request_json and 'columns' in request_json:
        # Redirect prints to a timestamped log file for this request.
        sys.stdout = open(utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
        # copy model
        companyid = str(request_json['companyid'])
        print(datetime.datetime.now(), 'Predict for company', companyid)
        local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
        local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
        columns = request_json['columns']
        df = pd.DataFrame(request_json['instances'], columns=columns)
        # Feature engineering; id/label columns are dropped before scoring.
        df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', predictdate)
        df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
        data = {}
        result_class = predict_class(local_class_model, df_1)
        result_reg = predict_reg(local_reg_model, df_1)
        # Predicted attrition date = hire date + predicted days to attrition.
        df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
        result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
        data['predictions'] = json.loads(pd.DataFrame({'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
        sys.stdout.close()
        return data
    else:
        # Malformed request: return an empty prediction stub.
        return {'attritionproba': 0, 'attritiondate': ''}
def getPredictionByEmployee(companyid, accountid=None, predictdate=None):
    """Predict attrition probability and date for a company's employees.

    Parameters
    ----------
    companyid : str
        Company identifier; used to locate model and data files.
    accountid : list of int, optional
        Account ids to restrict the prediction to; all employees when None.
    predictdate : date-like, optional
        Reference date for the prediction; defaults to today, evaluated at
        call time (the former np.datetime64('today') default was frozen at
        import time).

    Returns
    -------
    dict
        {'predictions': [{'accountid', 'attritionproba', 'attritiondate'}, ...]}
    """
    if predictdate is None:
        predictdate = np.datetime64('today')
    # Redirect stdout to a timestamped log file for the duration of the call,
    # restoring it afterwards (the original left sys.stdout closed).
    orig_stdout = sys.stdout
    sys.stdout = open(
        utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
    try:
        print(datetime.datetime.now(), 'Predict for company', companyid)
        local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
        local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
        # Historical predictions read month-suffixed snapshots; predictions
        # for today or later use the unsuffixed files.
        if np.datetime64(predictdate) >= np.datetime64('today'):
            strtodate = ''
        else:
            strtodate = np.datetime64(predictdate).astype(datetime.datetime).strftime('%Y%m')
        # Prefer the feather cache; fall back to CSV and build the cache.
        if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather'):
            df = feather.read_dataframe(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
        else:
            df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.csv', low_memory=False)
            feather.write_dataframe(df, utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
        if os.path.isfile(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather'):
            df_1 = feather.read_dataframe(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
        else:
            df_1 = pd.read_csv(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.csv', low_memory=False)
            feather.write_dataframe(df_1, utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
        if accountid:
            df = df.loc[(df['CompId'] == int(companyid)) & (df['AccountId'].isin(accountid))].reset_index(drop=True)
            df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & (df_1['AccountId'].isin(accountid))].reset_index(drop=True)
        else:
            df = df.loc[(df['CompId'] == int(companyid))]
            # BUG FIX: filter df_1 by its own CompId column; the original
            # indexed df_1 with df's boolean mask, which misaligns the two
            # frames whenever their indexes differ.
            df_1 = df_1.loc[(df_1['CompId'] == int(companyid))]
        #df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', np.datetime64(predictdate))
        df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
        print(datetime.datetime.now(), 'Predict for data', df_1.shape)
        data = {}
        result_class = predict_class(local_class_model, df_1)
        result_reg = predict_reg(local_reg_model, df_1)
        # Predicted attrition date = hire date + predicted days to attrition.
        df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
        result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
        data['predictions'] = json.loads(pd.DataFrame(
            {'accountid': df['AccountId'], 'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
        return data
    finally:
        sys.stdout.close()
        sys.stdout = orig_stdout
|
7,892 | b3095f181032727544ce3ee6f1ad3a70976c0061 | # Copyright (c) 2018-2020, NVIDIA CORPORATION.
import os
import shutil
import subprocess
import sys
import sysconfig
from distutils.spawn import find_executable
from distutils.sysconfig import get_python_lib
import numpy as np
import pyarrow as pa
from Cython.Build import cythonize
from Cython.Distutils import build_ext
from setuptools import find_packages, setup
from setuptools.extension import Extension
import versioneer
install_requires = ["numba", "cython"]
# Every Cython source under the cudf package.
cython_files = ["cudf/**/*.pyx"]
CUDA_HOME = os.environ.get("CUDA_HOME", False)
if not CUDA_HOME:
    # Fall back to locating the toolkit via cuda-gdb on PATH; CUDA_HOME is
    # two directories above the binary.
    path_to_cuda_gdb = shutil.which("cuda-gdb")
    if path_to_cuda_gdb is None:
        raise OSError(
            "Could not locate CUDA. "
            "Please set the environment variable "
            "CUDA_HOME to the path to the CUDA installation "
            "and try again."
        )
    CUDA_HOME = os.path.dirname(os.path.dirname(path_to_cuda_gdb))
if not os.path.isdir(CUDA_HOME):
    raise OSError(f"Invalid CUDA_HOME: directory does not exist: {CUDA_HOME}")
cuda_include_dir = os.path.join(CUDA_HOME, "include")
# Location of the prebuilt libcudf C++ build tree.
CUDF_ROOT = os.environ.get("CUDF_ROOT", "../../cpp/build/")
try:
    # Number of parallel cythonize jobs; 0 lets Cython decide.
    nthreads = int(os.environ.get("PARALLEL_LEVEL", "0") or "0")
except Exception:
    nthreads = 0
cmdclass = versioneer.get_cmdclass()
class build_ext_and_proto(build_ext):
    """build_ext that first compiles .proto files into _pb2.py modules."""
    def run(self):
        # Get protoc
        protoc = None
        if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
            protoc = os.environ["PROTOC"]
        else:
            protoc = find_executable("protoc")
        if protoc is None:
            sys.stderr.write("protoc not found")
            sys.exit(1)
        # Build .proto file
        for source in ["cudf/utils/metadata/orc_column_statistics.proto"]:
            output = source.replace(".proto", "_pb2.py")
            # Regenerate only when the .proto is newer than its output.
            if not os.path.exists(output) or (
                os.path.getmtime(source) > os.path.getmtime(output)
            ):
                # NOTE(review): this append happens before protoc runs, and
                # protoc rewrites the output file; the "r+" prepend below is
                # what actually adds the pragmas -- confirm this append is
                # still needed.
                with open(output, "a") as src:
                    src.write("# flake8: noqa" + os.linesep)
                    src.write("# fmt: off" + os.linesep)
                subprocess.check_call([protoc, "--python_out=.", source])
                # Prepend linter/formatter suppressions to the generated file
                # and append the closing "# fmt: on".
                with open(output, "r+") as src:
                    new_src_content = (
                        "# flake8: noqa"
                        + os.linesep
                        + "# fmt: off"
                        + os.linesep
                        + src.read()
                        + "# fmt: on"
                        + os.linesep
                    )
                    src.seek(0)
                    src.write(new_src_content)
        # Run original Cython build_ext command
        build_ext.run(self)
cmdclass["build_ext"] = build_ext_and_proto
extensions = [
Extension(
"*",
sources=cython_files,
include_dirs=[
"../../cpp/include/cudf",
"../../cpp/include",
os.path.join(CUDF_ROOT, "include"),
os.path.join(CUDF_ROOT, "_deps/libcudacxx-src/include"),
os.path.join(
os.path.dirname(sysconfig.get_path("include")),
"libcudf/libcudacxx",
),
os.path.dirname(sysconfig.get_path("include")),
np.get_include(),
pa.get_include(),
cuda_include_dir,
],
library_dirs=(
pa.get_library_dirs()
+ [get_python_lib(), os.path.join(os.sys.prefix, "lib")]
),
libraries=["cudf"] + pa.get_libraries() + ["arrow_cuda"],
language="c++",
extra_compile_args=["-std=c++14"],
)
]
# Package metadata and build configuration for the cudf Python bindings.
setup(
    name="cudf",
    version=versioneer.get_version(),
    description="cuDF - GPU Dataframe",
    url="https://github.com/rapidsai/cudf",
    author="NVIDIA Corporation",
    license="Apache 2.0",
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Database",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    # Include the separately-compiled shared library
    setup_requires=["cython", "protobuf"],
    ext_modules=cythonize(
        extensions,
        nthreads=nthreads,
        compiler_directives=dict(
            profile=False, language_level=3, embedsignature=True
        ),
    ),
    packages=find_packages(include=["cudf", "cudf.*"]),
    package_data=dict.fromkeys(
        find_packages(include=["cudf._lib*"]), ["*.pxd"],
    ),
    cmdclass=cmdclass,
    install_requires=install_requires,
    zip_safe=False,
)
|
7,893 | 548c4dbfc1456fead75c22927ae7c6224fafeace | #!/home/porosya/.local/share/virtualenvs/checkio-VEsvC6M1/bin/checkio --domain=py run inside-block
# https://py.checkio.org/mission/inside-block/
# When it comes to city planning it's important to understand the borders of various city structures. Parks, lakes or living blocks can be represented as closed polygons and can be described using cartesian coordinates on a map. We need functionality to determine whether a point (a building or a tree) lies inside the structure.
#
# For the purpose of this mission, a city structure may be considered a polygon represented as a sequence of vertex coordinates on a plane or map. The vertices are connected sequentially with the last vertex in the list connecting to the first. We are given the coordinates of the point which we need to check. If the point of impact lies on the edge of the polygon then it should be considered inside it. For this mission, you need to determine whether the given point lies inside the polygon.
#
#
# END_DESC
def is_inside(polygon, point):
    """Return True if point lies inside polygon or on its border.

    polygon is a sequence of (x, y) vertices; the last vertex connects back
    to the first. Uses the even-odd (ray casting) rule with an explicit
    on-edge check, because border points count as inside.
    """
    px, py = point
    vertex_count = len(polygon)
    inside = False
    for i in range(vertex_count):
        x1, y1 = polygon[i]
        x2, y2 = polygon[(i + 1) % vertex_count]
        # On-edge check: point is collinear with the segment and within its
        # bounding box.
        cross = (x2 - x1) * (py - y1) - (y2 - y1) * (px - x1)
        if (cross == 0
                and min(x1, x2) <= px <= max(x1, x2)
                and min(y1, y2) <= py <= max(y1, y2)):
            return True
        # Ray casting: count crossings of a horizontal ray going right from
        # the point. The strict/non-strict comparison pair handles vertices
        # lying exactly on the ray without double counting.
        if (y1 > py) != (y2 > py):
            x_intersect = x1 + (py - y1) * (x2 - x1) / (y2 - y1)
            if px < x_intersect:
                inside = not inside
    return inside
if __name__ == '__main__':
    # Mission self-checks: interior points, exterior points, and a point on
    # an edge (the third case), which counts as inside.
    assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),
                     (2, 2)) == True, "First"
    assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),
                     (4, 2)) == False, "Second"
    assert is_inside(((1, 1), (4, 1), (2, 3)),
                     (3, 2)) == True, "Third"
    assert is_inside(((1, 1), (4, 1), (1, 3)),
                     (3, 3)) == False, "Fourth"
    assert is_inside(((2, 1), (4, 1), (5, 3), (3, 4), (1, 3)),
                     (4, 3)) == True, "Fifth"
    assert is_inside(((2, 1), (4, 1), (3, 2), (3, 4), (1, 3)),
                     (4, 3)) == False, "Sixth"
    assert is_inside(((1, 1), (3, 2), (5, 1), (4, 3), (5, 5), (3, 4), (1, 5), (2, 3)),
                     (3, 3)) == True, "Seventh"
    assert is_inside(((1, 1), (1, 5), (5, 5), (5, 4), (2, 4), (2, 2), (5, 2), (5, 1)),
                     (4, 3)) == False, "Eighth"
7,894 | 8a4fe88bfa39eeeda42198260a1b22621c33183e | import datetime
from threading import Thread
import cv2
class WebcamVideoStream:
    """Threaded webcam reader.

    Frames are grabbed on a background thread so the main loop never blocks
    on camera I/O; read() always returns the latest frame already captured.
    """
    def __init__(self, src=0):
        # Open the camera and prime the first frame so read() is valid
        # immediately after construction.
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Flag checked by the worker loop; set via stop().
        self.stopped = False

    def start(self):
        # Run update() on its own thread so frame grabbing does not block
        # the caller.
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        # Worker loop: keep replacing self.frame with the newest capture
        # until stop() flips the flag. Debug prints from the original were
        # removed.
        while True:
            if self.stopped:
                # NOTE(review): destroying GUI windows from a worker thread
                # is not guaranteed safe in OpenCV -- confirm this call is
                # needed here rather than on the main thread.
                cv2.destroyAllWindows()
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        # Return the frame most recently grabbed by the worker thread.
        return self.frame

    def stop(self):
        # Signal the worker thread to exit on its next iteration.
        self.stopped = True
|
7,895 | f82ddc34fde76ddfbbe75116526af45b83c1b102 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest
from pyscf import gto
import os
from pyscf.nao import nao, prod_basis
ag_s7l7_wonatoms = """
H 2.346340 -0.000093 -1.449987
H 2.346702 -0.000095 1.450132
H -2.345370 -0.000086 -1.449228
H -2.345734 -0.000089 1.449376
H -1.449887 2.346112 -0.000046
H 1.450134 2.346853 -0.000044
H -1.449224 -2.345222 -0.000041
H 1.449464 -2.345958 -0.000038
H -0.000112 -1.449738 2.345957
H -0.000111 1.449980 2.346608
H -0.000111 -1.449377 -2.345464
H -0.000107 1.449607 -2.346103
H 4.731536 -0.000009 -2.923633
H 4.794344 0.000006 0.000053
H 4.731450 -0.000009 2.923590
H -4.731847 -0.000004 -2.923807
H -4.794483 0.000008 0.000053
H -4.731757 -0.000006 2.923758
H -2.923688 4.731598 0.000002
H 0.000077 4.794367 0.000013
H 2.923553 4.731432 0.000002
H -2.923845 -4.731869 0.000004
H 0.000084 -4.794470 0.000009
H 2.923708 -4.731700 0.000004
H -0.000016 -2.923710 4.731655
H -0.000002 0.000081 4.794386
H -0.000017 2.923594 4.731497
H -0.000016 -2.923799 -4.731798
H -0.000002 0.000083 -4.794441
H -0.000018 2.923687 -4.731644
H 2.396856 -1.481019 3.878620
H 2.396773 1.481058 3.878614
H 2.396905 -1.481021 -3.878636
H 2.396824 1.481062 -3.878634
H -2.396782 -1.481108 3.878712
H -2.396699 1.481149 3.878709
H -2.396832 -1.481114 -3.878735
H -2.396748 1.481155 -3.878730
H 3.878596 2.396822 -1.481024
H 3.878589 2.396778 1.481062
H -3.878672 2.396916 -1.481031
H -3.878666 2.396868 1.481064
H 3.878682 -2.396737 -1.481107
H 3.878676 -2.396695 1.481146
H -3.878757 -2.396826 -1.481115
H -3.878754 -2.396779 1.481148
H -1.481047 3.878627 2.396831
H 1.481072 3.878617 2.396742
H -1.481055 -3.878680 2.396921
H 1.481078 -3.878674 2.396834
H -1.481096 3.878678 -2.396773
H 1.481119 3.878671 -2.396685
H -1.481102 -3.878731 -2.396860
H 1.481126 -3.878722 -2.396772
H 7.150331 0.000013 -4.418604
H 7.225782 0.000009 -1.477531
H 7.225777 0.000009 1.477551
H 7.150346 0.000010 4.418636
H -7.150239 0.000015 -4.418552
H -7.225701 0.000010 -1.477539
H -7.225697 0.000009 1.477559
H -7.150257 0.000015 4.418586
H -4.418596 7.150312 0.000012
H -1.477538 7.225777 0.000010
H 1.477536 7.225775 0.000011
H 4.418635 7.150362 0.000012
H -4.418553 -7.150222 0.000020
H -1.477554 -7.225705 0.000012
H 1.477559 -7.225701 0.000011
H 4.418598 -7.150270 0.000013
H 0.000008 -4.418580 7.150295
H 0.000006 -1.477536 7.225760
H 0.000007 1.477549 7.225757
H 0.000007 4.418626 7.150335
H 0.000008 -4.418561 -7.150247
H 0.000006 -1.477545 -7.225726
H 0.000006 1.477561 -7.225723
H 0.000007 4.418613 -7.150287
H 4.808303 -1.493555 5.388587
H 2.417464 -2.971656 6.301956
H 4.808308 1.493581 5.388605
H 2.431587 0.000014 6.366095
H 2.417478 2.971674 6.301966
H 4.808303 -1.493546 -5.388552
H 2.417452 -2.971655 -6.301934
H 4.808310 1.493573 -5.388572
H 2.431585 0.000016 -6.366071
H 2.417464 2.971677 -6.301941
H -4.808288 -1.493538 5.388559
H -2.417439 -2.971638 6.301924
H -4.808292 1.493569 5.388578
H -2.431572 0.000013 6.366082
H -2.417452 2.971656 6.301933
H -4.808287 -1.493528 -5.388525
H -2.417427 -2.971639 -6.301899
H -4.808292 1.493561 -5.388546
H -2.431572 0.000014 -6.366056
H -2.417439 2.971659 -6.301909
H 5.388603 4.808319 -1.493559
H 6.301970 2.417487 -2.971653
H 5.388608 4.808321 1.493584
H 6.366098 2.431602 0.000014
H 6.301967 2.417490 2.971675
H -5.388548 4.808294 -1.493543
H -6.301922 2.417455 -2.971644
H -5.388553 4.808296 1.493566
H -6.366058 2.431589 0.000013
H -6.301920 2.417459 2.971662
H 5.388578 -4.808301 -1.493544
H 6.301948 -2.417448 -2.971646
H 5.388584 -4.808302 1.493574
H 6.366092 -2.431572 0.000013
H 6.301945 -2.417454 2.971667
H -5.388520 -4.808272 -1.493529
H -6.301896 -2.417412 -2.971637
H -5.388529 -4.808274 1.493557
H -6.366050 -2.431556 0.000012
H -6.301896 -2.417417 2.971658
H -1.493562 5.388587 4.808302
H -2.971658 6.301956 2.417476
H 1.493576 5.388607 4.808320
H 0.000001 6.366100 2.431597
H 2.971666 6.301972 2.417493
H -1.493543 -5.388526 4.808289
H -2.971649 -6.301906 2.417444
H 1.493555 -5.388547 4.808306
H 0.000002 -6.366051 2.431589
H 2.971661 -6.301921 2.417458
H -1.493560 5.388586 -4.808287
H -2.971654 6.301946 -2.417449
H 1.493572 5.388604 -4.808304
H -0.000002 6.366099 -2.431566
H 2.971663 6.301961 -2.417463
H -1.493541 -5.388524 -4.808272
H -2.971647 -6.301895 -2.417411
H 1.493554 -5.388544 -4.808291
H 0.000005 -6.366052 -2.431562
H 2.971660 -6.301911 -2.417425
H 3.933950 -3.933932 3.933958
H 3.933959 3.933967 3.933963
H 3.933948 -3.933931 -3.933931
H 3.933957 3.933967 -3.933938
H -3.933922 -3.933912 3.933940
H -3.933929 3.933948 3.933945
H -3.933921 -3.933910 -3.933915
H -3.933929 3.933948 -3.933922
H 9.586490 0.000010 -5.924178
H 9.665986 0.000008 -2.972164
H 9.696371 0.000007 0.000010
H 9.665971 0.000008 2.972190
H 9.586467 0.000009 5.924179
H -9.586484 0.000013 -5.924188
H -9.665980 0.000009 -2.972165
H -9.696370 0.000008 0.000012
H -9.665971 0.000008 2.972185
H -9.586466 0.000011 5.924187
H -5.924179 9.586482 0.000006
H -2.972172 9.665973 0.000007
H 0.000003 9.696364 0.000008
H 2.972183 9.665974 0.000008
H 5.924181 9.586480 0.000005
H -5.924189 -9.586477 0.000008
H -2.972172 -9.665971 0.000010
H 0.000008 -9.696374 0.000008
H 2.972190 -9.665972 0.000008
H 5.924193 -9.586474 0.000007
H 0.000007 -5.924168 9.586474
H 0.000007 -2.972167 9.665967
H 0.000006 0.000007 9.696360
H 0.000008 2.972186 9.665963
H 0.000006 5.924181 9.586462
H 0.000007 -5.924186 -9.586489
H 0.000007 -2.972170 -9.665982
H 0.000006 0.000009 -9.696379
H 0.000007 2.972190 -9.665979
H 0.000006 5.924201 -9.586480
H 7.237307 -1.500148 6.901190
H 4.847669 -2.996238 7.843968
H 2.428125 -4.472804 8.738149
H 7.237301 1.500165 6.901187
H 4.862151 0.000009 7.909330
H 2.452082 -1.489596 8.829834
H 4.847661 2.996251 7.843964
H 2.452083 1.489612 8.829831
H 2.428122 4.472818 8.738145
H 7.237323 -1.500151 -6.901186
H 4.847685 -2.996244 -7.843971
H 2.428136 -4.472811 -8.738153
H 7.237318 1.500169 -6.901185
H 4.862162 0.000010 -7.909328
H 2.452091 -1.489597 -8.829834
H 4.847679 2.996257 -7.843969
H 2.452080 1.489614 -8.829849
H 2.428132 4.472826 -8.738153
H -7.237293 -1.500149 6.901191
H -4.847661 -2.996241 7.843973
H -2.428114 -4.472804 8.738152
H -7.237288 1.500171 6.901188
H -4.862140 0.000012 7.909329
H -2.452056 -1.489593 8.829846
H -4.847654 2.996255 7.843972
H -2.452058 1.489609 8.829843
H -2.428111 4.472819 8.738150
H -7.237306 -1.500153 -6.901191
H -4.847677 -2.996244 -7.843973
H -2.428122 -4.472813 -8.738157
H -7.237299 1.500176 -6.901188
H -4.862151 0.000012 -7.909331
H -2.452064 -1.489595 -8.829852
H -4.847670 2.996260 -7.843970
H -2.452064 1.489612 -8.829849
H -2.428117 4.472829 -8.738155
H 6.901190 7.237319 -1.500150
H 7.843970 4.847677 -2.996237
H 8.738156 2.428132 -4.472807
H 6.901187 7.237315 1.500164
H 7.909334 4.862159 0.000009
H 8.829843 2.452089 -1.489594
H 7.843962 4.847670 2.996250
H 8.829839 2.452089 1.489612
H 8.738147 2.428123 4.472820
H -6.901186 7.237313 -1.500153
H -7.843972 4.847681 -2.996242
H -8.738156 2.428143 -4.472808
H -6.901182 7.237309 1.500168
H -7.909325 4.862158 0.000009
H -8.829845 2.452079 -1.489595
H -7.843964 4.847675 2.996256
H -8.829842 2.452078 1.489612
H -8.738145 2.428137 4.472818
H 6.901200 -7.237306 -1.500153
H 7.843981 -4.847666 -2.996242
H 8.738162 -2.428114 -4.472812
H 6.901198 -7.237297 1.500171
H 7.909337 -4.862146 0.000011
H 8.829845 -2.452068 -1.489594
H 7.843972 -4.847660 2.996258
H 8.829841 -2.452068 1.489613
H 8.738152 -2.428107 4.472824
H -6.901190 -7.237297 -1.500154
H -7.843979 -4.847668 -2.996248
H -8.738159 -2.428121 -4.472812
H -6.901189 -7.237293 1.500171
H -7.909326 -4.862145 0.000009
H -8.829847 -2.452058 -1.489596
H -7.843972 -4.847664 2.996263
H -8.829843 -2.452057 1.489614
H -8.738148 -2.428115 4.472824
H -1.500153 6.901189 7.237302
H -2.996243 7.843968 4.847670
H -4.472809 8.738156 2.428132
H 1.500162 6.901186 7.237307
H 0.000005 7.909331 4.862151
H -1.489597 8.829847 2.452075
H 2.996247 7.843960 4.847669
H 1.489606 8.829848 2.452078
H 4.472817 8.738150 2.428124
H -1.500152 -6.901177 7.237309
H -2.996247 -7.843968 4.847679
H -4.472811 -8.738149 2.428139
H 1.500163 -6.901178 7.237314
H 0.000006 -7.909318 4.862160
H -1.489601 -8.829840 2.452076
H 2.996251 -7.843960 4.847678
H 1.489612 -8.829840 2.452078
H 4.472822 -8.738143 2.428130
H -1.500158 6.901205 -7.237297
H -2.996246 7.843981 -4.847659
H -4.472815 8.738162 -2.428115
H 1.500171 6.901202 -7.237297
H 0.000007 7.909340 -4.862141
H -1.489596 8.829841 -2.452068
H 2.996253 7.843975 -4.847660
H 1.489606 8.829841 -2.452069
H 4.472822 8.738158 -2.428111
H -1.500158 -6.901191 -7.237303
H -2.996251 -7.843978 -4.847665
H -4.472815 -8.738153 -2.428116
H 1.500171 -6.901191 -7.237303
H 0.000007 -7.909327 -4.862149
H -1.489602 -8.829834 -2.452067
H 2.996258 -7.843975 -4.847668
H 1.489613 -8.829834 -2.452069
H 4.472827 -8.738151 -2.428114
H 6.377354 -3.967092 5.457178
H 6.377349 3.967112 5.457173
H 5.457177 -6.377342 3.967111
H 5.457169 6.377356 3.967110
H 3.967106 -5.457157 6.377359
H 3.967106 5.457174 6.377352
H 6.377366 -3.967096 -5.457166
H 6.377363 3.967116 -5.457163
H 5.457183 -6.377353 -3.967095
H 5.457176 6.377363 -3.967098
H 3.967113 -5.457167 -6.377350
H 3.967114 5.457185 -6.377343
H -6.377340 -3.967091 5.457179
H -6.377336 3.967112 5.457174
H -5.457166 -6.377340 3.967111
H -5.457161 6.377353 3.967111
H -3.967095 -5.457156 6.377359
H -3.967095 5.457171 6.377352
H -6.377355 -3.967095 -5.457168
H -6.377350 3.967118 -5.457163
H -5.457171 -6.377349 -3.967099
H -5.457166 6.377363 -3.967100
H -3.967102 -5.457168 -6.377348
H -3.967101 5.457185 -6.377342
H 12.038641 0.000003 -7.440548
H 12.099988 0.000003 -4.483580
H 12.144240 0.000006 -1.497071
H 12.144233 0.000006 1.497083
H 12.099971 0.000003 4.483587
H 12.038623 0.000004 7.440541
H -12.038651 -0.000000 -7.440559
H -12.099994 0.000006 -4.483580
H -12.144253 0.000006 -1.497073
H -12.144248 0.000005 1.497083
H -12.099982 0.000005 4.483584
H -12.038629 0.000000 7.440551
H -7.440551 12.038630 0.000002
H -4.483583 12.099975 0.000003
H -1.497079 12.144221 0.000004
H 1.497075 12.144221 0.000005
H 4.483580 12.099968 0.000003
H 7.440535 12.038629 0.000002
H -7.440565 -12.038641 0.000002
H -4.483589 -12.099999 0.000005
H -1.497078 -12.144261 0.000007
H 1.497079 -12.144262 0.000005
H 4.483587 -12.099997 0.000004
H 7.440549 -12.038645 0.000001
H -0.000009 -7.440541 12.038631
H 0.000001 -4.483578 12.099976
H 0.000001 -1.497071 12.144229
H 0.000001 1.497078 12.144221
H 0.000002 4.483585 12.099964
H -0.000007 7.440539 12.038614
H -0.000008 -7.440559 -12.038653
H 0.000002 -4.483583 -12.100005
H 0.000001 -1.497073 -12.144265
H 0.000002 1.497082 -12.144259
H 0.000001 4.483590 -12.099993
H -0.000006 7.440555 -12.038641
H 9.677193 -1.496906 8.403454
H 7.283257 -3.003960 9.361664
H 4.860366 -4.500621 10.287633
H 2.422429 -5.980893 11.174337
H 9.677186 1.496910 8.403449
H 7.301204 0.000004 9.440827
H 4.899855 -1.498367 10.403244
H 2.464544 -2.988767 11.288143
H 7.283251 3.003967 9.361652
H 4.899853 1.498377 10.403239
H 2.475659 0.000006 11.329680
H 4.860357 4.500623 10.287616
H 2.464540 2.988773 11.288133
H 2.422424 5.980895 11.174321
H 9.677209 -1.496912 -8.403465
H 7.283277 -3.003970 -9.361682
H 4.860384 -4.500636 -10.287647
H 2.422443 -5.980905 -11.174358
H 9.677204 1.496921 -8.403460
H 7.301220 0.000005 -9.440842
H 4.899871 -1.498371 -10.403268
H 2.464557 -2.988775 -11.288169
H 7.283270 3.003976 -9.361672
H 4.899868 1.498380 -10.403263
H 2.475670 0.000005 -11.329709
H 4.860376 4.500637 -10.287640
H 2.464554 2.988779 -11.288159
H 2.422434 5.980909 -11.174343
H -9.677190 -1.496908 8.403467
H -7.283257 -3.003963 9.361672
H -4.860371 -4.500629 10.287635
H -2.422430 -5.980892 11.174337
H -9.677187 1.496917 8.403462
H -7.301202 0.000005 9.440837
H -4.899849 -1.498370 10.403248
H -2.464542 -2.988769 11.288143
H -7.283250 3.003968 9.361662
H -4.899846 1.498382 10.403245
H -2.475653 0.000007 11.329681
H -4.860362 4.500633 10.287620
H -2.464539 2.988776 11.288131
H -2.422424 5.980896 11.174322
H -9.677207 -1.496913 -8.403482
H -7.283279 -3.003972 -9.361692
H -4.860387 -4.500634 -10.287661
H -2.422442 -5.980906 -11.174359
H -9.677203 1.496924 -8.403476
H -7.301216 0.000005 -9.440853
H -4.899865 -1.498371 -10.403273
H -2.464552 -2.988774 -11.288171
H -7.283271 3.003977 -9.361683
H -4.899862 1.498381 -10.403269
H -2.475664 0.000006 -11.329711
H -4.860376 4.500638 -10.287646
H -2.464549 2.988781 -11.288160
H -2.422433 5.980910 -11.174344
H 8.403451 9.677198 -1.496905
H 9.361660 7.283263 -3.003959
H 10.287632 4.860371 -4.500620
H 11.174342 2.422434 -5.980899
H 8.403446 9.677194 1.496911
H 9.440827 7.301209 0.000005
H 10.403246 4.899862 -1.498367
H 11.288149 2.464551 -2.988769
H 9.361650 7.283254 3.003966
H 10.403241 4.899860 1.498377
H 11.329691 2.475665 0.000007
H 10.287617 4.860361 4.500626
H 11.288137 2.464548 2.988779
H 11.174326 2.422424 5.980898
H -8.403460 9.677196 -1.496910
H -9.361670 7.283266 -3.003964
H -10.287643 4.860386 -4.500636
H -11.174348 2.422445 -5.980904
H -8.403456 9.677194 1.496915
H -9.440835 7.301210 0.000006
H -10.403255 4.899863 -1.498369
H -11.288154 2.464562 -2.988776
H -9.361661 7.283260 3.003973
H -10.403249 4.899861 1.498381
H -11.329693 2.475669 0.000006
H -10.287628 4.860378 4.500640
H -11.288144 2.464559 2.988785
H -11.174330 2.422439 5.980903
H 8.403472 -9.677206 -1.496911
H 9.361686 -7.283270 -3.003967
H 10.287643 -4.860371 -4.500629
H 11.174350 -2.422427 -5.980904
H 8.403468 -9.677203 1.496918
H 9.440847 -7.301215 0.000005
H 10.403263 -4.899857 -1.498370
H 11.288155 -2.464540 -2.988768
H 9.361677 -7.283262 3.003974
H 10.403258 -4.899854 1.498381
H 11.329699 -2.475655 0.000006
H 10.287637 -4.860362 4.500633
H 11.288145 -2.464540 2.988780
H 11.174334 -2.422420 5.980905
H -8.403482 -9.677201 -1.496913
H -9.361695 -7.283272 -3.003971
H -10.287648 -4.860385 -4.500644
H -11.174357 -2.422436 -5.980909
H -8.403479 -9.677198 1.496923
H -9.440853 -7.301212 0.000006
H -10.403270 -4.899858 -1.498373
H -11.288161 -2.464554 -2.988778
H -9.361685 -7.283267 3.003981
H -10.403265 -4.899857 1.498385
H -11.329701 -2.475659 0.000006
H -10.287644 -4.860378 4.500647
H -11.288152 -2.464553 2.988786
H -11.174339 -2.422432 5.980907
H -1.496905 8.403449 9.677180
H -3.003957 9.361655 7.283252
H -4.500628 10.287621 4.860368
H -5.980895 11.174331 2.422431
H 1.496909 8.403447 9.677182
H 0.000004 9.440823 7.301201
H -1.498372 10.403237 4.899852
H -2.988773 11.288135 2.464550
H 3.003964 9.361649 7.283252
H 1.498374 10.403237 4.899852
H 0.000002 11.329673 2.475664
H 4.500620 10.287615 4.860360
H 2.988773 11.288132 2.464547
H 5.980894 11.174325 2.422427
H -1.496909 -8.403459 9.677196
H -3.003968 -9.361674 7.283271
H -4.500641 -10.287646 4.860388
H -5.980907 -11.174350 2.422445
H 1.496913 -8.403457 9.677200
H 0.000003 -9.440837 7.301216
H -1.498376 -10.403259 4.899869
H -2.988779 -11.288160 2.464559
H 3.003975 -9.361670 7.283272
H 1.498380 -10.403259 4.899869
H 0.000003 -11.329705 2.475670
H 4.500634 -10.287640 4.860379
H 2.988780 -11.288157 2.464554
H 5.980909 -11.174345 2.422438
H -1.496912 8.403471 -9.677194
H -3.003965 9.361677 -7.283259
H -4.500629 10.287638 -4.860363
H -5.980898 11.174338 -2.422428
H 1.496914 8.403469 -9.677196
H 0.000004 9.440843 -7.301203
H -1.498370 10.403254 -4.899850
H -2.988770 11.288143 -2.464539
H 3.003972 9.361671 -7.283257
H 1.498377 10.403253 -4.899848
H 0.000002 11.329681 -2.475654
H 4.500628 10.287631 -4.860359
H 2.988772 11.288140 -2.464537
H 5.980897 11.174332 -2.422423
H -1.496916 -8.403481 -9.677208
H -3.003978 -9.361696 -7.283276
H -4.500640 -10.287662 -4.860381
H -5.980909 -11.174356 -2.422438
H 1.496920 -8.403479 -9.677211
H 0.000003 -9.440857 -7.301218
H -1.498376 -10.403275 -4.899866
H -2.988776 -11.288169 -2.464544
H 3.003983 -9.361693 -7.283276
H 1.498382 -10.403275 -4.899865
H 0.000004 -11.329712 -2.475660
H 4.500643 -10.287646 -4.860375
H 2.988780 -11.288164 -2.464543
H 5.980912 -11.174352 -2.422432
H 8.823514 -3.987216 6.975976
H 8.823502 3.987220 6.975966
H 7.928324 -6.428771 5.503194
H 7.928307 6.428772 5.503181
H 6.975982 -8.823518 3.987231
H 6.975962 8.823505 3.987222
H 6.428780 -5.503180 7.928320
H 6.428768 5.503182 7.928308
H 5.503192 -7.928317 6.428783
H 5.503180 7.928307 6.428768
H 3.987227 -6.975968 8.823516
H 3.987219 6.975963 8.823502
H 8.823531 -3.987227 -6.975977
H 8.823524 3.987231 -6.975969
H 7.928335 -6.428786 -5.503192
H 7.928318 6.428785 -5.503181
H 6.975988 -8.823529 -3.987226
H 6.975969 8.823515 -3.987217
H 6.428797 -5.503193 -7.928331
H 6.428784 5.503196 -7.928316
H 5.503204 -7.928332 -6.428787
H 5.503193 7.928321 -6.428774
H 3.987241 -6.975984 -8.823526
H 3.987231 6.975978 -8.823511
H -8.823510 -3.987222 6.975980
H -8.823504 3.987228 6.975972
H -7.928324 -6.428779 5.503202
H -7.928304 6.428779 5.503191
H -6.975981 -8.823519 3.987237
H -6.975964 8.823508 3.987226
H -6.428779 -5.503186 7.928325
H -6.428768 5.503188 7.928311
H -5.503191 -7.928320 6.428792
H -5.503179 7.928309 6.428775
H -3.987221 -6.975969 8.823517
H -3.987213 6.975965 8.823503
H -8.823529 -3.987231 -6.975982
H -8.823521 3.987238 -6.975974
H -7.928336 -6.428791 -5.503200
H -7.928317 6.428791 -5.503190
H -6.975987 -8.823528 -3.987232
H -6.975972 8.823516 -3.987222
H -6.428795 -5.503199 -7.928335
H -6.428782 5.503201 -7.928321
H -5.503203 -7.928336 -6.428794
H -5.503191 7.928325 -6.428778
H -3.987236 -6.975985 -8.823528
H -3.987226 6.975980 -8.823514
"""
# Materialize the geometry string as an .xyz file, then feed its contents to
# gto.M to build the molecule for the test below.
fname = "ag_s7l7_wonatoms.xyz"
fp = open(fname, "w")
fp.write(ag_s7l7_wonatoms)
fp.close()
# this does not work from python interp
#d = os.path.dirname(os.path.abspath(__file__))
mol = gto.M(
    verbose = 1,
    atom = open(fname).read()
)
class KnowValues(unittest.TestCase):

    def test_ls_contributing(self):
        """Check the list of centers contributing to a product pair."""
        sv = nao(gto=mol)
        pb = prod_basis()
        pb.sv = sv
        # Widen the cutoff radii so enough neighbouring centers qualify.
        pb.sv.ao_log.sp2rcut[0] = 10.0
        pb.prod_log = sv.ao_log
        pb.prod_log.sp2rcut[0] = 10.0
        pb.ac_rcut = max(sv.ao_log.sp2rcut)
        pb.ac_npc_max = 10
        lsc = pb.ls_contributing(0, 1)
        self.assertEqual(len(lsc), 10)
        lsref = [0, 1, 13, 7, 5, 43, 42, 39, 38, 10]
        # Compare element-wise against the reference ordering.
        for idx, expected in enumerate(lsref):
            self.assertEqual(lsc[idx], expected)


if __name__ == "__main__":
    unittest.main()
|
7,896 | 9b02ce0b3acb14bdd6463c5bdba865b28253767c | from platypush.message.event import Event
class ClipboardEvent(Event):
    """Event fired when new text content appears on the system clipboard."""

    def __init__(self, text: str, *args, **kwargs):
        """
        :param text: The clipboard text content.
        """
        super().__init__(*args, text=text, **kwargs)
# vim:sw=4:ts=4:et:
|
7,897 | b4f522398cd2658c2db926216e974781e10c44df | import requests
#!/usr/bin/env python
from confluent_kafka import Producer, KafkaError
import json
import ccloud_lib
# Running count of records confirmed delivered by the broker; incremented
# by the acked() delivery callback below.
delivered_records = 0
# Mockaroo mock-data endpoint; count=1000 requests 1000 records per call.
url = "https://api.mockaroo.com/api/cbb61270?count=1000&key=5a40bdb0"
# Optional per-message on_delivery handler (triggered by poll() or flush())
# when a message has been successfully delivered or
# permanently failed delivery (after retries).
def acked(err, msg):
    """Delivery report handler called on successful or failed delivery
    of a message (triggered by poll() or flush()).
    """
    global delivered_records
    if err is None:
        # Success: count it and report where the record landed.
        delivered_records += 1
        print("Produced record to topic {} partition [{}] @ offset {}"
              .format(msg.topic(), msg.partition(), msg.offset()))
    else:
        print("Failed to deliver message: {}".format(err))
#get mockaroo data records
#make sure mockaroo schema is set to output array
def get_data():
    """Fetch mock records from Mockaroo and wrap the returned array in a
    JSON object under the "data" key.

    NOTE(review): assumes the Mockaroo schema is set to output an array.
    """
    response = requests.get(url)
    return '{ "data": ' + str(response.text) + '}'
def main():
    """Produce Mockaroo mock records to a Kafka topic on Confluent Cloud."""
    # Read arguments and configurations and initialize
    args = ccloud_lib.parse_args()
    config_file = args.config_file
    topic = args.topic
    conf = ccloud_lib.read_ccloud_config(config_file)

    # Create Producer instance (schema-registry keys are not valid
    # producer settings, so they are stripped first).
    producer_conf = ccloud_lib.pop_schema_registry_params_from_config(conf)
    producer = Producer(producer_conf)

    # Create topic if needed
    ccloud_lib.create_topic(conf, topic)
    print("hello world")

    payload = json.loads(get_data())
    for record in payload['data']:
        record_key = str(record['_id'])
        record_value = json.dumps(record)
        print(record_value)
        producer.produce(topic, key=record_key, value=record_value,
                         on_delivery=acked)
        # Serve delivery callbacks without blocking.
        producer.poll(0)

    producer.flush()
    print("{} messages were produced to topic {}!".format(delivered_records, topic))

if __name__ == '__main__':
    main()
# to run program
# python user_purchases_to_kafka.py -f ~/.confluent/python.config -t user_purchases
# python user_activity_to_kafka.py -f ~/.confluent/python.config -t user_activity
|
7,898 | 50ae2b4c6d51451031fc31ebbc43c820da54d827 | import math
def hipotenusa(a, b):
    """Return the hypotenuse of a right triangle with legs *a* and *b*.

    Uses math.hypot instead of sqrt(a*a + b*b): same result for normal
    inputs, but avoids intermediate overflow/underflow for extreme values.
    """
    return math.hypot(a, b)
def main():
    """Read the two legs of a right triangle from stdin and print the
    hypotenuse.
    """
    # input() returns str in Python 3; convert before doing arithmetic
    # (the original passed strings to hipotenusa, raising TypeError).
    cateto1 = float(input('dime un cateto'))
    cateto2 = float(input('dime el otro cateto'))
    # The original used the Python-2 relic `print(...), expr`, which
    # evaluated the hypotenuse but never printed it.
    print('la hipotenusa es: ', hipotenusa(cateto1, cateto2))


if __name__ == '__main__':
    main()
|
7,899 | db920f4aadfb53bb26c5ba1fb182f12b95e14a2f | # Generated by Django 3.1.6 on 2021-02-05 00:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters three fields of the `tea` model to
    # PositiveIntegerField with range validators.  Do not hand-edit the
    # operations; regenerate with `makemigrations` if the model changes.

    dependencies = [
        ('main_app', '0001_initial'),
    ]
    operations = [
        # caffeineLvl: constrained to 1..5.
        migrations.AlterField(
            model_name='tea',
            name='caffeineLvl',
            field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)]),
        ),
        # quantPerBox: constrained to 1..100.
        migrations.AlterField(
            model_name='tea',
            name='quantPerBox',
            field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
        ),
        # quantity: constrained to 1..100.
        migrations.AlterField(
            model_name='tea',
            name='quantity',
            field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.