| content (stringlengths 0–1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
#!/usr/bin/python3
"""Explode an LLVM IR dump.
Given a debug dump generated with the -print-before-all option, split out the
dumps into multiple files, one for each phase/procedure. Example usage:
clang -c -O3 -mllvm -print-before-all mumble.c 1> err.txt 2>&1
rm -rf /tmp/dumps ; mkdir /tmp/dumps
explode-llvm-ir-dumps.py -i err.txt -o /tmp/dumps
Right at the moment (this could change in the future), the output of
-print-before-all includes both function-scope dumps and module-scope
dumps. The expected pattern will be something like
mod dump 1 pass X
mod dump 2 pass Y
func 1 dump for pass A
func 1 dump for pass B
loop L1 dump for pass LP
loop L2 dump for pass LP
func 1 dump for pass C
func 2 dump for pass A
func 2 dump for pass B
func 2 dump for pass C
mod dump 3 pass Z
Typically what we're interested in doing is tracking a function over time
through the various dumps, so we emit an index ("index.txt") containing a
chronological listing of the dumps that mention a function (the assumption being
that a module dump mentions all functions).
"""
from collections import defaultdict
import getopt
import os
import re
import sys
import script_utils as u
# Dry run mode
flag_dryrun = False
# Echo commands mode
flag_echo = False
# Input file, output dir
flag_infile = None
flag_outdir = None
# Passes, functions
passes = {}
functions = {}
loops = {}
# Key is dump name (func:pass) and value is counter (how many
# dumps we've seen for this dumpname, since a given pass can
# happen more than once for a given function).
dumps = defaultdict(int)
# Complete listing of dump files in chronological order.
alldumps = []
# Keyed by function, value is a list of indices into the alldumps array.
funcdumps = defaultdict(list)
def docmd(cmd):
"""Execute a command."""
if flag_echo or flag_dryrun:
sys.stderr.write("executing: " + cmd + "\n")
if flag_dryrun:
return
u.docmd(cmd)
def dochdir(thedir):
"""Switch to dir."""
if flag_echo or flag_dryrun:
sys.stderr.write("cd " + thedir + "\n")
try:
os.chdir(thedir)
except OSError as err:
u.error("chdir failed: %s" % err)
def do_clean(subdir):
"""Clean this libgo dir."""
flavs = (".o", "gox", ".a", ".so", ".lo", ".la")
here = os.getcwd()
dochdir(subdir)
if flag_dryrun:
u.verbose(0, "... cleaning %s" % subdir)
else:
cmd = "find . -depth "
first = True
for item in flavs:
if not first:
cmd += " -o "
first = False
cmd += "-name '*%s' -print" % item
lines = u.docmdlines(cmd)
lines.reverse()
debris = lines
for d in debris:
if not d:
continue
u.verbose(1, "toclean '%s'" % d)
os.unlink(d)
dochdir(here)
def sanitize_pass(passname):
"""Sanitize passname to remove embedded spaces, etc."""
passname = passname.replace(" ", "_")
passname = passname.replace("(", ".LP")
passname = passname.replace(")", ".RP")
passname = passname.replace("/", ".SL")
passname = passname.replace("'", ".SQ")
return passname
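# Example (hypothetical pass name): sanitize_pass("Loop Strength Reduction (loop-reduce)")
# returns "Loop_Strength_Reduction_.LPloop-reduce.RP".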
def emitdump(passname, funcname, looplab, lines):
"""Emit single dump for module/pass or fn/pass."""
u.verbose(2, "emitdump(%s,%s,%s,lines=%d)" % (passname, funcname, looplab, len(lines)))
if not lines:
return
tag = funcname
if not funcname:
tag = "__module__"
if looplab:
dump = "%s:L%s:%s" % (tag, looplab, passname)
else:
dump = "%s:%s" % (tag, passname)
dumpver = dumps[dump]
dumps[dump] += 1
dumpname = "%s:%d" % (dump, dumpver)
ofname = os.path.join(flag_outdir, dumpname)
try:
with open(ofname, "w") as wf:
for line in lines:
wf.write(line)
except IOError:
u.error("open failed for %s" % ofname)
u.verbose(1, "emitted dump %d of %d "
"lines to %s" % (dumpver, len(lines), ofname))
# book-keeping
dumpidx = len(alldumps)
alldumps.append(dumpname)
if funcname:
funcdumps[funcname].append(dumpidx)
return dumpname
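# Naming scheme example: the third dump of a (hypothetical) pass "SLPVectorizer"
# for function "foo" is written to <outdir>/foo:SLPVectorizer:2, since the
# per-dump counters start at zero.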
def process(rf):
"""Read lines from input file."""
# Note: dumps are emitted lazily, e.g. we read through all of dump K
# and into dump K+1 before emitting dump K.
lnum = 0
dumpre = re.compile(r"^\*\*\* IR Dump Before (\S.+)\s+\*\*\*\s*")
fnre = re.compile(r"^define\s\S.+\s\@(\S+)\(.+\).+\{\s*$")
modre = re.compile(r"^target datalayout =.*$")
loopre = re.compile(r"^\; Preheader\:\s*$")
labre = re.compile(r"^(\S+)\:.*$")
# Info on previous dump
curpass = None
curfunc = None
curloop = None
curdumplines = []
# Current dump flavor: one of 'module', 'function', 'loop', or 'unknown'
dumpflavor = "unknown"
# Lines in current dump
dumplines = []
# State information on the current loop.
passname = None
looplabel = None
while True:
lnum += 1
line = rf.readline()
if not line:
break
if dumpflavor == "unknown":
mloop = loopre.match(line)
if mloop:
# Emit previous dump
if curpass:
emitdump(curpass, curfunc, curloop, curdumplines)
# This is a loop dump. Note: keep curfunc.
dumpflavor = "loop"
u.verbose(1, "line %d: now in loop dump "
"for fn %s pass %s" % (lnum, curfunc, curpass))
mmod = modre.match(line)
if mmod:
# Emit previous dump
if curpass:
emitdump(curpass, curfunc, curloop, curdumplines)
# This is a module dump. Discard func.
dumpflavor = "module"
curfunc = None
u.verbose(1, "line %d: now in module dump "
"for pass %s" % (lnum, curpass))
mfn = fnre.match(line)
if mfn:
# Emit previous dump.
if curpass:
emitdump(curpass, curfunc, curloop, curdumplines)
curfunc = mfn.group(1)
functions[curfunc] = 1
u.verbose(1, "line %d: now in fn %s" % (lnum, curfunc))
if dumpflavor == "loop" and not looplabel:
mlab = labre.match(line)
if mlab:
looplabel = mlab.group(1)
loops[looplabel] = 1
u.verbose(1, "line %d: loop label is %s" % (lnum, looplabel))
mdmp = dumpre.match(line)
if mdmp:
curpass = passname
curloop = looplabel
if curloop:
u.verbose(1, "line %d: curloop now %s" % (lnum, curloop))
looplabel = None
passname = sanitize_pass(mdmp.group(1))
u.verbose(1, "line %d: passname is %s" % (lnum, passname))
curdumplines = dumplines
dumplines = []
passes[passname] = 1
dumpflavor = "unknown"
dumplines.append(line)
# emit final dump
if curpass:
emitdump(curpass, curfunc, curloop, curdumplines)
def emitstats():
"""Emit stats and index."""
indname = os.path.join(flag_outdir, "index.txt")
totaldumps = 0
for _, v in dumps.items():
totaldumps += v
u.verbose(0, "... captured %d total dumps, %d functions, "
"%d loops, %d passes" % (totaldumps, len(functions),
len(loops), len(passes)))
try:
with open(indname, "w") as wf:
sfuncs = sorted(functions.keys())
for f in sfuncs:
wf.write("\n\nfunction '%s':\n" % f)
indices = funcdumps[f]
for idx in indices:
dumpname = alldumps[idx]
wf.write(" %s\n" % dumpname)
except IOError:
u.error("open failed for %s" % indname)
u.verbose(0, "... emitted dump catalog to %s" % indname)
def perform():
"""Top level driver routine."""
try:
with open(flag_infile, "r") as rf:
process(rf)
except IOError:
u.error("open failed for %s" % flag_infile)
emitstats()
def usage(msgarg):
"""Print usage and exit."""
me = os.path.basename(sys.argv[0])
if msgarg:
sys.stderr.write("error: %s\n" % msgarg)
print("""\
usage: %s [options]
options:
-d increase debug msg verbosity level
-o X write dumps to dir X
-D dryrun mode (echo commands but do not execute)
""" % me)
sys.exit(1)
def parse_args():
"""Command line argument parsing."""
global flag_dryrun, flag_echo, flag_outdir, flag_infile
try:
optlist, args = getopt.getopt(sys.argv[1:], "deo:i:D")
except getopt.GetoptError as err:
# unrecognized option
usage(str(err))
if args:
usage("unknown extra args")
for opt, arg in optlist:
if opt == "-d":
u.increment_verbosity()
elif opt == "-e":
flag_echo = True
elif opt == "-D":
flag_dryrun = True
elif opt == "-o":
flag_outdir = arg
if not os.path.exists(flag_outdir):
usage("argument to -o flag '%s' not accessible" % arg)
if not os.path.isdir(flag_outdir):
usage("argument to -o flag '%s' not a directory" % arg)
elif opt == "-i":
flag_infile = arg
if not os.path.exists(flag_infile):
usage("argument to -i flag '%s' not accessible" % arg)
if not flag_outdir:
usage("supply out dir path with -o")
if not flag_infile:
usage("supply input file path with -i")
parse_args()
u.setdeflanglocale()
perform()
| nilq/baby-python | python |
# https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/Corpora_and_Vector_Spaces.ipynb
from gensim import corpora
# This is a tiny corpus of nine documents, each consisting of only a single sentence
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist] for document in documents]
# remove words that appear only once
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
from pprint import pprint # pretty printer
pprint(texts)
## bag of words
import os
TEMP_FOLDER = "strings_to_vectors"
os.makedirs(TEMP_FOLDER, exist_ok=True)  # ensure the output folder exists before saving
dictionary = corpora.Dictionary(texts)
dictionary.save(os.path.join(TEMP_FOLDER, 'x.dict'))
print(dictionary)
# there are twelve distinct words in the processed corpus, which means each document will be represented by twelve numbers (i.e., by a 12-D vector)
print(dictionary.token2id)
# convert tokenized documents to vectors
newdoc = "Human computer interaction"
# The function doc2bow() simply counts the number of occurrences of each distinct word, converts the word to its integer word id and returns the result as a bag-of-words--a sparse vector, in the form of [(word_id, word_count), ...]
newvec = dictionary.doc2bow(newdoc.lower().split())
print(newvec) # the word "interaction" does not appear in the dictionary and is ignored
# The token_id is 0 for "human" and 2 for "computer"
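# Given those ids, the vector printed above should be [(0, 1), (2, 1)]:
# one occurrence each of "human" and "computer".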
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize(os.path.join(TEMP_FOLDER, 'x.mm'), corpus)
for c in corpus:
print(c)
# Corpus Streaming – One Document at a Time
class MyCorpus(object):
def __iter__(self):
for line in open(os.path.join(TEMP_FOLDER, 'mycorpus.txt')):
# assume there is one document per line, tokens separated by whitespace
yield dictionary.doc2bow(line.lower().split())
corpusmemoryfriendly = MyCorpus()
for vector in corpusmemoryfriendly:
print(vector)
| nilq/baby-python | python |
# Copyright (c) 2013 by Gilbert Ramirez <gram@alumni.rice.edu>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from dftestlib import dftest
class testIPv4(dftest.DFTest):
trace_file = "nfs.pcap"
def test_uint64_1(self):
dfilter = "nfs.fattr3.size == 264032"
self.assertDFilterCount(dfilter, 1)
def test_eq_1(self):
dfilter = "ip.src == 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_eq_2(self):
dfilter = "ip.src == 255.255.255.255"
self.assertDFilterCount(dfilter, 0)
def test_ne_1(self):
dfilter = "ip.src != 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_ne_2(self):
dfilter = "ip.src != 255.255.255.255"
self.assertDFilterCount(dfilter, 2)
def test_gt_1(self):
dfilter = "ip.dst > 198.95.230.200"
self.assertDFilterCount(dfilter, 0)
def test_gt_2(self):
dfilter = "ip.dst > 198.95.230.20"
self.assertDFilterCount(dfilter, 0)
def test_gt_3(self):
dfilter = "ip.dst > 198.95.230.10"
self.assertDFilterCount(dfilter, 1)
def test_ge_1(self):
dfilter = "ip.dst >= 198.95.230.200"
self.assertDFilterCount(dfilter, 0)
def test_ge_2(self):
dfilter = "ip.dst >= 198.95.230.20"
self.assertDFilterCount(dfilter, 1)
def test_ge_3(self):
dfilter = "ip.dst >= 198.95.230.10"
self.assertDFilterCount(dfilter, 1)
def test_lt_1(self):
dfilter = "ip.src < 172.25.100.140"
self.assertDFilterCount(dfilter, 1)
def test_lt_2(self):
dfilter = "ip.src < 172.25.100.14"
self.assertDFilterCount(dfilter, 0)
def test_lt_3(self):
dfilter = "ip.src < 172.25.100.10"
self.assertDFilterCount(dfilter, 0)
def test_le_1(self):
dfilter = "ip.src <= 172.25.100.140"
self.assertDFilterCount(dfilter, 1)
def test_le_2(self):
dfilter = "ip.src <= 172.25.100.14"
self.assertDFilterCount(dfilter, 1)
def test_le_3(self):
dfilter = "ip.src <= 172.25.100.10"
self.assertDFilterCount(dfilter, 0)
def test_cidr_eq_1(self):
dfilter = "ip.src == 172.25.100.14/32"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_2(self):
dfilter = "ip.src == 172.25.100.0/24"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_3(self):
dfilter = "ip.src == 172.25.0.0/16"
self.assertDFilterCount(dfilter, 1)
def test_cidr_eq_4(self):
dfilter = "ip.src == 172.0.0.0/8"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_1(self):
dfilter = "ip.src != 172.25.100.14/32"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_2(self):
dfilter = "ip.src != 172.25.100.0/24"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_3(self):
dfilter = "ip.src != 172.25.0.0/16"
self.assertDFilterCount(dfilter, 1)
def test_cidr_ne_4(self):
dfilter = "ip.src != 200.0.0.0/8"
self.assertDFilterCount(dfilter, 2)
| nilq/baby-python | python |
# Author: Deepak Pathak (c) 2016
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import numpy as np
from PIL import Image
import time
import argparse
import os
import pyflow
parser = argparse.ArgumentParser(
description='Demo for python wrapper of Coarse2Fine Optical Flow')
parser.add_argument(
'-viz', dest='viz', action='store_true',
help='Visualize (i.e. save) output of flow.')
args = parser.parse_args()
examples_dir = "./examples"
im1 = np.array(Image.open(os.path.join(examples_dir, 'car1.jpg')))
im2 = np.array(Image.open(os.path.join(examples_dir, 'car2.jpg')))
im1 = im1.astype(float) / 255.
im2 = im2.astype(float) / 255.
# Flow Options:
alpha = 0.012 # default 0.012
ratio = 0.75 # default 0.75
minWidth = 20 # default 20
nOuterFPIterations = 7 # default 7
nInnerFPIterations = 1 # default 1
nSORIterations = 30 # default 30
colType = 0 # 0 or default:RGB, 1:GRAY (but pass gray image with shape (h,w,1))
threshold = 0.000005
s = time.time()
u, v, im2W = pyflow.coarse2fine_flow(
im1, im2, alpha, ratio, minWidth, nOuterFPIterations, nInnerFPIterations,
    nSORIterations, colType, verbose=True, threshold=threshold)
e = time.time()
print('Time Taken: %.2f seconds for image of size (%d, %d, %d)' % (
e - s, im1.shape[0], im1.shape[1], im1.shape[2]))
flow = np.concatenate((u[..., None], v[..., None]), axis=2)
np.save(os.path.join(examples_dir, 'outFlow.npy'), flow)
always_viz = True
if args.viz or always_viz:
import cv2
hsv = np.zeros(im1.shape, dtype=np.uint8)
hsv[:, :, 0] = 255
hsv[:, :, 1] = 255
mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite(os.path.join(examples_dir, 'outFlow_new.png'), rgb)
cv2.imwrite(os.path.join(examples_dir, 'img2Warped_new.jpg'), im2W[:, :, ::-1] * 255)
| nilq/baby-python | python |
from rect import Rect
from math import floor
class HashMap(object):
"""
Hashmap is a broad-phase collision detection strategy, which is quick
enough to build each frame.
"""
def __init__(self, cell_size):
self.cell_size = cell_size
self.grid = {}
@classmethod
def from_objects(cls, cell_size, objects):
"""
Build a HashMap from a list of objects which have a .rect attribute.
"""
h = cls(cell_size)
g = h.grid
        for o in objects:
            point = o.rect.left, o.rect.bottom
            # reuse key() instead of duplicating the cell computation
            g.setdefault(h.key(point), []).append(o)
return h
    def key(self, point):
        cell_size = self.cell_size
        # join with a separator so e.g. cells (100, 30) and (1003, 0) hash differently
        return "%s:%s" % (int(floor(point[0] / cell_size) * cell_size),
                          int(floor(point[1] / cell_size) * cell_size))
def insert(self, obj, rect):
"""
Insert obj into the hashmap, based on rect.
"""
self.grid.setdefault(self.key((rect.left, rect.bottom)), []).append(obj)
def query(self, point):
"""
Return all objects in and around the cell specified by point.
"""
objects = []
x,y = point
s = self.cell_size
        for p in (x-s,y-s),(x-s,y),(x-s,y+s),(x,y-s),(x,y),(x,y+s),(x+s,y-s),(x+s,y),(x+s,y+s):
            # .get() avoids inserting empty lists for never-queried cells
            objects.extend(self.grid.get(self.key(p), []))
return objects
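# Minimal usage sketch (hypothetical sprite objects exposing a .rect with
# .left/.bottom attributes):
#   hm = HashMap.from_objects(64, sprites)
#   nearby = hm.query((player.rect.left, player.rect.bottom))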
| nilq/baby-python | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
drawmap.py
@Purpose: draw a current map from an hdf mohid or netcdf output
@version: 1.0
@python version: 3.9
@author: Pedro Montero
@license: INTECMAR
@requires: matplotlib, numpy, toolkits.basemap
@date 2021/10/13
@history:
"""
import os
from common.readers.reader_factory import read_factory
from common import read_input
from common.boundarybox import BoundaryBox
from drawcurrents import drawcurrents
def read_inputs(input_file):
"""Read keywords for options"""
input_keys = ['path_in',
'file_in',
                  'path_out',
                  'file_out',
'nx',
'ny',
'resolution',
'scale',
'n_time',
'n_level',
'title',
'style',
                  'limits',
                  'u',  # u/v: velocity variable names, read later by DrawMap
                  'v']
return read_input(input_file, input_keys)
def main():
"""
Main program:
:return:
"""
# Start
print("________________________________________\n")
print(" DRAWMAP")
print("________________________________________\n")
# Read input file
inputs = read_inputs('drawmap.json')
draw_map_1(inputs, 0)
#draw_map_24(inputs)
def draw_map_1(inputs, n):
    """draw one map of a day"""
draw_map = DrawMap(inputs)
draw_map.read_head()
draw_map.create_title(n)
draw_map.reader_uv_by_time(n)
print(draw_map.title_full)
draw_map.draw()
def draw_map_24(inputs):
"""draw 24+1 maps of a day"""
draw_map = DrawMap(inputs)
draw_map.read_head()
for n in range(draw_map.reader.ini_ntime, 25+draw_map.reader.ini_ntime):
draw_map.create_title(n)
draw_map.reader_uv_by_time(n)
print(draw_map.title_full)
draw_map.draw()
class DrawMap:
"""Class to draw a map with all options"""
def __init__(self, inputs):
self.file_path_in = inputs['path_in']
self.file_path_out = inputs['path_out']
self.file_in = inputs['file_in']
self.file_name = os.path.join(self.file_path_in, self.file_in)
self.file_hdf_out = inputs['file_out']
self.file_out = os.path.join(self.file_path_out, self.file_hdf_out)
self.nx = inputs['nx']
self.ny = inputs['ny']
self.scale = inputs['scale']
self.resolution = inputs['resolution']
self.style = inputs['style']
self.title = inputs['title']
self.level = inputs['n_level']
self.time = inputs['n_time']
limits = inputs['limits']
self.boundary_box = BoundaryBox(limits[0], limits[1], limits[2], limits[3])
self.u_name = inputs['u']
self.v_name = inputs['v']
self.reader = None
def read_head(self):
print('Opening: {0}'.format(self.file_name))
factory = read_factory(self.file_name)
self.reader = factory.get_reader()
with self.reader.open():
lat = self.reader.latitudes
lon = self.reader.longitudes
if self.reader.coordinates_rank == 1:
self.lats = lat[0:self.reader.n_latitudes - 1]
self.lons = lon[0:self.reader.n_longitudes - 1]
elif self.reader.coordinates_rank == 2:
self.lats = lat[0:self.reader.n_longitudes - 1, 0:self.reader.n_latitudes - 1]
self.lons = lon[0:self.reader.n_longitudes - 1, 0:self.reader.n_latitudes - 1]
def create_title(self, n_time):
with self.reader.open():
data = self.reader.get_date(n_time)
data_str = data.strftime("%Y-%m-%d %H:%M UTC")
data_comp = data.strftime("%Y%m%d%H%M")
self.title_full = self.title + " " + data_str
self.file_out_full = self.file_out + '_' + data_comp + '.png'
def reader_uv_by_time(self, n_time):
with self.reader.open():
u = self.reader.get_variable(self.u_name, n_time)
v = self.reader.get_variable(self.v_name, n_time)
if len(u.shape) == 3:
self.us = u[self.level, :-1, :- 1]
self.vs = v[self.level, :-1, :-1]
elif len(u.shape) == 2:
self.us = u[:-1, :-1]
self.vs = v[:-1, :-1]
self.mod = pow((pow(self.us, 2) + pow(self.vs, 2)), .5)
def reader_uv(self):
self.reader_uv_by_time(self.time)
def draw(self):
drawcurrents(self.reader.coordinates_rank, self.nx, self.ny, self.scale, self.resolution,
self.level, self.time, self.lats, self.lons, self.us, self.vs, self.mod,
self.file_out_full, self.title_full, self.style, self.boundary_box)
if __name__ == '__main__':
main()
| nilq/baby-python | python |
a,b=map(int,input().split())
print(-~(a+b)//2)
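# -~(a+b) evaluates to a + b + 1, so this prints ceil((a + b) / 2) via floor division.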
| nilq/baby-python | python |
# script to create relationship classes in a GeMS database
#
# Somebody who uses relationship classes more than I should look at this.
# Particularly, are the Forward and Backward labels named as usefully as possible?
# Are there other issues?
#
# Note that Validate Database script can be used to maintain referential integrity,
# thus I don't suggest use of relationship classes to accomplish this.
# Ralph Haugerud, USGS
# Use this mostly in order to see related records in Identify tool in ArcMap or in
# the feature attribute pop-up in ArcGIS Pro. Note that the related table must be in the map
# in order for the related records to be displayed.
# Evan Thoms, USGS
# GeMS_RelationshipClasses_AGP2.py
# 6 June 2019: updated to work with Python 3 in ArcGIS Pro. Evan Thoms
# ran the script through 2to3. No other edits necessary
# renamed from GeMS_RelationshipClasses1_Arc10.py to GeMS_RelationshipClasses_AGP2.py
# 7 November 2019: In response to issue raised at repo, completely rewritten to
# 1) not create feature dataset just for the relationship classes. Found the
# feature dataset could be written, but could not write relationship classes there.
# Perhaps because it only had a name and no other properties, although this is probably
# not best practices anyway. Relationship classes are now written in the workspace
# in which the feature class is found
# 2) create relationship classes based on controlled fields, not a list of explicit
# relationship classes. Could result in many superfluous relationship classes
# 3) attempt to work with table and field names regardless of case
import arcpy
import sys
import os
from GeMS_utilityFunctions import *
versionString = 'GeMS_RelationshipClasses1_AGP2.py, version of 3 March 2021'
rawurl = 'https://raw.githubusercontent.com/usgs/gems-tools-pro/master/Scripts/GeMS_RelationshipClasses_AGP2.py'
checkVersion(versionString, rawurl, 'gems-tools-pro')
def fname_find(field_string, table):
# finds a field name regardless of the case of the search string (field_string)
fields = arcpy.ListFields(table)
for field in fields:
if field.name.lower() == field_string.lower():
return field.name
def tname_find(table_string):
#find a table name regardless of case of search string (table_string)
#searches the dictionary of table name: [root, path]
for key in tab_dict:
if key.lower() == table_string.lower():
return key
def rc_handler(key, value, foreign_search, primary_search, origin_search):
try:
#sanitize the field and table names in case everything is in lower or upper case
origin = tname_find(origin_search)
d_key = fname_find(foreign_search, value[1])
o_key = fname_find(primary_search, tab_dict[origin][1])
#check for existing relationship class
rc_name = '{}_{}'.format(key, d_key)
if arcpy.Exists(rc_name): arcpy.Delete_management(rc_name)
#create the relationship class
addMsgAndPrint('Building {}'.format(rc_name))
#print(tab_dict[origin][1],value[1],rc_name,'SIMPLE','{} in {}'.format(o_key, key),'{} in {}'.format(d_key, origin),
#'NONE','ONE_TO_MANY','NONE',o_key,d_key, sep=' | ')
arcpy.CreateRelationshipClass_management(tab_dict[origin][1],
value[1],
rc_name,
'SIMPLE',
'{} in {}'.format(o_key, key),
'{} in {}'.format(d_key, origin),
'NONE',
'ONE_TO_MANY',
'NONE',
o_key,
d_key)
    except Exception:
        # rc_name is unbound if the failure happened before it was built, so
        # report the feature class and field being processed instead
        print('Could not create relationship class for {} ({})'.format(key, foreign_search))
inGdb = sys.argv[1]
#make a dictionary of feature classes: [workspace, full path]
walkfc = arcpy.da.Walk(inGdb, datatype='FeatureClass')
fc_dict = {}
for root, dirs, files in walkfc:
for file in files:
fc_path = os.path.join(root, file)
if arcpy.Describe(fc_path).featureType != 'Annotation':
fc_dict[file] = [root, fc_path]
#make a dictionary of tables: [workspace, full path]
walktab = arcpy.da.Walk(inGdb, datatype='Table')
tab_dict = {}
for root, dirs, files in walktab:
for file in files:
if file.find('.') == -1:
tab_dict[file] = [root, os.path.join(root, file)]
#run through the feature classes and create appropriate relationship classes
for key, value in fc_dict.items():
arcpy.env.workspace = value[0]
for field in arcpy.ListFields(value[1]):
field_name = field.name.lower()
        # parenthesized so both Type and *Confidence fields must also be String type
        if (field_name == 'type' or field_name.find('confidence') > 0) and field.type == 'String':
rc_handler(key, value, field.name, 'Term', 'Glossary')
if field_name.find('source_id') > 0:
rc_handler(key, value, field.name, 'DataSource_ID', 'DataSources')
if field_name == 'geomaterial':
rc_handler(key, value, field.name, 'GeoMaterial', 'GeoMaterialDict')
if field_name == 'mapunit':
rc_handler(key, value, field.name, 'MapUnit', 'DescriptionOfMapUnits')
addMsgAndPrint('Done')
| nilq/baby-python | python |
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Paint')
self.setWindowIcon(QIcon('Images/Paint.png'))
self.setMaximumSize(860, 720)
self.setMinimumSize(860, 720)
self.UI()
def UI(self):
self.Image = QImage(self.size(), QImage.Format_RGB32)
self.Image.fill(Qt.white)
self.Drawing = False
self.BrushSize = 2
self.BrushColor = Qt.black
self.LastPoint = QPoint()
self.MainMenu = self.menuBar()
self.FileMenu = self.MainMenu.addMenu('File')
self.BrushSizeMenu = self.MainMenu.addMenu('Brush Size')
self.BrushColorMenu = self.MainMenu.addMenu('Brush Color')
self.OpenAction = QAction(QIcon('Images/Open.png'), 'Open', self)
self.OpenAction.setShortcut(QKeySequence.Open)
self.OpenAction.triggered.connect(self.Open)
self.FileMenu.addAction(self.OpenAction)
self.SaveAction = QAction(QIcon('Images/Save.png'), 'Save', self)
self.SaveAction.setShortcut(QKeySequence.Save)
self.SaveAction.triggered.connect(self.Save)
self.FileMenu.addAction(self.SaveAction)
self.ClearAction = QAction(QIcon('Images/Clear.png'), 'Clear', self)
self.ClearAction.triggered.connect(self.Clear)
self.FileMenu.addAction(self.ClearAction)
self.QuitAction = QAction(QIcon('Images/Quit.png'), 'Quit', self)
self.QuitAction.setShortcut(QKeySequence.Close)
        self.QuitAction.triggered.connect(self.close)  # close() delivers a real QCloseEvent to closeEvent()
self.FileMenu.addAction(self.QuitAction)
self.OnepxAction = QAction(QIcon('Images/1.png'), 'One', self)
self.OnepxAction.triggered.connect(self.One)
self.BrushSizeMenu.addAction(self.OnepxAction)
self.TwopxAction = QAction(QIcon('Images/2.png'), 'Two', self)
self.TwopxAction.triggered.connect(self.Two)
self.BrushSizeMenu.addAction(self.TwopxAction)
self.ThreepxAction = QAction(QIcon('Images/3.png'), 'Three', self)
self.ThreepxAction.triggered.connect(self.Three)
self.BrushSizeMenu.addAction(self.ThreepxAction)
self.FourpxAction = QAction(QIcon('Images/4.png'), 'Four', self)
self.FourpxAction.triggered.connect(self.Four)
self.BrushSizeMenu.addAction(self.FourpxAction)
self.FivepxAction = QAction(QIcon('Images/5.png'), 'Five', self)
self.FivepxAction.triggered.connect(self.Five)
self.BrushSizeMenu.addAction(self.FivepxAction)
self.SixpxAction = QAction(QIcon('Images/6.png'), 'Six', self)
self.SixpxAction.triggered.connect(self.Six)
self.BrushSizeMenu.addAction(self.SixpxAction)
self.SevenpxAction = QAction(QIcon('Images/7.png'), 'Seven', self)
self.SevenpxAction.triggered.connect(self.Seven)
self.BrushSizeMenu.addAction(self.SevenpxAction)
        self.EightpxAction = QAction(QIcon('Images/8.png'), 'Eight', self)
self.EightpxAction.triggered.connect(self.Eight)
self.BrushSizeMenu.addAction(self.EightpxAction)
self.NinepxAction = QAction(QIcon('Images/9.png'), 'Nine', self)
self.NinepxAction.triggered.connect(self.Nine)
self.BrushSizeMenu.addAction(self.NinepxAction)
        self.MessageAction = QAction(QIcon('Images/Colors.png'), 'Standard Colors', self)
self.MessageAction.setText('Standard Colors')
self.BrushColorMenu.addAction(self.MessageAction)
self.BrushColorMenu.addSeparator()
# self.WhiteAction = QAction(QIcon('Images/White.png'), 'White',self)
# self.WhiteAction.triggered.connect(self.White)
# self.BrushColorMenu.addAction(self.WhiteAction)
self.BlackAction = QAction(QIcon('Images/Black.png'), 'Black', self)
self.BlackAction.triggered.connect(self.Black)
self.BrushColorMenu.addAction(self.BlackAction)
self.DarkGrayAction = QAction(QIcon('Images/Dark Gray.png'), 'Dark Gray', self)
self.DarkGrayAction.triggered.connect(self.DarkGray)
self.BrushColorMenu.addAction(self.DarkGrayAction)
self.GrayAction = QAction(QIcon('Images/Gray.png'), 'Gray', self)
self.GrayAction.triggered.connect(self.Gray)
self.BrushColorMenu.addAction(self.GrayAction)
self.LightGrayAction = QAction(QIcon('Images/Light Gray.png'), 'Light Gray', self)
self.LightGrayAction.triggered.connect(self.LightGray)
self.BrushColorMenu.addAction(self.LightGrayAction)
self.DarkRedAction = QAction(QIcon('Images/Dark Red.png'), 'Dark Red', self)
self.DarkRedAction.triggered.connect(self.DarkRed)
self.BrushColorMenu.addAction(self.DarkRedAction)
self.BrownAction = QAction(QIcon('Images/Brown.png'), 'Brown', self)
self.BrownAction.triggered.connect(self.Brown)
self.BrushColorMenu.addAction(self.BrownAction)
self.RedAction = QAction(QIcon('Images/Red.png'), 'Red', self)
self.RedAction.triggered.connect(self.Red)
self.BrushColorMenu.addAction(self.RedAction)
self.PinkAction = QAction(QIcon('Images/Pink.png'), 'Pink', self)
self.PinkAction.triggered.connect(self.Pink)
self.BrushColorMenu.addAction(self.PinkAction)
self.DarkYellowAction = QAction(QIcon('Images/Dark Yellow.png'), 'Dark Yellow', self)
self.DarkYellowAction.triggered.connect(self.DarkYellow)
self.BrushColorMenu.addAction(self.DarkYellowAction)
self.YellowAction = QAction(QIcon('Images/Yellow.png'), 'Yellow', self)
self.YellowAction.triggered.connect(self.Yellow)
self.BrushColorMenu.addAction(self.YellowAction)
self.LightYellowAction = QAction(QIcon('Images/Light Yellow.png'), 'Light Yellow', self)
self.LightYellowAction.triggered.connect(self.LightYellow)
self.BrushColorMenu.addAction(self.LightYellowAction)
self.OrangeAction = QAction(QIcon('Images/Orange.png'), 'Orange', self)
self.OrangeAction.triggered.connect(self.Orange)
self.BrushColorMenu.addAction(self.OrangeAction)
self.DarkGreenAction = QAction(QIcon('Images/Dark Green.png'), 'Dark Green', self)
self.DarkGreenAction.triggered.connect(self.DarkGreen)
self.BrushColorMenu.addAction(self.DarkGreenAction)
self.GreenAction = QAction(QIcon('Images/Green.png'), 'Green', self)
self.GreenAction.triggered.connect(self.Green)
self.BrushColorMenu.addAction(self.GreenAction)
self.LightGreenAction = QAction(QIcon('Images/Light Green.png'), 'Light Green', self)
self.LightGreenAction.triggered.connect(self.LightGreen)
self.BrushColorMenu.addAction(self.LightGreenAction)
self.LightBlueAction = QAction(QIcon('Images/Light Blue.png'), 'Light Blue', self)
self.LightBlueAction.triggered.connect(self.LightBlue)
self.BrushColorMenu.addAction(self.LightBlueAction)
self.BlueAction = QAction(QIcon('Images/Blue.png'), 'Blue', self)
self.BlueAction.triggered.connect(self.Blue)
self.BrushColorMenu.addAction(self.BlueAction)
self.DarkBlueAction = QAction(QIcon('Images/Dark Blue.png'), 'Dark Blue', self)
self.DarkBlueAction.triggered.connect(self.DarkBlue)
self.BrushColorMenu.addAction(self.DarkBlueAction)
self.MarineBlueAction = QAction(QIcon('Images/Marine Blue.png'), 'Marine Blue', self)
self.MarineBlueAction.triggered.connect(self.MarineBlue)
self.BrushColorMenu.addAction(self.MarineBlueAction)
self.VioletAction = QAction(QIcon('Images/Purple.png'), 'Purple', self)
self.VioletAction.triggered.connect(self.Purple)
self.BrushColorMenu.addAction(self.VioletAction)
self.BrushColorMenu.addSeparator()
self.SelectColorAction = QAction(QIcon('Images/Color Picker.png'), 'Select Color', self)
self.SelectColorAction.triggered.connect(self.ColorDialog)
self.BrushColorMenu.addAction(self.SelectColorAction)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.Drawing = True
self.LastPoint = event.pos()
# print(self.LastPoint)
def mouseMoveEvent(self, event):
if (event.buttons() == Qt.LeftButton) and self.Drawing:
Painter = QPainter(self.Image)
Painter.setPen(QPen(self.BrushColor, self.BrushSize,
Qt.SolidLine, Qt.RoundCap,
Qt.RoundJoin))
Painter.drawLine(self.LastPoint, event.pos())
self.LastPoint = event.pos()
self.update()
def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton:
self.Drawing = False
def paintEvent(self, event):
canvasPainter = QPainter(self)
canvasPainter.drawImage(self.rect(), self.Image,
self.Image.rect())
def Save(self):
filePath, _ = QFileDialog.getSaveFileName(self,
'Save Image',
'',
'*.PNG')
if filePath == '':
return
self.Image.save(filePath)
def Clear(self):
self.Image.fill(Qt.white)
self.update()
def closeEvent(self, event):
text = '''Are you sure you want to Quit?\nAny unsaved work will be lost.'''
reply = QMessageBox.question(
self, 'Warning!', text,
QMessageBox.Save | QMessageBox.Cancel | QMessageBox.Close
)
        if reply == QMessageBox.Close:
            event.accept()
            QApplication.quit()
        elif reply == QMessageBox.Save:
            self.Save()
            event.accept()
        else:
            # Cancel (or dialog dismissed): keep the window open
            event.ignore()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
QApplication.quit()
def ColorDialog(self):
self.Dailog = QColorDialog()
self.Dailog.exec_()
        self.Value = self.Dailog.selectedColor()
        if self.Value.isValid():  # a cancelled dialog returns an invalid color
            self.BrushColor = self.Value
def Open(self, event):
filename = QFileDialog.getOpenFileName()
imagepath = filename[0]
print(imagepath)
image = QImage(imagepath)
painter = QPainter(self.Image)
painter.setPen(QPen(Qt.NoPen))
        height = image.height()
        width = image.width()
        # QRect takes (x, y, width, height); width and height were swapped
        painter.drawImage(QRect(0, 0, width * 2, height * 2), image)
        self.update()
def One(self):
self.BrushSize = 1
def Two(self):
self.BrushSize = 2
def Three(self):
self.BrushSize = 3
def Four(self):
self.BrushSize = 4
def Five(self):
self.BrushSize = 5
def Six(self):
self.BrushSize = 6
def Seven(self):
self.BrushSize = 7
def Eight(self):
self.BrushSize = 8
def Nine(self):
self.BrushSize = 9
def Black(self):
self.BrushColor = Qt.black
def Red(self):
self.BrushColor = Qt.red
def Yellow(self):
self.BrushColor = Qt.yellow
def LightBlue(self):
self.BrushColor = QColor(173, 216, 230)
def DarkBlue(self):
self.BrushColor = Qt.darkBlue
def Orange(self):
self.BrushColor = QColor(255, 165, 0)
def LightGreen(self):
self.BrushColor = QColor(144, 238, 144)
def DarkGreen(self):
self.BrushColor = Qt.darkGreen
def Purple(self):
self.BrushColor = QColor(128, 0, 128)
def Brown(self):
self.BrushColor = QColor(165, 42, 42)
def White(self):
self.BrushColor = Qt.white
def MarineBlue(self):
self.BrushColor = QColor(0, 119, 190)
def LightGray(self):
self.BrushColor = Qt.lightGray
def DarkGray(self):
self.BrushColor = Qt.darkGray
def Gray(self):
self.BrushColor = Qt.gray
def DarkRed(self):
self.BrushColor = Qt.darkRed
def Pink(self):
self.BrushColor = QColor(247, 120, 193)
def DarkYellow(self):
self.BrushColor = Qt.darkYellow
def LightYellow(self):
self.BrushColor = QColor(244, 247, 155)
def Green(self):
self.BrushColor = Qt.green
def Blue(self):
self.BrushColor = Qt.blue
App = QApplication(sys.argv)
Main = MainWindow()
Main.resize(640, 480)
Main.show()
sys.exit(App.exec_())
| nilq/baby-python | python |
# Run detection in real-time setting on a COCO-format dataset
import argparse, json, pickle
from os.path import join, isfile
from time import perf_counter
from tqdm import tqdm
import numpy as np
import torch
from pycocotools.coco import COCO
from mmcv.runner import load_checkpoint
import sys; sys.path.insert(0, '..'); sys.path.insert(0, '.')
from util import mkdir2, print_stats
from det import imread, parse_det_result, vis_det, eval_ccf
from det.det_apis import \
init_detector, inference_detector, \
ImageTransform, ImageTransformGPU, _prepare_data
import train.models
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', type=str, required=True)
parser.add_argument('--annot-path', type=str, required=True)
parser.add_argument('--in-scale', type=float, default=None)
parser.add_argument('--fps', type=float, default=30)
parser.add_argument('--no-mask', action='store_true', default=False)
parser.add_argument('--no-class-mapping', action='store_true', default=False)
parser.add_argument('--cpu-pre', action='store_true', default=False)
parser.add_argument('--out-dir', type=str, required=True)
parser.add_argument('--vis-dir', type=str, default=None)
parser.add_argument('--vis-scale', type=float, default=1)
parser.add_argument('--config', type=str, default=None)
parser.add_argument('--weights', type=str, default=None)
parser.add_argument('--weights-base', type=str, default=None)
parser.add_argument('--n-history', type=int, default=None)
parser.add_argument('--n-future', type=int, default=None)
parser.add_argument('--no-eval', action='store_true', default=False)
parser.add_argument('--overwrite', action='store_true', default=False)
opts = parser.parse_args()
return opts
def main():
assert torch.cuda.device_count() == 1 # mmdet only supports single GPU testing
opts = parse_args()
mkdir2(opts.out_dir)
vis_out = bool(opts.vis_dir)
if vis_out:
mkdir2(opts.vis_dir)
db = COCO(opts.annot_path)
class_names = [c['name'] for c in db.dataset['categories']]
n_class = len(class_names)
coco_mapping = None if opts.no_class_mapping else db.dataset.get('coco_mapping', None)
if coco_mapping is not None:
coco_mapping = np.asarray(coco_mapping)
seqs = db.dataset['sequences']
seq_dirs = db.dataset['seq_dirs']
model = init_detector(opts)
if opts.weights_base is not None:
# for distillation purpose
load_checkpoint(model, opts.weights_base)
if opts.cpu_pre:
img_transform = ImageTransform(
size_divisor=model.cfg.data.test.size_divisor, **model.cfg.img_norm_cfg)
else:
img_transform = ImageTransformGPU(
size_divisor=model.cfg.data.test.size_divisor, **model.cfg.img_norm_cfg)
device = next(model.parameters()).device # model device
n_history = model.cfg.data.train.n_history if opts.n_history is None else opts.n_history
n_future = model.cfg.data.train.n_future if opts.n_future is None else opts.n_future
results_ccf = [] # instance based
runtime_all = []
for sid, seq in enumerate(tqdm(seqs)):
# print(seq)
frame_list = [img for img in db.imgs.values() if img['sid'] == sid]
n_frame = len(frame_list)
# load all frames in advance
frames = []
for img in frame_list:
img_path = join(opts.data_root, seq_dirs[sid], img['name'])
frames.append(imread(img_path))
with torch.no_grad():
preprocessed = []
for i in range(n_history):
data = _prepare_data(frames[i], img_transform, model.cfg, device)
preprocessed.append(data)
for ii in range(n_history, n_frame - n_future):
# target frame
iid = frame_list[ii + n_future]['id']
img_name = frame_list[ii + n_future]['name']
I = frames[ii + n_future]
t_start = perf_counter()
# input frame
data = _prepare_data(frames[ii], img_transform, model.cfg, device)
# if n_history == 0:
# data_merge = data
# # print(data['img'])
# # print(data['img'][0].shape)
# # print(data['img'][0][0][0][300][300:305])
# # import sys
# # sys.exit()
# else:
preprocessed.append(data)
# print(preprocessed[0]['img'][0].data_ptr())
# print(preprocessed[2]['img'][0].data_ptr())
# print(torch.all(preprocessed[0]['img'][0] == preprocessed[2]['img'][0]))
imgs = [d['img'][0] for d in preprocessed]
imgs = torch.cat(imgs, 0)
imgs = imgs.unsqueeze(0)
data_merge = {
'img': [imgs],
'img_meta': data['img_meta'],
}
# print(data_merge['img'][0][0][2][0][300][300:305])
# import sys
# sys.exit()
result = model(return_loss=False, rescale=True, numpy_res=True, **data_merge)
bboxes, scores, labels, masks = \
parse_det_result(result, coco_mapping, n_class)
# if ii == 2:
# print(ii, scores)
# import sys
# sys.exit()
# if n_history != 0:
del preprocessed[0]
t_end = perf_counter()
runtime_all.append(t_end - t_start)
if vis_out:
vis_path = join(opts.vis_dir, seq, img_name[:-3] + 'jpg')
if opts.overwrite or not isfile(vis_path):
vis_det(
I, bboxes, labels,
class_names, masks, scores,
out_scale=opts.vis_scale,
out_file=vis_path
)
# convert to coco fmt
n = len(bboxes)
if n:
bboxes[:, 2:] -= bboxes[:, :2]
for i in range(n):
result_dict = {
'image_id': iid,
'bbox': bboxes[i],
'score': scores[i],
'category_id': labels[i],
}
if masks is not None:
result_dict['segmentation'] = masks[i]
results_ccf.append(result_dict)
out_path = join(opts.out_dir, 'time_info.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump({
'runtime_all': runtime_all,
'n_total': len(runtime_all),
}, open(out_path, 'wb'))
# convert to ms for display
s2ms = lambda x: 1e3*x
print_stats(runtime_all, 'Runtime (ms)', cvt=s2ms)
out_path = join(opts.out_dir, 'results_ccf.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump(results_ccf, open(out_path, 'wb'))
if not opts.no_eval:
eval_summary = eval_ccf(db, results_ccf)
out_path = join(opts.out_dir, 'eval_summary.pkl')
if opts.overwrite or not isfile(out_path):
pickle.dump(eval_summary, open(out_path, 'wb'))
if vis_out:
print(f'python vis/make_videos.py "{opts.vis_dir}"')
if __name__ == '__main__':
main()
| nilq/baby-python | python |
import os
import glob
from imageai.Detection.Custom import DetectionModelTrainer
execution_path = os.getcwd()
models_path = os.path.join(execution_path, "doge-identification/models/")
data_path = os.path.join(execution_path, "doge-identification/")
# This will force tensorflow to run on the cpu
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
list_of_files = glob.glob(models_path + "*.h5")
latest_model_path = max(list_of_files, key=os.path.getctime)
path, newest_model = os.path.split(latest_model_path)
print("Newest Model: ", newest_model)
trainer = DetectionModelTrainer()
trainer.setModelTypeAsYOLOv3()
trainer.setDataDirectory(data_directory=data_path)
metrics = trainer.evaluateModel(
model_path=latest_model_path,
json_path=os.path.join(execution_path, "doge-identification/json/detection_config.json"),
iou_threshold=0.5,
object_threshold=0.3,
nms_threshold=0.5)
| nilq/baby-python | python |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2020 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from wtforms.fields import StringField
from wtforms.validators import DataRequired
from indico.web.forms.base import IndicoForm
from indico.web.forms.fields import IndicoPasswordField
from indico_livesync_cern import _
class SettingsForm(IndicoForm):
username = StringField(_("Username"), validators=[DataRequired()],
description=_("The username to access the category ID/title mapping"))
password = IndicoPasswordField(_('Password'), [DataRequired()], toggle=True,
description=_("The password to access the category ID/title mapping"))
| nilq/baby-python | python |
"""Error handlers."""
from werkzeug.http import HTTP_STATUS_CODES
from flask import jsonify
from muckr_api.extensions import database
class APIError(Exception):
def __init__(self, status_code, message=None, details=None):
super().__init__()
error = HTTP_STATUS_CODES.get(status_code, "Unknown error")
self.status_code = status_code
self.payload = {"error": error}
if message is not None:
self.payload["message"] = message
if details is not None:
self.payload["details"] = details
def handle(self):
response = jsonify(self.payload)
response.status_code = self.status_code
response.mimetype = "application/json"
return response
def handle_error(error):
    # If an HTTPException, pull the `code` attribute; default to 500
status_code = getattr(error, "code", 500)
if status_code == 500:
database.session.rollback()
return APIError(status_code).handle()
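# Hedged wiring sketch (the app factory that registers this handler lives
# elsewhere in the package):
#   app.register_error_handler(Exception, handle_error)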
| nilq/baby-python | python |
from django.conf.urls import url
from .views import Dashboard
urlpatterns = [
url(r'^$', Dashboard.as_view()),
]
| nilq/baby-python | python |
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
class RequestCache(dict):
# stats info needed for testing
_hits = 0
_misses = 0
_sets = 0
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
return default
return value
def __getitem__(self, key):
try:
value = super(RequestCache, self).__getitem__(key)
except KeyError as e:
self._misses += 1
raise e
self._hits += 1
return value
def __setitem__(self, key, value):
super(RequestCache, self).__setitem__(key, value)
self._sets += 1
def clear(self):
super(RequestCache, self).clear()
self._hits = 0
self._misses = 0
self._sets = 0
def stats(self):
stats = {'hits': self._hits,
'misses': self._misses,
'sets': self._sets}
return stats
    def __str__(self):
        # implicit string concatenation (no comma) keeps this one format string;
        # the original tuple had no .format method
        return ('<RequestCache {0} items (hits: {1}, misses: {2},'
                ' sets: {3})>').format(len(self), self._hits,
                                       self._misses, self._sets)
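# Counter behavior sketch:
#   cache = RequestCache()
#   cache['k'] = 1      # sets == 1
#   cache.get('k')      # hits == 1 (get() routes through __getitem__)
#   cache.get('nope')   # misses == 1
#   cache.stats()       # -> {'hits': 1, 'misses': 1, 'sets': 1}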
| nilq/baby-python | python |
from typing import List
import msgpack
from pydantic import BaseModel
import ormsgpack
class Member(BaseModel):
id: int
active: bool
class Object(BaseModel):
id: int
name: str
members: List[Member]
objects_as_pydantic = [
Object(
id=i, name=str(i) * 3, members=[Member(id=j, active=True) for j in range(0, 10)]
)
for i in range(100000, 102000)
]
def default(__obj):
if isinstance(__obj, BaseModel):
return __obj.dict()
def test_pydantic_msgpack(benchmark):
benchmark.group = "pydantic"
benchmark(msgpack.packb, objects_as_pydantic, default=default)
def test_pydantic_ormsgpack(benchmark):
benchmark.group = "pydantic"
benchmark(
ormsgpack.packb, objects_as_pydantic, option=ormsgpack.OPT_SERIALIZE_PYDANTIC
)
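# These are pytest-benchmark tests; a typical invocation (hypothetical file name):
#   pytest test_pydantic_bench.py --benchmark-group-by=group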
| nilq/baby-python | python |
#!/usr/bin/env python
from breakmywork.application import main
main()
| nilq/baby-python | python |
import inferpy as inf
def test_parameter_in_pmodel():
# test that random variables in pmodel works even if no name has been provided
@inf.probmodel
def model():
inf.Parameter(0)
v = list(model().params.values())[0]
assert v.name.startswith('parameter')
# assert also is_datamodel is false
assert not v.is_datamodel
def test_parameter_in_datamodel():
with inf.datamodel(10):
x = inf.Parameter(0)
# assert that is_datamodel is true
assert x.is_datamodel
def test_run_in_session():
x = inf.Parameter(0)
assert inf.get_session().run(x) == 0
| nilq/baby-python | python |
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11I13_cff import Phase2C11I13
from Configuration.Eras.Modifier_phase2_3DPixels_cff import phase2_3DPixels
from Configuration.Eras.Modifier_phase2_GE0_cff import phase2_GE0
Phase2C11I13T25M9 = cms.ModifierChain(Phase2C11I13, phase2_3DPixels, phase2_GE0)
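# The resulting ModifierChain applies the Phase2C11I13 era together with the
# phase2_3DPixels and phase2_GE0 modifiers as a single configuration.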
| nilq/baby-python | python |
import numpy as np
class RandomParameters:
"""
Example params are in JSON format:
{
"booster": ["gbtree", "gblinear"],
"objective": ["binary:logistic"],
"eval_metric": ["auc", "logloss"],
"eta": [0.0025, 0.005, 0.0075, 0.01, 0.025, 0.05, 0.075, 0.1]
}
"""
@staticmethod
def get(params, seed=1):
np.random.seed(seed)
generated_params = {"seed": seed}
for k in params:
generated_params[k] = np.random.permutation(params[k])[0].item()
return generated_params
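# Usage sketch with the params shown in the docstring (the draw is
# deterministic for a fixed seed):
#   params = {"booster": ["gbtree", "gblinear"], "eta": [0.0025, 0.005, 0.01]}
#   RandomParameters.get(params, seed=42)
#   # -> {"seed": 42, "booster": <one of two>, "eta": <one of three>}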
| nilq/baby-python | python |
import sys,os,pygame
| nilq/baby-python | python |
# Generated by Django 3.0.6 on 2020-05-25 00:02
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('birthday', models.DateField()),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GSBrand',
fields=[
('brand', models.CharField(max_length=100, primary_key=True, serialize=False)),
('concentration', models.DecimalField(decimal_places=2, max_digits=2)),
],
),
migrations.CreateModel(
name='WarriorAdmin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('warrior_admin', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='UserExtension',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('userid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('warrior_admin', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.WarriorAdmin')),
],
),
migrations.CreateModel(
name='InjectionLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_added', models.DateField(default=datetime.date.today, editable=False)),
('cat_weight', models.DecimalField(decimal_places=2, max_digits=2)),
('injection_time', models.TimeField()),
('injection_amount', models.DecimalField(decimal_places=1, max_digits=2)),
('cat_behavior_today', models.IntegerField(default=3)),
('injection_notes', models.TextField(null=True)),
('gaba_dose', models.IntegerField(null=True)),
('other_notes', models.TextField(null=True)),
('cat_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.Cats')),
('gs_brand', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='InjectionLog.GSBrand')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
]
| nilq/baby-python | python |
from tests.save_restore_cursor import SaveRestoreCursorTests
import esccmd
from escutil import knownBug
class DECSETTiteInhibitTests(SaveRestoreCursorTests):
def __init__(self):
SaveRestoreCursorTests.__init__(self)
def saveCursor(self):
esccmd.DECSET(esccmd.SaveRestoreCursor)
def restoreCursor(self):
esccmd.DECRESET(esccmd.SaveRestoreCursor)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_Basic(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_Basic(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_MoveToHomeWhenNotSaved(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_MoveToHomeWhenNotSaved(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_ResetsOriginMode(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_ResetsOriginMode(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_WorksInLRM(self, shouldWork=True):
SaveRestoreCursorTests.test_SaveRestoreCursor_WorksInLRM(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_AltVsMain(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_AltVsMain(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_Protection(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_Protection(self)
@knownBug(terminal="iTerm2", reason="Not implemented")
def test_SaveRestoreCursor_Wrap(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_Wrap(self)
@knownBug(terminal="iTerm2", reason="Not implemented", noop=True)
def test_SaveRestoreCursor_ReverseWrapNotAffected(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_ReverseWrapNotAffected(self)
@knownBug(terminal="iTerm2", reason="Not implemented", noop=True)
def test_SaveRestoreCursor_InsertNotAffected(self):
SaveRestoreCursorTests.test_SaveRestoreCursor_InsertNotAffected(self)
| nilq/baby-python | python |
from domain.rules import game_loops
connect_game = game_loops.Game_Loops()
# here we call the game loop function
connect_game.connect_four_game()
| nilq/baby-python | python |
from flask import Flask, request, render_template, send_file
import qrcode
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/result', methods=["POST", "GET"])
def result():
    if request.method == "POST":
        form = request.form  # avoid shadowing the built-in input()
        data = form["text-input"]
        filename = str(form["filename-input"] + ".png")
# Convert
        qr = qrcode.QRCode(
            version=5,
            error_correction=qrcode.constants.ERROR_CORRECT_H,
            box_size=10,
            border=5
        )
# adding data
qr.add_data(data)
img = qr.make_image(fill="black", back_color="white")
# saving result
img.save(filename)
        return send_file(filename, mimetype="image/png")
if __name__ == "__main__":
app.run(debug=True)
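# Example request against the dev server (hypothetical host/port):
#   curl -X POST -F "text-input=hello world" -F "filename-input=hello" \
#        http://127.0.0.1:5000/result -o hello.png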
| nilq/baby-python | python |
# python peripherals
import os
import numpy
import random
import queue
from multiprocessing import Process, Queue, cpu_count
# torch
import torch
from torch.utils.data import Dataset
# deep_signature
from deep_signature.data_generation import curve_generation
from deep_signature.data_generation import dataset_generation
from deep_signature.data_manipulation import curve_processing
class DeepSignaturePairsDataset(Dataset):
def __init__(self):
self._pairs = None
self._labels = None
def load_dataset(self, negative_pairs_dir_path, positive_pairs_dir_path):
negative_pairs = numpy.load(file=os.path.normpath(os.path.join(negative_pairs_dir_path, 'negative_pairs.npy')), allow_pickle=True)
positive_pairs = numpy.load(file=os.path.normpath(os.path.join(positive_pairs_dir_path, 'positive_pairs.npy')), allow_pickle=True)
full_pairs_count = negative_pairs.shape[0] + positive_pairs.shape[0]
random.shuffle(negative_pairs)
random.shuffle(positive_pairs)
self._pairs = numpy.empty((full_pairs_count, negative_pairs.shape[1], negative_pairs.shape[2], negative_pairs.shape[3]))
self._pairs[:negative_pairs.shape[0], :] = negative_pairs
self._pairs[negative_pairs.shape[0]:, :] = positive_pairs
        negative_labels = numpy.zeros(negative_pairs.shape[0])
        positive_labels = numpy.ones(positive_pairs.shape[0])
        self._labels = numpy.empty(full_pairs_count)
        self._labels[:negative_pairs.shape[0]] = negative_labels
self._labels[negative_pairs.shape[0]:] = positive_labels
def __len__(self):
return self._labels.shape[0]
def __getitem__(self, idx):
pair = self._pairs[idx, :]
for i in range(2):
if not curve_processing.is_ccw(curve=pair[i]):
pair[i] = numpy.flip(pair[i], axis=0)
for i in range(2):
radians = curve_processing.calculate_secant_angle(curve=pair[i])
pair[i] = curve_processing.rotate_curve(curve=pair[i], radians=radians)
pair_torch = torch.from_numpy(pair).cuda().double()
label_torch = torch.from_numpy(numpy.array([self._labels[idx]])).cuda().double()
return {
'input': pair_torch,
'labels': label_torch
}
class DeepSignatureTupletsDataset(Dataset):
def __init__(self):
self._tuplets = None
def load_dataset(self, dir_path):
self._tuplets = numpy.load(file=os.path.normpath(os.path.join(dir_path, 'tuplets.npy')), allow_pickle=True)
def __len__(self):
return self._tuplets.shape[0]
def __getitem__(self, index):
item = {}
tuplet = self._tuplets[index]
for key in tuplet.keys():
item[key] = torch.from_numpy(numpy.array(self._tuplets[index][key]).astype('float64')).cuda().double()
return item
class EuclideanTuple:
@staticmethod
def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
return dataset_generation.EuclideanCurvatureTupletsDatasetGenerator.generate_tuple(
curves=curves,
sampling_ratio=sampling_ratio,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
offset_length=offset_length,
negative_examples_count=negative_examples_count)
@staticmethod
def _generate_arclength_tuple(curves, min_offset, max_offset, multimodality, supporting_points_count, anchor_points_count):
return dataset_generation.EuclideanArcLengthTupletsDatasetGenerator.generate_tuple(
curves=curves,
min_offset=min_offset,
max_offset=max_offset,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
anchor_points_count=anchor_points_count)
class EquiaffineTuple:
@staticmethod
def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
return dataset_generation.EquiaffineCurvatureTupletsDatasetGenerator.generate_tuple(
curves=curves,
sampling_ratio=sampling_ratio,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
offset_length=offset_length,
negative_examples_count=negative_examples_count)
@staticmethod
def _generate_arclength_tuple(curves, min_offset, max_offset, multimodality, supporting_points_count, anchor_points_count):
return dataset_generation.EquiaffineArcLengthTupletsDatasetGenerator.generate_tuple(
curves=curves,
min_offset=min_offset,
max_offset=max_offset,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
anchor_points_count=anchor_points_count)
class AffineTuple:
@staticmethod
def _generate_curvature_tuple(curves, sampling_ratio, multimodality, supporting_points_count, offset_length, negative_examples_count):
return dataset_generation.AffineCurvatureTupletsDatasetGenerator.generate_tuple(
curves=curves,
sampling_ratio=sampling_ratio,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
offset_length=offset_length,
negative_examples_count=negative_examples_count)
@staticmethod
def _generate_arclength_tuple(curves, min_offset, max_offset, multimodality, supporting_points_count, anchor_points_count):
return dataset_generation.AffineArcLengthTupletsDatasetGenerator.generate_tuple(
curves=curves,
min_offset=min_offset,
max_offset=max_offset,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
anchor_points_count=anchor_points_count)
class DeepSignatureTupletsOnlineDataset(Dataset):
def __init__(self, dataset_size, dir_path, multimodality, replace, buffer_size, num_workers):
self._curves = curve_generation.CurvesGenerator.load_curves(dir_path)
self._dataset_size = dataset_size
self._multimodality = multimodality
self._replace = replace
self._buffer_size = buffer_size
self._num_workers = num_workers
self._q = Queue(maxsize=dataset_size)
self._args = [self._curves, self._multimodality, self._q]
self._items = []
def __len__(self):
return self._dataset_size
def __getitem__(self, index):
item = {}
mod_index = numpy.mod(index, self._buffer_size)
tuplet = self._items[mod_index]
for key in tuplet.keys():
if key == 'input':
item[key] = torch.from_numpy(numpy.array(tuplet[key]).astype('float64')).cuda().double()
else:
item[key] = tuplet[key]
        if self._replace:
try:
new_tuplet = self._q.get_nowait()
rand_index = int(numpy.random.randint(self._buffer_size, size=1))
self._items[rand_index] = new_tuplet
except queue.Empty:
pass
return item
def start(self):
self._workers = [Process(target=self._map_func, args=self._args) for i in range(self._num_workers)]
for i, worker in enumerate(self._workers):
worker.start()
print(f'\rWorker Started {i+1} / {self._num_workers}', end='')
print(f'\nItem {len(self._items)} / {self._buffer_size}', end='')
while True:
            if not self._q.empty():
self._items.append(self._q.get())
print(f'\rItem {len(self._items)} / {self._buffer_size}', end='')
if len(self._items) == self._buffer_size:
break
def stop(self):
for i, worker in enumerate(self._workers):
worker.terminate()
class DeepSignatureCurvatureTupletsOnlineDataset(DeepSignatureTupletsOnlineDataset):
def __init__(self, dataset_size, dir_path, multimodality, replace, buffer_size, num_workers, sampling_ratio, supporting_points_count, offset_length, negative_examples_count):
DeepSignatureTupletsOnlineDataset.__init__(
self,
dataset_size=dataset_size,
dir_path=dir_path,
multimodality=multimodality,
replace=replace,
buffer_size=buffer_size,
num_workers=num_workers)
self._sampling_ratio = sampling_ratio
self._args.append(sampling_ratio)
self._supporting_points_count = supporting_points_count
self._args.append(supporting_points_count)
self._offset_length = offset_length
self._args.append(offset_length)
self._negative_examples_count = negative_examples_count
self._args.append(negative_examples_count)
@classmethod
def _map_func(cls, curves, multimodality, q, sampling_ratio, supporting_points_count, offset_length, negative_examples_count):
while True:
q.put(cls._generate_curvature_tuple(
curves=curves,
sampling_ratio=sampling_ratio,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
offset_length=offset_length,
negative_examples_count=negative_examples_count))
class DeepSignatureEuclideanCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, EuclideanTuple):
pass
class DeepSignatureEquiaffineCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, EquiaffineTuple):
pass
class DeepSignatureAffineCurvatureTupletsOnlineDataset(DeepSignatureCurvatureTupletsOnlineDataset, AffineTuple):
pass
class DeepSignatureArclengthTupletsOnlineDataset(DeepSignatureTupletsOnlineDataset):
def __init__(self, dataset_size, dir_path, multimodality, replace, buffer_size, num_workers, supporting_points_count, min_offset, max_offset, anchor_points_count):
DeepSignatureTupletsOnlineDataset.__init__(
self,
dataset_size=dataset_size,
dir_path=dir_path,
multimodality=multimodality,
replace=replace,
buffer_size=buffer_size,
num_workers=num_workers)
self._supporting_points_count = supporting_points_count
self._args.append(supporting_points_count)
self._min_offset = min_offset
self._args.append(min_offset)
self._max_offset = max_offset
self._args.append(max_offset)
self._anchor_points_count = anchor_points_count
self._args.append(anchor_points_count)
@classmethod
def _map_func(cls, curves, multimodality, q, supporting_points_count, min_offset, max_offset, anchor_points_count):
while True:
q.put(cls._generate_arclength_tuple(
curves=curves,
min_offset=min_offset,
max_offset=max_offset,
multimodality=multimodality,
supporting_points_count=supporting_points_count,
anchor_points_count=anchor_points_count))
class DeepSignatureEuclideanArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, EuclideanTuple):
pass
class DeepSignatureEquiaffineArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, EquiaffineTuple):
pass
class DeepSignatureAffineArclengthTupletsOnlineDataset(DeepSignatureArclengthTupletsOnlineDataset, AffineTuple):
pass
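# --- Usage sketch (not part of the original module) ---
# A minimal wiring example for the online curvature dataset. Every argument
# value below is an assumption for illustration (including the './curves'
# path), and __getitem__ expects a CUDA device since it calls .cuda().
if __name__ == '__main__':
    dataset = DeepSignatureEuclideanCurvatureTupletsOnlineDataset(
        dataset_size=1000,
        dir_path='./curves',
        multimodality=5,
        replace=True,
        buffer_size=500,
        num_workers=cpu_count(),
        sampling_ratio=0.3,
        supporting_points_count=6,
        offset_length=20,
        negative_examples_count=2)
    dataset.start()  # spawn workers and fill the buffer before sampling
    sample = dataset[0]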
|
nilq/baby-python
|
python
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
# standard library
# django
from django.contrib import admin
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import resolve
from django.core.urlresolvers import reverse
from django.db import models
from django.test import TestCase
# urls
from project.urls import urlpatterns
# utils
from base.utils import camel_to_underscore
from base.utils import get_our_models
# Third-party app imports
from model_mommy import mommy
from model_mommy import random_gen
class BaseTestCase(TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.password = random_gen.gen_text()
self.user = mommy.prepare('users.User')
self.user.set_password(self.password)
self.user.save()
self.login()
def login(self, user=None, password=None):
if user is None:
user = self.user
password = self.password
return self.client.login(email=user.email, password=password)
class IntegrityOnDeleteTestCase(BaseTestCase):
def create_full_object(self, model):
kwargs = {}
for f in model._meta.fields:
if isinstance(f, models.fields.related.ForeignKey) and f.null:
kwargs[f.name] = mommy.make(f.rel.to)
return mommy.make(model, **kwargs), kwargs
def test_integrity_on_delete(self):
for model in get_our_models():
obj, related_nullable_objects = self.create_full_object(model)
obj_count = model.objects.count()
for relation_name, rel_obj in related_nullable_objects.items():
try:
# check if the test should be skipped
if relation_name in obj.exclude_on_on_delete_test:
continue
except AttributeError:
pass
rel_obj.delete()
                error_msg = (
                    '<{}> object was deleted after deleting a nullable '
                    'related <{}> object; the relation was "{}"'
                ).format(model.__name__, rel_obj.__class__.__name__,
                         relation_name)
self.assertEqual(obj_count, model.objects.count(), error_msg)
# feedback that the test passed
print('.', end='')
def reverse_pattern(pattern, namespace, args=None, kwargs=None):
try:
if namespace:
            # args/kwargs must be passed to reverse(), not to str.format()
            return reverse('{}:{}'.format(namespace, pattern.name),
                           args=args, kwargs=kwargs)
else:
return reverse(pattern.name, args=args, kwargs=kwargs)
except NoReverseMatch:
return None
class UrlsTest(BaseTestCase):
def setUp(self):
super(UrlsTest, self).setUp()
        # we are going to send parameters, so make sure the test user has id 1
self.user.delete()
self.user.id = 1
# give the user all the permissions, so we test every page
self.user.is_superuser = True
self.user.save()
self.login()
self.default_params = {}
for model in get_our_models():
model_name = camel_to_underscore(model.__name__)
method_name = 'create_{}'.format(model_name)
param_name = '{}_id'.format(model_name)
obj = mommy.make(model)
self.assertIsNotNone(obj, '{} returns None'.format(method_name))
self.default_params[param_name] = obj.id
def reverse_pattern(self, pattern, namespace):
url = reverse_pattern(pattern, namespace)
if url is None:
            url = reverse_pattern(pattern, namespace, args=(1,))
if url is None:
            url = reverse_pattern(pattern, namespace, args=(1, 1))
if url is None:
return None
view_params = resolve(url).kwargs
for param in view_params:
try:
view_params[param] = self.default_params[param]
except KeyError:
pass
return reverse_pattern(pattern, namespace, kwargs=view_params)
def test_responses(self):
ignored_namespaces = []
def test_url_patterns(patterns, namespace=''):
if namespace in ignored_namespaces:
return
for pattern in patterns:
self.login()
if hasattr(pattern, 'name'):
url = self.reverse_pattern(pattern, namespace)
if not url:
continue
try:
response = self.client.get(url)
                    except Exception:
print("Url {} failed: ".format(url))
raise
msg = 'url "{}" returned {}'.format(
url, response.status_code
)
self.assertIn(
response.status_code,
(200, 302, 403), msg
)
# feedback that the test passed
print('.', end='')
else:
test_url_patterns(pattern.url_patterns, pattern.namespace)
test_url_patterns(urlpatterns)
for model, model_admin in admin.site._registry.items():
patterns = model_admin.get_urls()
test_url_patterns(patterns, namespace='admin')
|
nilq/baby-python
|
python
|
import pandas as pd
from modules.locale_generator.data import LocaleOutData
from helper.utils.utils import read_sheet_map_file
class LocaleProcessor:
def __init__(self, language_name, english_column_name):
self.language_name = language_name
self.english_column_name = english_column_name
self.additional_replacer_list = read_sheet_map_file()
def add_translation_if_present(self, df_row):
if self.language_name in list(df_row.index):
if pd.notnull(df_row[self.language_name]) and len(str(df_row[self.language_name]).strip()) != 0:
df_row['value'] = df_row[self.language_name]
return df_row
def replace_tags_in_df(self, df):
for i, df_row in df.iterrows():
if df_row['Key'] in self.additional_replacer_list.keys():
if 'replacements' in self.additional_replacer_list[df_row['Key']].keys():
for from_text, to in self.additional_replacer_list[df_row['Key']]['replacements'].items():
if pd.notnull(df_row[self.language_name]) and len(str(df_row[self.language_name]).strip()) != 0:
tmp = df_row[self.language_name]
df_row[self.language_name] = df_row[self.language_name].replace(from_text, to)
df.loc[i, self.language_name] = df_row[self.language_name]
if tmp == df_row[self.language_name]:
print("In", df_row['Key'], "=> ", from_text, 'is not changed')
print("Out", df_row[self.language_name], "=> ", from_text, 'is not changed')
return df
def clean_translation_excel(self, df, language_name):
columns = [self.english_column_name, language_name]
filtered_sheet = df[columns]
sheet_no_na = filtered_sheet.dropna(subset=[self.english_column_name], inplace=False)
        # iterrows() yields copies, so write changes back through .loc
        for i, row in sheet_no_na.iterrows():
            if pd.notna(row[language_name]):
                sheet_no_na.loc[i, language_name] = str(row[language_name]).strip()
            if pd.notna(row[self.english_column_name]):
                sheet_no_na.loc[i, self.english_column_name] = str(row[self.english_column_name]).strip()
return sheet_no_na
def clean_meta_df(self, df):
        # iterrows() yields copies, so write changes back through .loc
        for i, row in df.iterrows():
            if pd.notna(row[self.english_column_name]):
                df.loc[i, self.english_column_name] = str(row[self.english_column_name]).strip()
return df
def clean_merged_excel(self, df, language_name):
excel_df = df.copy()
        # iterrows() yields copies, so write changes back through .loc
        for i, row in excel_df.iterrows():
            if pd.notna(row[language_name]):
                excel_df.loc[i, language_name] = str(row[language_name]).strip()
excel_df = excel_df.drop_duplicates(subset=['Key', self.english_column_name], keep='last')
return excel_df
def process_with_meta_info(self, excel_df, meta_excel_df):
tmp_df = meta_excel_df[['Key', self.language_name]]
del meta_excel_df[self.language_name]
excel_df = self.clean_translation_excel(excel_df, self.language_name)
meta_excel_df = self.clean_meta_df(meta_excel_df)
merged_excel_df = pd.merge(meta_excel_df, excel_df, on=self.english_column_name,
how='inner')
merged_excel_df = self.clean_merged_excel(merged_excel_df, self.language_name)
return merged_excel_df
def merge_excel_and_json(self, excel_df, json_df):
merged_df = pd.merge(excel_df, json_df, on="Key", how='right')
merged_df = merged_df.apply(self.add_translation_if_present, axis=1)
select_columns = ['Key', 'value']
filtered_merged_df = merged_df[select_columns]
final_df = filtered_merged_df.drop_duplicates(subset=['Key'], keep='first', inplace=False)
return final_df
def process(self, meta_excel_df, input_excel_df, json_df):
excel_df = self.process_with_meta_info(input_excel_df, meta_excel_df)
excel_df = self.replace_tags_in_df(excel_df)
final_df = self.merge_excel_and_json(excel_df, json_df)
return LocaleOutData(json_df, excel_df, final_df)
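# --- Usage sketch (illustrative; not part of the original module) ---
# The language and column names below are assumptions about the sheets this
# processor is normally fed; LocaleOutData simply bundles the three frames.
# processor = LocaleProcessor(language_name='Hindi', english_column_name='English')
# locale_out = processor.process(meta_excel_df, input_excel_df, json_df)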
|
nilq/baby-python
|
python
|
from unittest import mock
from bgmi.downloader.deluge import DelugeRPC
from bgmi.website.model import Episode
_token = "token:2334"
@mock.patch("bgmi.config.DELUGE_RPC_PASSWORD", _token)
@mock.patch("bgmi.downloader.deluge.DelugeRPC._call")
def test_init(call):
DelugeRPC(
download_obj=Episode(name="n", title="t", download="d"),
save_path="save_path",
)
call.assert_called_with("auth.login", [_token])
@mock.patch("bgmi.downloader.deluge.DelugeRPC._call")
def test_download_magnet(call):
DelugeRPC(
download_obj=Episode(name="n", title="t", download="magnet://233"),
save_path="save_path_1",
).download()
call.assert_called_with(
"web.add_torrents",
[
[
{
"path": "magnet://233",
"options": {
"add_paused": False,
"compact_allocation": False,
"move_completed": False,
"download_location": "save_path_1",
"max_connections": -1,
"max_download_speed": -1,
"max_upload_slots": -1,
"max_upload_speed": -1,
},
}
]
],
)
@mock.patch("bgmi.config.DELUGE_RPC_PASSWORD", _token)
@mock.patch("bgmi.downloader.deluge.DelugeRPC._call")
def test_download_torrent(call: mock.Mock):
call.return_value = {"result": "rr"}
DelugeRPC(
download_obj=Episode(name="n", title="t", download="d.torrent"),
save_path="save_path_1",
).download()
call.assert_has_calls(
[
mock.call("auth.login", [_token]),
mock.call("web.download_torrent_from_url", ["d.torrent"]),
mock.call(
"web.add_torrents",
[
[
{
"path": "rr",
"options": {
"add_paused": False,
"compact_allocation": False,
"move_completed": False,
"download_location": "save_path_1",
"max_connections": -1,
"max_download_speed": -1,
"max_upload_slots": -1,
"max_upload_speed": -1,
},
}
]
],
),
]
)
|
nilq/baby-python
|
python
|
'''
a = input("First number")
b = input("Second number")
a = int(a)
b = int(b)
print(a+b)
print(a-b)
print(a*b)
print(a/b)
result = a/b
print(type(result))
print(result)
print(a**2)
'''
# Logical operations
a = True
b = False
# Negation
print(not a)
# Logical AND
print(a and b)
# Logical OR
print(a or b)
a = 10
print(a > 100)
print(a < 100)
print(a <= 100)
print(a >= 100)
print(a == 100)
print(a != 100)
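# Comparisons can also be chained, which is equivalent to joining them with "and"
print(0 < a < 100)  # same as (0 < a) and (a < 100)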
|
nilq/baby-python
|
python
|
from mypackage import shazam
shazam()
|
nilq/baby-python
|
python
|
# query_parser_test.py
# Author: Thomas MINIER - MIT License 2017-2018
import pytest
from query_engine.sage_engine import SageEngine
from query_engine.optimizer.query_parser import parse_query
from database.hdt_file_connector import HDTFileConnector
from tests.utils import DummyDataset
import math
hdtDoc = HDTFileConnector('tests/data/watdiv.10M.hdt')
dataset = DummyDataset(hdtDoc, 'watdiv100')
engine = SageEngine()
queries = [
("""
SELECT * WHERE {
?s <http://schema.org/eligibleRegion> <http://db.uwaterloo.ca/~galuc/wsdbm/Country9> .
?s <http://purl.org/goodrelations/includes> ?includes .
?s <http://purl.org/goodrelations/validThrough> ?validity .
}
""", 2180),
("""
SELECT *
FROM <http://localhost:8000/sparql/watdiv100>
WHERE {
?s <http://schema.org/eligibleRegion> <http://db.uwaterloo.ca/~galuc/wsdbm/Country9> .
?s <http://purl.org/goodrelations/includes> ?includes .
?s <http://purl.org/goodrelations/validThrough> ?validity .
}
""", 2180),
("""
SELECT * WHERE {
{
?s <http://schema.org/eligibleRegion> <http://db.uwaterloo.ca/~galuc/wsdbm/Country9> .
?s <http://purl.org/goodrelations/includes> ?includes .
?s <http://purl.org/goodrelations/validThrough> ?validity .
} UNION {
?s <http://schema.org/eligibleRegion> <http://db.uwaterloo.ca/~galuc/wsdbm/Country9> .
?s <http://purl.org/goodrelations/includes> ?includes .
?s <http://purl.org/goodrelations/validThrough> ?validity .
}
}
""", 2180 * 2),
("""
SELECT * WHERE {
<http://db.uwaterloo.ca/~galuc/wsdbm/Offer1000> <http://purl.org/goodrelations/price> ?price .
FILTER(?price = "232")
}
""", 1),
("""
SELECT * WHERE {
<http://db.uwaterloo.ca/~galuc/wsdbm/Offer1000> <http://purl.org/goodrelations/price> ?price .
FILTER(?price = "232" && 1 + 2 = 3)
}
""", 1),
("""
SELECT * WHERE {
?s <http://schema.org/eligibleRegion> <http://db.uwaterloo.ca/~galuc/wsdbm/Country9> .
GRAPH <http://localhost:8000/sparql/watdiv100> {
?s <http://purl.org/goodrelations/includes> ?includes .
?s <http://purl.org/goodrelations/validThrough> ?validity .
}
}
""", 2180)]
class TestQueryParser(object):
@pytest.mark.parametrize("query,cardinality", queries)
def test_query_parser(self, query, cardinality):
iterator, cards = parse_query(query, dataset, 'watdiv100', 'http://localhost:8000/sparql/')
assert len(cards) > 0
(results, saved, done) = engine.execute(iterator, math.inf)
assert len(results) == cardinality
assert done
|
nilq/baby-python
|
python
|
import sys
import time
from http.client import HTTPSConnection
from typing import List
def main(argv: List[str]):
time.sleep(3)
for i in range(3):
for j in argv:
connection = HTTPSConnection(j)
connection.request('GET', '/')
connection.getresponse().read()
time.sleep(5)
if __name__ == '__main__':
main(sys.argv[1:])
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unittest import TestCase
from mock import patch
import msgpack
from watcher_metering.agent.measurement import Measurement
from watcher_metering.agent.puller import MetricPuller
class FakeMetricPuller(MetricPuller):
@classmethod
def get_name(cls):
return 'dummy'
@classmethod
def get_default_probe_id(cls):
return 'dummy.data.puller'
@classmethod
def get_default_interval(cls):
return 1
def do_pull(self):
return
class TestMetricPuller(TestCase):
@patch.object(Measurement, "as_dict")
def test_puller_send_measurements(self, m_as_dict):
data_puller = FakeMetricPuller(
title=FakeMetricPuller.get_entry_name(),
probe_id=FakeMetricPuller.get_default_probe_id(),
interval=FakeMetricPuller.get_default_interval(),
)
measurement_dict = OrderedDict(
name="dummy.data.puller",
unit="",
type_="",
value=13.37,
resource_id="test_hostname",
host="test_hostname",
timestamp="2015-08-04T15:15:45.703542",
)
m_as_dict.return_value = measurement_dict
measurement = Measurement(**measurement_dict)
with patch.object(MetricPuller, 'notify') as m_notify:
data_puller.send_measurements([measurement])
expected_encoded_msg = msgpack.dumps(measurement_dict)
self.assertTrue(m_notify.called)
m_notify.assert_called_once_with(expected_encoded_msg)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from common.Utilities import execute_command, get_file_extension_list, error_exit
from ast import ASTGenerator, AST
import Mapper
import Finder
import Logger
import Extractor
import Emitter
def merge_var_info(var_expr_map, var_value_map):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
var_info = dict()
# print(var_expr_map)
# print(var_value_map)
for var_name in var_value_map:
if var_name in var_expr_map:
info = dict()
# print(var_name)
info["data_type"] = var_expr_map[var_name]['data_type']
# print(info["data_type"])
info["value_list"] = var_value_map[var_name]['value_list']
# print(info["value_list"])
info["expr_list"] = var_expr_map[var_name]['expr_list']
var_info[var_name] = info
# print(var_info)
return var_info
def merge_var_map(map_a, map_b):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
var_map = dict()
for var_name in map_a:
if var_name in map_b:
var_map[var_name] = map_b[var_name]
else:
var_map[var_name] = map_a[var_name]
for var_name in map_b:
if var_name not in map_a:
var_map[var_name] = map_b[var_name]
# print(var_info)
return var_map
def merge_macro_info(info_a, info_b):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
macro_info = dict()
for macro_name in info_a:
info = info_a[macro_name]
if macro_name in info_b.keys():
error_exit("MULTIPLE USAGE OF MACRO")
macro_info[macro_name] = info
for macro_name in info_b:
info = info_b[macro_name]
macro_info[macro_name] = info
return macro_info
def merge_header_info(info_a, info_b):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
header_info = dict()
for header_name in info_a:
info = info_a[header_name]
if header_name in info_b.keys():
error_exit("MULTIPLE USAGE OF HEADER")
header_info[header_name] = info
for header_name in info_b:
info = info_b[header_name]
header_info[header_name] = info
return header_info
def merge_data_type_info(info_a, info_b):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
header_info = dict()
for header_name in info_a:
info = info_a[header_name]
if header_name in info_b.keys():
error_exit("MULTIPLE USAGE OF DATA TYPE")
header_info[header_name] = info
for header_name in info_b:
info = info_b[header_name]
header_info[header_name] = info
return header_info
def merge_ast_script(ast_script, ast_node_a, ast_node_b, mapping_ba):
Logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
Emitter.normal("\t\tmerging AST script")
merged_ast_script = list()
inserted_node_list = list()
deleted_node_list = list()
replace_node_list = list()
try:
ast_tree_a = AST.load_from_map(ast_node_a)
ast_tree_b = AST.load_from_map(ast_node_b)
    except Exception:
return None
# print(ast_script)
for script_line in ast_script:
# print(script_line)
if "Insert" in script_line:
# print(script_line)
node_id_a = int(((script_line.split(" into ")[0]).split("(")[1]).split(")")[0])
node_id_b = int(((script_line.split(" into ")[1]).split("(")[1]).split(")")[0])
if node_id_b in inserted_node_list or node_id_b == 0:
inserted_node_list.append(node_id_a)
continue
replace_node = Finder.search_ast_node_by_id(ast_node_b, node_id_a)
# print(replace_node)
target_node_id_a = mapping_ba[node_id_b]
target_node = Finder.search_ast_node_by_id(ast_node_a, target_node_id_a)
# print(target_node)
insert_position = int((script_line.split(" at ")[1]))
del_op = "Delete " + str(target_node['type']) + "(" + str(target_node_id_a) + ")\n"
# print(del_op)
possible_replacement_node = Finder.search_ast_node_by_id(ast_node_a, target_node_id_a)
parent_node = Finder.search_ast_node_by_id(ast_node_a, int(possible_replacement_node['parent_id']))
# print(parent_node)
del_parent_op = "Delete " + str(parent_node['type']) + "(" + str(parent_node['id']) + ")\n"
# print(del_parent_op)
# print(ast_script)
if del_op in ast_script:
replace_node_str = str(replace_node['type']) + "(" + str(node_id_a) + ")"
target_node_str = str(possible_replacement_node['type']) + "(" + str(target_node_id_a) + ")"
script_line = "Replace " + target_node_str + " with " + replace_node_str
inserted_node_list.append(node_id_a)
elif del_parent_op in ast_script:
replace_node_str = str(replace_node['type']) + "(" + str(node_id_a) + ")"
# print(replace_node_str)
target_node_str = str(possible_replacement_node['type']) + "(" + str(target_node_id_a) + ")"
# print(target_node_str)
script_line = "Replace " + target_node_str + " with " + replace_node_str
# print(script_line)
elif len(target_node['children']) > insert_position:
possible_replacement_node = target_node['children'][insert_position]
# print(possible_replacement_node)
replacement_node_id = possible_replacement_node['id']
del_op = "Delete " + str(possible_replacement_node['type']) + "(" + str(replacement_node_id) + ")\n"
# print(del_op)
if del_op in ast_script:
# print(del_op)
replace_node_str = str(replace_node['type']) + "(" + str(node_id_a) + ")"
target_node_str = str(possible_replacement_node['type']) + "(" + str(replacement_node_id) + ")"
script_line = "Replace " + target_node_str + " with " + replace_node_str
# print(script_line)
deleted_node_list.append(replacement_node_id)
child_id_list = Extractor.extract_child_id_list(possible_replacement_node)
deleted_node_list = deleted_node_list + child_id_list
if node_id_b not in inserted_node_list:
merged_ast_script.append(script_line)
inserted_node_list.append(node_id_a)
elif "Delete" in script_line:
node_id = int((script_line.split("(")[1]).split(")")[0])
node = Finder.search_ast_node_by_id(ast_node_a, node_id)
child_id_list = Extractor.extract_child_id_list(node)
deleted_node_list = deleted_node_list + child_id_list
if node_id not in deleted_node_list:
deleted_node_list.append(node_id)
merged_ast_script.append(script_line)
elif "Move" in script_line:
# print(script_line)
move_position = int((script_line.split(" at ")[1]))
move_node_str = (script_line.split(" into ")[0]).replace("Move ", "")
move_node_id_b = int((move_node_str.split("(")[1]).split(")")[0])
move_node_id_a = mapping_ba[move_node_id_b]
move_node_b = Finder.search_ast_node_by_id(ast_node_b, move_node_id_b)
move_node_a = Finder.search_ast_node_by_id(ast_node_a, move_node_id_a)
move_node_type_b = move_node_b['type']
move_node_type_a = move_node_a['type']
if move_node_type_b == "CaseStmt":
continue
target_node_id_b = int(((script_line.split(" into ")[1]).split("(")[1]).split(")")[0])
if target_node_id_b in inserted_node_list:
continue
# print(move_node_type_b)
# print(move_node_type_a)
target_node_id_a = mapping_ba[target_node_id_b]
# print(target_node_id_a)
target_node_a = Finder.search_ast_node_by_id(ast_node_a, target_node_id_a)
target_node_str = target_node_a['type'] + "(" + str(target_node_a['id']) + ")"
# print(target_node_a)
# print(move_node_type_a)
# print(move_node_type_b)
if move_node_type_a != move_node_type_b:
script_line = "Insert " + move_node_str + " into " + target_node_str + " at " + str(move_position)
if len(target_node_a['children']) <= move_position:
script_line = "Insert " + move_node_str + " into " + target_node_str + " at " + str(move_position)
elif len(target_node_a['children']) > move_position:
possible_replacement_node = target_node_a['children'][move_position]
# print(possible_replacement_node)
replacement_node_id = possible_replacement_node['id']
del_op = "Delete " + str(possible_replacement_node['type']) + "(" + str(replacement_node_id) + ")\n"
# print(del_op)
if del_op in ast_script:
# print(del_op)
replace_node_str = str(move_node_b['type']) + "(" + str(move_node_b['id']) + ")"
target_node_str = str(possible_replacement_node['type']) + "(" + str(replacement_node_id) + ")"
script_line = "Replace " + target_node_str + " with " + replace_node_str
# print(script_line)
deleted_node_list.append(replacement_node_id)
child_id_list = Extractor.extract_child_id_list(possible_replacement_node)
deleted_node_list = deleted_node_list + child_id_list
else:
replacing_node = target_node_a['children'][move_position]
replacing_node_id = replacing_node['id']
replacing_node_str = replacing_node['type'] + "(" + str(replacing_node['id']) + ")"
script_line = "Replace " + replacing_node_str + " with " + move_node_str
deleted_node_list.append(replacing_node_id)
child_id_list = Extractor.extract_child_id_list(replacing_node)
deleted_node_list = deleted_node_list + child_id_list
# print(replacing_node_id)
inserted_node_list.append(replacing_node_id)
# print(script_line)
merged_ast_script.append(script_line)
elif "Update" in script_line:
# print(script_line)
# update_line = str(script_line).replace("Update", "Replace").replace(" to ", " with ")
# print(update_line)
merged_ast_script.append(script_line)
second_merged_ast_script = list()
for script_line in merged_ast_script:
# print(script_line)
if "Replace" in script_line:
# print(script_line)
node_id_a = int(((script_line.split(" with ")[0]).split("(")[1]).split(")")[0])
node_id_b = int(((script_line.split(" with ")[1]).split("(")[1]).split(")")[0])
node_a = Finder.search_ast_node_by_id(ast_node_a, node_id_a)
parent_node_id_a = int(node_a['parent_id'])
parent_node_a = Finder.search_ast_node_by_id(ast_node_a, parent_node_id_a)
if len(parent_node_a['children']) > 0:
count = 0
for child_node in parent_node_a['children']:
replace_op = "Replace " + child_node['type'] + "(" + str(child_node['id']) + ")"
count += sum(replace_op in s for s in merged_ast_script)
if count > 1:
node_b = Finder.search_ast_node_by_id(ast_node_b, node_id_b)
parent_node_id_b = int(node_b['parent_id'])
parent_node_b = Finder.search_ast_node_by_id(ast_node_b, parent_node_id_b)
parent_node_str_a = parent_node_a['type'] + "(" + str(parent_node_a['id']) + ")"
parent_node_str_b = parent_node_b['type'] + "(" + str(parent_node_b['id']) + ")"
new_op = "Replace " + parent_node_str_a + " with " + parent_node_str_b + "\n"
if new_op not in second_merged_ast_script:
second_merged_ast_script.append(new_op)
else:
second_merged_ast_script.append(script_line)
else:
second_merged_ast_script.append(script_line)
elif "Update" in script_line:
update_line = str(script_line).replace("Update", "Replace").replace(" to ", " with ")
# print(update_line)
second_merged_ast_script.append(update_line)
else:
second_merged_ast_script.append(script_line)
return second_merged_ast_script
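# Illustration with hypothetical node ids (not taken from a real edit script):
# given the line
#     Insert BinaryOperator(12) into CompoundStmt(7) at 2
# plus a matching "Delete X(9)" for the node currently at position 2 of
# CompoundStmt(7), the first pass above collapses the pair into
#     Replace X(9) with BinaryOperator(12)
# and the second pass lifts sibling-wide replacements up to the common parent.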
|
nilq/baby-python
|
python
|
# BANKNOTE COUNTER
value = int(input('Enter the amount to withdraw: '))
total = value  # Amount still left to dispense
ced = 50  # Start with the 50 note
totced = 0  # Number of notes of the current value
while True:
    if total >= ced:  # While the current note value still fits into the remaining total
        total -= ced  # Subtract the note value from the remaining total
        totced += 1  # Count one more note of this value
    else:  # When this note value no longer fits
        if totced > 0:  # Only print note values that were actually used
            print(f'{totced} notes of R${ced:.2f}')
        if ced == 50:  # Step down from 50
            ced = 20
        elif ced == 20:  # Step down from 20
            ced = 10
        elif ced == 10:  # Step down from 10
            ced = 1
        totced = 0  # Reset the per-value note count
        if total == 0:
            break
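# The same greedy count can be written with divmod; a compact sketch using the
# same note values (not part of the original exercise):
def count_notes(amount, notes=(50, 20, 10, 1)):
    remaining = amount
    for note in notes:
        count, remaining = divmod(remaining, note)
        if count > 0:
            print(f'{count} notes of R${note:.2f}')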
|
nilq/baby-python
|
python
|
"""
Generate static HTML files for eduid-IdP in all supported languages.
"""
import os
import sys
import six
import pkg_resources
from six.moves import configparser
from jinja2 import Environment, PackageLoader
from babel.support import Translations
__version__ = '0.1'
__copyright__ = 'SUNET'
__organization__ = 'SUNET'
__license__ = 'BSD'
__authors__ = ['Fredrik Thulin']
__all__ = [
]
_CONFIG_DEFAULTS = {'gettext_domain': 'eduid_IdP_html',
}
def translate_templates(env, loader, settings, verbose=False, debug=False):
"""
Translate all templates available through `loader'.
Returns a big dict with all the translated templates, in all languages :
{'login.jinja2': {'en': string, 'sv': string, ...},
'error.jinja2': ...
}
:param env: jinja2.Environment()
:param loader: jinja2.BaseLoader()
:param settings: dict with settings and variables available to the Jinja2 templates
:param verbose: boolean, output to stdout or not
:param debug: boolean, output debug information to stderr or not
:return: dict with translated templates
"""
languages = {}
res = {}
locale_dir = pkg_resources.resource_filename(__name__, 'locale')
for lang in pkg_resources.resource_listdir(__name__, 'locale'):
lang_dir = os.path.join(locale_dir, lang)
if not os.path.isdir(lang_dir):
if debug:
sys.stderr.write("Not a directory: {!r}\n".format(lang_dir))
continue
if verbose:
languages[lang] = 1
translations = Translations.load(locale_dir, [lang], settings['gettext_domain'])
env.install_gettext_translations(translations)
for template_file in loader.list_templates():
if template_file.endswith('.swp'):
continue
template = env.get_template(template_file)
translated = template.render(settings=settings)
            if template_file not in res:
res[template_file] = {}
res[template_file][lang] = translated.encode('utf-8')
if debug:
sys.stderr.write("Lang={!s} :\n{!s}\n\n".format(lang, translated.encode('utf-8')))
if verbose:
print("\nLanguages : {!r}\nGenerated templates : {!r}\n".format(
sorted(languages.keys()), sorted(res.keys())))
return res
def load_settings(resource_name='settings.ini'):
"""
Load settings from INI-file (package resource).
All options from all sections are collapsed to one flat namespace.
:param resource_name: string, name of package resource to load.
:return: dict with settings
"""
config = configparser.ConfigParser(_CONFIG_DEFAULTS)
if six.PY2:
config_fp = pkg_resources.resource_stream(__name__, resource_name)
config.readfp(config_fp, resource_name)
else:
config_str = pkg_resources.resource_string(__name__, resource_name)
config.read_string(config_str.decode('utf8'), resource_name)
settings = {}
for section in config.sections():
for option in config.options(section):
settings[option] = config.get(section, option)
return settings
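# Example of the INI layout that load_settings() flattens into one namespace
# (a sketch; the section name and any option besides gettext_domain are
# illustrative):
#
#   [templates]
#   gettext_domain = eduid_IdP_html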
def save_to_files(translated, output_dir, verbose):
"""
Save translated templates to files (with a .html extension).
:param translated: dict (result of translate_templates() probably)
:param output_dir: string, output path
:param verbose: boolean, print status output to stdout or not
"""
for template in translated.keys():
template_html_fn = template
if template_html_fn.endswith('.jinja2'):
# remove '.jinja2' extension
template_html_fn = template_html_fn[:len(template_html_fn) - len('.jinja2')]
template_html_fn += '.html'
for lang in translated[template].keys():
lang_dir = os.path.join(output_dir, lang)
try:
os.stat(lang_dir)
except OSError:
os.mkdir(lang_dir)
output_fn = os.path.join(lang_dir, template_html_fn)
            with open(output_fn, 'w') as fp:
                if six.PY2:
                    fp.write(translated[template][lang])
                else:
                    fp.write(translated[template][lang].decode('utf8'))
                fp.write("\n")  # eof newline disappears in Jinja2 rendering
if verbose:
print("Wrote {!r}".format(output_fn))
if verbose:
print("\n")
def main(verbose=False, output_dir=None):
"""
Code executed when this module is started as a script.
:param verbose: boolean, print status output to stdout/stderr or not
:param output_dir: string, output path
:return: boolean
"""
settings = load_settings()
if not settings:
return False
loader = PackageLoader(__name__)
env = Environment(loader=loader,
extensions=['jinja2.ext.i18n',
'jinja2.ext.autoescape',
'jinja2.ext.with_',
],
)
translated = translate_templates(env, loader, settings, verbose)
if output_dir:
save_to_files(translated, output_dir, verbose)
return True
if __name__ == '__main__':
if not main(verbose=True, output_dir="/tmp/foo"):
sys.exit(1)
sys.exit(0)
|
nilq/baby-python
|
python
|
import pywikibot
from openpyxl import load_workbook
site = pywikibot.Site()
'''
page = pywikibot.Page(site, u"Project:Sandbox")
text = page.text
#page.text = u"DarijaBot kheddam daba"
page.save(page.text+u"\n\nawwal edit dial DarijaBot")
'''
sandbox = pywikibot.Page(site, 'User:' + site.user() + '/Project:Sandbox')
sandbox.text = sandbox.text + u"\n\nawwal edit dial DarijaBot"  # 'page' above is commented out
sandbox.save()
|
nilq/baby-python
|
python
|
import numpy as np
from .agent import Agent
class RandomAgent(Agent):
def __init__(self, actions):
super(RandomAgent, self).__init__(actions)
def act(self, obs):
return np.random.randint(0, self.num_actions)
|
nilq/baby-python
|
python
|
from rA9.neurons.LIF import LIF
def LIF_recall(tau, Vth, dt, x, v_current):
model = LIF(tau_m=tau, Vth=Vth, dt=dt)
spike_list, v_current = model.forward(x, v_current=v_current)
return spike_list, v_current
def LIF_backward(tau, Vth, x, spike_list, e_grad, time):
model = LIF(tau_m=tau, Vth=Vth)
return model.backward(time=time, spike_list=spike_list, weights=x, e_gradient=e_grad)
|
nilq/baby-python
|
python
|
from meido.libs.yuntongxun.sms import CCP
from celery_tasks.main import app
@app.task(name='send_sms_code')
def send_sms_code(mobile, sms_code):
ccp = CCP()
ccp.send_template_sms(mobile, [sms_code, '5'], 1)
@app.task()
def test_print():
print(2222)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
def naive_search(l,x):
for i in range(len(l)):
if l[i]==x:
return i
n = int(input())
l = list(range(n))
cnt = 0
for x in range(n):
i = naive_search(l,x)
cnt += 1
print("#query=%d"%cnt)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains artellapipe-tools-welcome view implementation
"""
from __future__ import print_function, division, absolute_import
import os
import random
from functools import partial
from Qt.QtCore import Qt, QSize, QTimer, QByteArray, QBuffer
from Qt.QtWidgets import QSizePolicy, QLabel, QFrame, QGraphicsOpacityEffect, QRadioButton
from Qt.QtGui import QMovie
from tpDcc.managers import resources
from tpDcc.libs.qt.core import base, qtutils, animation
from tpDcc.libs.qt.widgets import layouts, buttons, stack
from artellapipe.tools.welcome.widgets import widget, frame, shortcuts, final
class WelcomeView(base.BaseWidget):
def __init__(self, project, parent=None):
self._radio_buttons = list()
self._offset = 0
self._project = project
self._toolset = parent
self._logo_gif_file = None
self._logo_gif_byte_array = None
self._logo_gif_buffer = None
self._logo_movie = None
super(WelcomeView, self).__init__(parent=parent)
self._init()
def ui(self):
super(WelcomeView, self).ui()
self.resize(685, 290)
self.setAttribute(Qt.WA_TranslucentBackground)
if qtutils.is_pyside2():
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
else:
self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
main_frame = frame.WelcomeFrame(pixmap=self._get_welcome_pixmap())
frame_layout = layouts.VerticalLayout(spacing=2, margins=(10, 0, 10, 0))
main_frame.setLayout(frame_layout)
top_layout = layouts.HorizontalLayout(spacing=2, margins=(2, 2, 2, 2))
frame_layout.addLayout(top_layout)
self._close_btn = buttons.BaseButton('', parent=self)
self._close_btn.setIcon(resources.icon('close', theme='window'))
self._close_btn.setStyleSheet('QWidget {background-color: rgba(255, 255, 255, 0); border:0px;}')
self._close_btn.setIconSize(QSize(25, 25))
top_layout.addStretch()
self._logo = QLabel('', parent=self)
top_layout.addWidget(self._logo)
top_layout.addStretch()
top_layout.addWidget(self._close_btn)
base_frame = QFrame()
base_frame.setObjectName('baseFrame')
base_frame.setFrameShape(QFrame.NoFrame)
base_frame.setFrameShadow(QFrame.Plain)
# base_frame.setAttribute(Qt.WA_TranslucentBackground)
base_frame.setStyleSheet('QFrame#baseFrame { background-color: rgba(100, 100, 100, 80); }')
base_frame.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
base_layout = layouts.VerticalLayout(spacing=2, margins=(2, 2, 2, 2))
base_frame.setLayout(base_layout)
frame_layout.addWidget(base_frame)
self._stack = stack.SlidingOpacityStackedWidget(parent=self)
self._stack.setAutoFillBackground(False)
self._stack.setAttribute(Qt.WA_TranslucentBackground)
base_layout.addWidget(self._stack)
bottom_layout = layouts.HorizontalLayout(spacing=2, margins=(2, 2, 2, 2))
frame_layout.addLayout(bottom_layout)
self._left_btn = buttons.BaseButton('Skip', parent=self)
self._left_btn.setMinimumSize(QSize(100, 30))
self._left_btn.setStyleSheet(
"""
QPushButton\n{\n\nbackground-color: rgb(250,250,250,30);\ncolor: rgb(250, 250, 250);
\nborder-radius: 5px;\nborder: 0px;\npadding-left: 15px;\npadding-right: 15px;\n}\n\nQPushButton:hover\n{\n
background-color: rgb(250,250,250,20);\n}\n\nQPushButton:pressed\n{\n\nbackground-color: rgb(0,0,0,30);\n}
"""
)
self._right_btn = buttons.BaseButton('Next', parent=self)
self._right_btn.setMinimumSize(QSize(100, 30))
self._right_btn.setStyleSheet(
"""
QPushButton\n{\n\nbackground-color: rgb(250,250,250,30);\ncolor: rgb(250, 250, 250);
\nborder-radius: 5px;\nborder: 0px;\npadding-left: 15px;\npadding-right: 15px;\n}\n\nQPushButton:hover\n{\n
background-color: rgb(250,250,250,20);\n}\n\nQPushButton:pressed\n{\n\nbackground-color: rgb(0,0,0,30);\n}
"""
)
self.setStyleSheet(
"QRadioButton::indicator {\nbackground-color: rgb(250,250,250,120);\n}\n"
"QRadioButton::indicator::unchecked {\nbackground-color: rgb(255,255,255,70);\nborder-radius: 4px;\n"
"width: 8px;\n height: 8px;\n}\nQRadioButton::indicator::checked {"
"\nbackground: qlineargradient(x1: 0, y1: 1, x2: 1, y2: 1, stop: 0 rgba(" + self._project.dev_color0 + "), "
"stop: 1 rgba(" + self._project.dev_color1 + "));\n border-radius: 5px;\n width: 10px;\n height: 10px;\n}")
self._radio_layout = layouts.HorizontalLayout(spacing=2, margins=(2, 2, 2, 2))
bottom_layout.addStretch()
bottom_layout.addWidget(self._left_btn)
bottom_layout.addStretch()
bottom_layout.addLayout(self._radio_layout)
bottom_layout.addStretch()
bottom_layout.addWidget(self._right_btn)
bottom_layout.addStretch()
self.main_layout.addWidget(main_frame)
def setup_signals(self):
self._right_btn.clicked.connect(lambda: self._on_button_clicked(+1))
self._left_btn.clicked.connect(lambda: self._on_button_clicked(-1))
def mousePressEvent(self, event):
"""
Overrides base ArtellaDialog mousePressEvent function
:param event: QMouseEvent
"""
self._offset = event.pos()
def mouseMoveEvent(self, event):
"""
Overrides base ArtellaDialog mouseMoveEvent function
:param event: QMouseEvent
"""
x = event.globalX()
y = event.globalY()
x_w = self._offset.x()
y_w = self._offset.y()
self._toolset.attacher.move(x - x_w, y - y_w)
def _init(self):
"""
Initializes Welcome dialog
"""
self._tab_opacity_effect = QGraphicsOpacityEffect(self)
self._tab_opacity_effect.setOpacity(0)
self._stack.setGraphicsEffect(self._tab_opacity_effect)
self._setup_logo()
self._setup_pages()
self._set_index(0)
def _setup_logo(self):
"""
Internal function that setup project logo
"""
logo_gif = resources.get('images', '{}_logo.gif'.format(self._project.name.lower()))
if not logo_gif or not os.path.isfile(logo_gif):
return
        with open(logo_gif, 'rb') as logo_fp:
            self._logo_gif_file = logo_fp.read()
self._logo_gif_byte_array = QByteArray(self._logo_gif_file)
self._logo_gif_buffer = QBuffer(self._logo_gif_byte_array)
self._logo_movie = QMovie()
self._logo_movie.setDevice(self._logo_gif_buffer)
self._logo_movie.setCacheMode(QMovie.CacheAll)
self._logo_movie.setScaledSize(QSize(60, 60))
self._logo_movie.setSpeed(100)
self._logo_movie.jumpToFrame(0)
self._logo_movie.start()
self._logo.setAttribute(Qt.WA_NoSystemBackground)
self._logo.setMovie(self._logo_movie)
def _setup_pages(self):
"""
Internal callback function that set the pages of the stack
Overrides to add new pages
"""
self._welcome_widget = widget.WelcomeWidget(project=self._project, parent=self)
self._shortcuts_widget = shortcuts.ShortcutsWidget(project=self._project, parent=self)
self._final_widget = final.FinalWidget(project=self._project, parent=self)
self._final_widget.showChangelog.connect(self._on_show_changelog)
self._add_page(self._welcome_widget)
self._add_page(self._shortcuts_widget)
self._add_page(self._final_widget)
def _get_welcome_pixmap(self):
"""
Returns pixmap to be used as splash background
:return: Pixmap
"""
welcome_path = resources.get('images', 'welcome.png', key='project')
if not os.path.isfile(welcome_path):
            welcome_dir = os.path.dirname(welcome_path)
            welcome_files = [
                f for f in os.listdir(welcome_dir) if f.startswith('welcome') and os.path.isfile(
                    os.path.join(welcome_dir, f))]
if welcome_files:
welcome_index = random.randint(0, len(welcome_files) - 1)
welcome_name, splash_extension = os.path.splitext(welcome_files[welcome_index])
welcome_pixmap = resources.pixmap(
welcome_name, extension=splash_extension[1:], key='project')
else:
welcome_pixmap = resources.pixmap('welcome')
else:
welcome_pixmap = resources.pixmap('welcome', key='project')
return welcome_pixmap.scaled(QSize(800, 270))
def _add_page(self, widget):
"""
Adds a new widget into the stack
:param widget: QWidget
"""
total_pages = len(self._radio_buttons)
new_radio = QRadioButton(parent=self)
if total_pages == 0:
new_radio.setChecked(True)
new_radio.clicked.connect(partial(self._set_index, total_pages))
self._stack.addWidget(widget)
self._radio_layout.addWidget(new_radio)
self._radio_buttons.append(new_radio)
    def _increment_index(self, step):
        """
        Internal function that increases index of the stack widget
        :param step: int
        """
        current = self._stack.currentIndex()
        self._set_index(current + step)
def _set_index(self, index):
"""
Internal function that updates stack index and UI
:param index: int
"""
animation.fade_animation(start='current', end=0, duration=400, object=self._tab_opacity_effect)
if index <= 0:
index = 0
if index >= self._stack.count() - 1:
index = self._stack.count() - 1
self._radio_buttons[index].setChecked(True)
self.props_timer = QTimer(singleShot=True)
self.props_timer.timeout.connect(self._on_fade_up_tab)
self.props_timer.timeout.connect(lambda: self._stack.setCurrentIndex(index))
self.props_timer.start(450)
prev_text = 'Previous'
next_text = 'Next'
skip_text = 'Skip'
close_text = 'Finish'
if index == 0:
self._left_btn.setText(skip_text)
self._right_btn.setText(next_text)
elif index < self._stack.count() - 1:
self._left_btn.setText(prev_text)
self._right_btn.setText(next_text)
elif index == self._stack.count() - 1:
self._left_btn.setText(prev_text)
self._right_btn.setText(close_text)
def _launch_project(self):
"""
Internal function that closes Welcome dialog and launches project tools
"""
self._toolset.attacher.fade_close()
def _on_fade_up_tab(self):
"""
Internal callback function that is called when stack index changes
"""
animation.fade_animation(start='current', end=1, duration=400, object=self._tab_opacity_effect)
    def _on_button_clicked(self, step):
        """
        Internal callback function that is called when Next and Skip buttons are pressed
        :param step: int
        """
        current = self._stack.currentIndex()
        action = 'flip'
        if current == 0:
            if step == -1:
                action = 'close'
        elif current == self._stack.count() - 1:
            if step == 1:
                action = 'close'
        if action == 'flip':
            self._increment_index(step)
elif action == 'close':
self._launch_project()
def _on_show_changelog(self, changelog_window):
"""
Internal callback function that is called when show changelog button is pressed in the final widget
"""
self.close_tool_attacher()
# changelog_window.show()
|
nilq/baby-python
|
python
|
from flask_cors import cross_origin
from app.blueprints.base_blueprint import (
Blueprint,
BaseBlueprint,
request,
Security,
Auth,
)
from app.controllers.user_employment_controller import UserEmploymentController
url_prefix = "{}/user_employment_history".format(BaseBlueprint.base_url_prefix)
user_employment_blueprint = Blueprint(
"user_employment", __name__, url_prefix=url_prefix
)
user_employment_controller = UserEmploymentController(request)
@user_employment_blueprint.route("/user/<int:user_id>", methods=["GET"])
# @cross_origin(supports_credentials=True)
@Auth.has_permission(["view_user_employment_history"])
# @swag_from('documentation/get_all_user_employment_history.yml')
def list_user_employment_history(user_id):
return user_employment_controller.list_user_employment_history(user_id)
@user_employment_blueprint.route(
"/user-single/<int:user_employment_id>", methods=["GET"]
)
# @cross_origin(supports_credentials=True)
@Auth.has_permission(["view_user_employment_history"])
# @swag_from('documentation/get_user_employment_by_id.yml')
def get_user_employment(user_employment_id):
return user_employment_controller.get_user_employment(user_employment_id)
@user_employment_blueprint.route("/", methods=["POST"])
# @cross_origin(supports_credentials=True)
@Security.validator(
[
"user_id|required:int",
"institution_name|required:string",
"job_title|required:string",
"start_date|required:date",
"end_date|required:date",
"is_current|required",
"skills|optional:list_int",
]
)
@Auth.has_permission(["create_user_employment_history"])
# @swag_from('documentation/create_user_employment.yml')
def create_user_employment():
return user_employment_controller.create_user_employment()
@user_employment_blueprint.route("/<int:update_id>", methods=["PUT", "PATCH"])
# @cross_origin(supports_credentials=True)
@Security.validator(
[
"user_id|required:int",
"user_employment_id|required:int",
"institution_name|required:string",
"job_title|required:string",
"start_date|required:date",
"end_date|required:date",
"is_current|required",
"skills|optional:list_int",
]
)
@Auth.has_permission(["update_user_employment_history"])
# @swag_from("documentation/update_user_employment.yml")
def update_user_employment(update_id):
return user_employment_controller.update_user_employment(update_id)
@user_employment_blueprint.route("/<int:user_employment_id>", methods=["DELETE"])
# @cross_origin(supports_credentials=True)
@Auth.has_permission(["delete_user_employment_history"])
# @swag_from("documentation/delete_user_employment.yml")
def delete_user_employment(user_employment_id):
return user_employment_controller.delete_user_employment(user_employment_id)
|
nilq/baby-python
|
python
|
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Thread, Message
# Create your tests here.
class ThreadTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create_user("user1", None, "test1234")
self.user2 = User.objects.create_user("user2", None, "test1234")
self.user3 = User.objects.create_user("user3", None, "test1234")
self.thread = Thread.objects.create()
def test_add_users_to_thread(self):
self.thread.users.add(self.user1, self.user2)
self.assertEqual(len(self.thread.users.all()), 2)
def test_filter_thread_by_users(self):
self.thread.users.add(self.user1, self.user2)
threads = Thread.objects.filter(users=self.user1).filter(users=self.user2)
self.assertEqual(self.thread, threads[0])
def test_filter_non_existent_thread(self):
threads = Thread.objects.filter(users=self.user1).filter(users=self.user2)
self.assertEqual(len(threads), 0)
def test_add_message_to_thread(self):
self.thread.users.add(self.user1, self.user2)
message1 = Message.objects.create(user=self.user1, content="Hola!")
message2 = Message.objects.create(user=self.user2, content="Que tal?")
self.thread.messages.add(message1, message2)
self.assertEqual(len(self.thread.messages.all()), 2)
for message in self.thread.messages.all():
print("({}): {}".format(message.user, message.content))
def test_add_message_from_user_not_in_thread(self):
self.thread.users.add(self.user1, self.user2)
message1 = Message.objects.create(user=self.user1, content="Hola!")
message2 = Message.objects.create(user=self.user2, content="Que tal?")
message3 = Message.objects.create(user=self.user3, content="Soy un espía")
self.thread.messages.add(message1, message2, message3)
self.assertEqual(len(self.thread.messages.all()), 2)
def test_find_thread_with_custom_manager(self):
self.thread.users.add(self.user1, self.user2)
thread = Thread.objects.find(self.user1, self.user2)
self.assertEqual(self.thread, thread)
def test_find_or_create_thread_with_custom_manager(self):
self.thread.users.add(self.user1, self.user2)
thread = Thread.objects.find_or_create(self.user1, self.user2)
self.assertEqual(self.thread, thread)
thread = Thread.objects.find_or_create(self.user1, self.user3)
self.assertIsNotNone(thread)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
import sys
import Tools
from Bio import SeqIO
from Bio import SeqRecord
from Bio import Seq
ap = argparse.ArgumentParser(description="Take an inversion bed file, and print the sequences in inverted format with some flanking part of the genome.")
ap.add_argument("bed", help="Input BED file. This should be in the insertion bed format.")
ap.add_argument("genome", help="Input genome with a .fai file.")
ap.add_argument("contexts", help="Output file.")
ap.add_argument("--window", help="Amount to store on sides.", type=int, default=1000)
ap.add_argument("--bponly", help="Print only the breakpoints, value specifies amount of sequence about each breakpoint to print.", type=int, default=None)
ap.add_argument("--noreverse", help="Assume the breakpoints are from assembled contigs and there is no need to reverse the alignment.", default=False, action='store_true')
args = ap.parse_args()
bedFile = open(args.bed)
contextFile = open(args.contexts, 'w')
fai = Tools.ReadFAIFile(args.genome+".fai")
genomeFile = open(args.genome)
for line in bedFile:
vals = line.split()
chrom = vals[0]
start = int(vals[1])
end = int(vals[2])
prefixCoords = (max(0, start - args.window), start)
suffixCoords = (end, min(fai[chrom][0], end+args.window))
invSeq = Seq.Seq(Tools.ExtractSeq((chrom, start, end), genomeFile, fai))
prefix = Tools.ExtractSeq((chrom, prefixCoords[0],prefixCoords[1]), genomeFile, fai)
suffix = Tools.ExtractSeq((chrom, suffixCoords[0],suffixCoords[1]), genomeFile, fai)
    if not args.noreverse:
        invSeqStr = str(invSeq.reverse_complement())  # str() replaces the removed Seq.tostring()
    else:
        invSeqStr = str(invSeq)
context = prefix + invSeqStr + suffix
seqname = "/".join(vals[0:3])
if (args.bponly is None):
SeqIO.write(SeqRecord.SeqRecord(Seq.Seq(context), id=seqname, name="",description=""), contextFile, "fasta")
else:
bp1 = len(prefix)
bp2 = len(prefix) + len(invSeqStr)
bp1Seq = context[bp1-args.bponly:bp1+args.bponly]
bp2Seq = context[bp2-args.bponly:bp2+args.bponly]
bp1Name = seqname + "_1"
bp2Name = seqname + "_2"
SeqIO.write(SeqRecord.SeqRecord(Seq.Seq(bp1Seq), id=bp1Name, name="",description=""), contextFile, "fasta")
SeqIO.write(SeqRecord.SeqRecord(Seq.Seq(bp2Seq), id=bp2Name, name="",description=""), contextFile, "fasta")
contextFile.close()
|
nilq/baby-python
|
python
|
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView
from django.http import JsonResponse
from api.models import Country, City
class CountryView(LoginRequiredMixin, UserPassesTestMixin, ListView):
model = Country
response_class = JsonResponse
def handle_no_permission(self):
return JsonResponse({'detail': 'Access Denied'}, status=403)
def test_func(self):
return self.request.user.username.startswith('t')
def render_to_response(self, context, **response_kwargs):
features = [obj.as_dict for obj in self.get_queryset()]
return JsonResponse({'type': 'FeatureCollection', 'features': features}, safe=False)
class CityView(ListView):
model = City
response_class = JsonResponse
def render_to_response(self, context, **response_kwargs):
features = [obj.as_dict for obj in self.get_queryset()]
return JsonResponse({'type': 'FeatureCollection', 'features': features}, safe=False)
|
nilq/baby-python
|
python
|
from .triplet_generators import generator_from_neighbour_matrix, \
TripletGenerator, UnsupervisedTripletGenerator, SupervisedTripletGenerator
from .generators import KerasSequence
|
nilq/baby-python
|
python
|
import setuptools
with open("./README.md", "r") as f:
description = f.read()
setuptools.setup(
name="blive",
version="0.0.5",
author="cam",
author_email="yulinfeng000@gmail.com",
long_description=description,
long_description_content_type="text/markdown",
url="https://github.com/yulinfeng000/blive",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
install_requires=["aiohttp","loguru","requests","APScheduler","brotli"]
)
|
nilq/baby-python
|
python
|
from .md5_database import MD5DB
from .paper_database import paperDB
|
nilq/baby-python
|
python
|
import socket
def resolv(hostname):
try:
return socket.gethostbyname(hostname)
except socket.gaierror as e:
        raise Exception('Could not resolve "%s": %s' % (hostname, e))
class FilterModule(object):
filter_map = {
'resolv': resolv,
}
def filters(self):
return {'resolv': resolv}
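
# Usage sketch: as an Ansible filter plugin this file would live in a
# filter_plugins/ directory, making the filter available in templates,
# e.g. {{ 'example.com' | resolv }}. A quick local smoke test:
if __name__ == '__main__':
    print(resolv('localhost'))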
|
nilq/baby-python
|
python
|
import unittest
import mutable_test
class TestRemoveShared(unittest.TestCase):
'''Tests for function duplicates.remove_shared.'''
def test_general_case(self):
'''
Test remove_shared where there are items that appear in both lists,
and items that appear in only one or the other list.
'''
list_1 = [1, 2, 3, 4, 5, 6]
list_2 = [2, 4, 5, 7]
list_1_expected = [1, 3, 6]
list_2_expected = [2, 4, 5, 7]
mutable_test.remove_shared(list_1, list_2)
self.assertEqual(list_1, list_1_expected)
self.assertEqual(list_2, list_2_expected)
if __name__ == '__main__':
unittest.main(exit=False)
|
nilq/baby-python
|
python
|
import numpy as np
from .preprocessing import *
from .sequence_alignment import *
class SemAlign(object):
def __init__(self, embeddings_path, kernel_size=1, delimiter=',', verbose=True):
self.lookup = load_embeddings(embeddings_path, delimiter)
self.kernel_size = kernel_size
self.verbose = verbose
def align(self, text_str1, text_str2, k=1, w=(0.25,0.25)):
# Preprocess strings
tokens_list = preprocess_strs([text_str1, text_str2])
# Gather embeddings for tokens in each string
token_embeddings = [get_embeddings(_, self.lookup) for _ in tokens_list]
# Search for sequence alignments for each search str along text file
all_alignments = []
alignment_scores = []
# Apply sequence kernels of radius len(search_phrase) to search phrase and text
text1 = apply_sequence_kernel(token_embeddings[0], self.kernel_size)
text2 = apply_sequence_kernel(token_embeddings[1], self.kernel_size)
# Calculate cosine similarity between search phrase and text
cos_dist = distance_matrix(text1, text2)
# Calculate scoring matrix for sequence alignment
score = scoring_matrix(cos_dist, wi=w[0], wj=w[1])
        # Collect candidate alignments (k=None places no cap here; the top k are selected after scoring below)
alignments = traceback(score, k=None)
for j, _ in enumerate(alignments):
all_alignments.append(_)
alignment_scores.append(score_alignment(_, token_embeddings[0], text2, 1-(j/len(alignments))))
# Sort
sorted_scores = np.argsort(alignment_scores)[::-1]
# Display results
if self.verbose:
if k>1: print("Top ", k,':')
for i in range(k):
alignment = all_alignments[sorted_scores[i]]
ss1 = []
ss2 = []
l = -1
j = -1
for _ in reversed(alignment):
if _[0] != l:
ss1.append(tokens_list[0][_[0]])
l = _[0]
else: ss1.append('GAP')
if _[1] != j:
ss2.append(tokens_list[1][_[1]])
j = _[1]
else: ss2.append('GAP')
print('Match', i+1, ':', 'Score:',alignment_scores[sorted_scores[i]])
print(ss1)
print(ss2,'\n')
# Compile Top results
alignments = np.array(alignments)
alignment_scores = np.array(alignment_scores)
top_alignments = alignments[sorted_scores[:k].astype('int')]
top_scores = alignment_scores[sorted_scores[:k].astype('int')]
return top_alignments, top_scores
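
# Hypothetical usage (the embeddings path and texts are placeholders, and the
# preprocessing/sequence_alignment helpers are assumed to be importable):
# aligner = SemAlign('embeddings.csv', kernel_size=2)
# top_alignments, top_scores = aligner.align('the quick brown fox', 'a fast brown fox', k=3)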
|
nilq/baby-python
|
python
|
from enum import Enum, auto
class Color(Enum):
BLACK = 0
WHITE = 1
RED = 2
BLUE = 3
# class TestEnum(Enum):
# ONE = 0
# ONE = 1
class TestNewEnum(Enum):
ZERO = auto()
ONE = auto()
TWO = auto()
if __name__ == "__main__":
print(Color.BLACK)
print(Color.BLACK.name)
print(Color.BLACK.value)
for it in Color:
print(it)
for it in TestNewEnum:
print(repr(it))
|
nilq/baby-python
|
python
|
# Update the paths according to your environment
OPT = "/data/guide/Trident/llvm-2.9-build/bin/opt"
LLVMGCC = "/data/Trident/llvm-gcc4.2-2.9-x86_64-linux/bin/llvm-gcc"
LLVMLINK = "/data/guide/Trident/llvm-2.9-build/bin/llvm-link"
LLVMPASS_FOLDER = "/data/guide/Trident/llvm-2.9-build/lib/"
LLI = "/data/guide/Trident/llvm-2.9-build/bin/lli"
PYTHON = "python"
|
nilq/baby-python
|
python
|
def pour(jug1, jug2):
capacityA = 5
capacityB = 7
Measure = 4
print("%d \t %d" % (jug1, jug2))
    # use == for value comparison; `is` on ints relies on CPython's small-int cache
    if jug2 == Measure:  # jug2 has "measure" amount of water, stop
        return
    elif jug2 == capacityB:
        pour(0, jug1)
    elif jug1 != 0 and jug2 == 0:
        pour(0, jug1)
    elif jug1 == Measure:  # jug1 has "measure" amount of water, pour that to jug2
        pour(jug1, 0)
elif jug1 < capacityA:
pour(capacityA, jug2)
elif jug1 < (capacityB-jug2):
pour(0, (jug1+jug2))
else:
pour(jug1-(capacityB-jug2), (capacityB-jug2)+jug2)
print('''
capacityA = 5
capacityB = 7
Measure = 4
''')
print("Jug 1 \t Jug 2")
pour(0, 0)
|
nilq/baby-python
|
python
|
def isExistingClassification(t):
pass
def getSrcNodeName(dst):
"""
Get the name of the node connected to the argument dst plug.
"""
pass
def getCollectionsRecursive(parent):
pass
def disconnect(src, dst):
pass
def findVolumeShader(shadingEngine, search='False'):
"""
Returns the volume shader (as MObject) of the given shading engine (as MObject).
"""
pass
def transferPlug(src, dst):
"""
Transfer the connection or value set on plug 'src' on to the plug 'dst'.
"""
pass
def findSurfaceShader(shadingEngine, search='False'):
"""
Returns the surface shader (as MObject) of the given shading engine (as MObject).
"""
pass
def findPlug(userNode, attr):
"""
Return plug corresponding to attr on argument userNode.
If the argument userNode is None, or the attribute is not found, None
is returned.
"""
pass
def disconnectSrc(src):
"""
Disconnect a source (readable) plug from all its destinations.
Note that a single plug can be both source and destination, so this
interface makes the disconnection intent explicit.
"""
pass
def isSurfaceShaderNode(obj):
pass
def getSrcUserNode(dst):
"""
Get the user node connected to the argument dst plug.
Note: Only applies to MPxNode derived nodes
If the dst plug is unconnected, None is returned.
"""
pass
def plugSrc(dstPlug):
"""
Return the source of a connected destination plug.
If the destination is unconnected, returns None.
"""
pass
def getOverridesRecursive(parent):
pass
def nameToUserNode(name):
pass
def isShadingType(typeName):
pass
def canOverrideNode(node):
pass
def createSrcMsgAttr(longName, shortName):
"""
Create a source (a.k.a. output, or readable) message attribute.
"""
pass
def deleteNode(node):
"""
Remove the argument node from the graph.
This function is undoable.
"""
pass
def connect(src, dst):
"""
Connect source plug to destination plug.
If the dst plug is None, the src plug will be disconnected from all its
destinations (if any). If the src plug is None, the dst plug will be
disconnected from its source (if any). If both are None, this function
does nothing. If the destination is already connected, it will be
disconnected.
"""
pass
def isExistingType(t):
pass
def getDstUserNodes(src):
"""
Get the user nodes connected to the argument src plug.
Note: Only applies to MPxNode derived nodes
If the src plug is unconnected, None is returned.
"""
pass
def plugDst(srcPlug):
"""
Return the destinations of a connected source plug.
If the source is unconnected, returns None.
"""
pass
def _recursiveSearch(colList):
"""
    # Functions to compute the number of operations when layers are switched
"""
pass
def getTotalNumberOperations(model):
pass
def _isDestination(plug):
"""
Returns True if the given plug is a destination plug, and False otherwise.
    If the plug is a compound attribute, it returns True if any of its children
    is a destination plug.
"""
pass
def isShadingNode(obj):
pass
def disconnectDst(dst):
"""
Disconnect a destination (writable) plug from its source.
Note that a single plug can be both source and destination, so this
interface makes the disconnection intent explicit.
"""
pass
def findDisplacementShader(shadingEngine, search='False'):
"""
Returns the displacement shader (as MObject) of the given shading engine (as MObject).
"""
pass
def _findShader(shadingEngine, attribute, classification='None'):
"""
Returns the shader connected to given attribute on given shading engine.
Optionally search for nodes from input connections to the shading engines
satisfying classification if plug to attribute is not a destination and
a classification string is specified.
"""
pass
def isInheritedType(parentTypeName, childTypeName):
pass
def getSrcNode(dst):
"""
Get the node connected to the argument dst plug.
"""
pass
def createGenericAttr(longName, shortName):
pass
def nameToExistingUserNode(name):
pass
def _transferConnectedPlug(src, dst):
pass
def connectMsgToDst(userNode, dst):
"""
Connect the argument userNode's message attribute to the
argument dst plug.
If the userNode is None the dst plug is disconnected
from its sources.
If the dst plug is None the userNode's message plug
is disconnected from its destinations
"""
pass
def isSurfaceShaderType(typeName):
pass
def notUndoRedoing(f):
"""
Decorator that will call the decorated method only if not currently in undoing or redoing.
Particularly useful to prevent callbacks from generating commands since that would clear the redo stack.
"""
pass
def createDstMsgAttr(longName, shortName):
"""
Create a destination (a.k.a. input, or writable) message attribute.
"""
pass
kNoSuchNode = []
kSupportedVectorTypes = set()
kSupportedSimpleTypes = set()
kPlugTypeMismatch = []
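
# A minimal sketch of how connect() might look with maya.api.OpenMaya,
# assuming src/dst are MPlug instances. Illustrative only (it must run inside
# Maya); the real module also handles the None cases described in the
# docstring above.
#
# import maya.api.OpenMaya as OpenMaya
#
# def _connect_sketch(src, dst):
#     modifier = OpenMaya.MDGModifier()
#     if dst.isDestination:
#         modifier.disconnect(dst.source(), dst)  # break the existing connection first
#     modifier.connect(src, dst)
#     modifier.doIt()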
|
nilq/baby-python
|
python
|
import scanpy as sc
import numpy as np
import pandas as pd
from scdcdm.util import cell_composition_data as ccd
#%%
adata_ref = sc.datasets.pbmc3k_processed() # this is an earlier version of the dataset from the pbmc3k tutorial
print(adata_ref.X.shape)
#%%
cell_counts = adata_ref.obs["louvain"].value_counts()
print(cell_counts)
#%%
df = pd.DataFrame()
df = df.append(cell_counts, ignore_index=True)
print(df)
#%%
cell_counts_2 = cell_counts
new_dat = np.random.choice(1500, cell_counts_2.size)
cell_counts_2 = cell_counts_2.replace(cell_counts_2.data, new_dat)
cell_counts_2.index = cell_counts_2.index.tolist()
cell_counts_2["test_type"] = 256
print(cell_counts_2)
#%%
df = df.append(cell_counts_2, ignore_index=True)
print(df)
#%%
cell_counts_3 = cell_counts_2.iloc[[0, 3, 7, 8]]
print(cell_counts_3)
#%%
df = df.append(cell_counts_3, ignore_index=True)
print(df)
#%%
covs = dict(zip(np.arange(3), np.random.uniform(0, 1, 3)))
print(covs)
print(covs[0])
#%%
ddf = pd.DataFrame()
ddf = ddf.append(pd.Series(covs), ignore_index=True)
print(ddf)
#%%
print(adata_ref.uns_keys())
print(adata_ref.uns["neighbors"])
#%%
adata_ref.uns["cov"] = {"x1": 0, "x2": 1}
print(adata_ref.uns["cov"])
#%%
print(df.sum(axis=0).rename("n_cells").to_frame())
#%%
data = ccd.from_scanpy_list([adata_ref, adata_ref, adata_ref],
cell_type_identifier="louvain",
covariate_key="cov")
print(data.X)
print(data.var)
print(data.obs)
|
nilq/baby-python
|
python
|
"""
:type: tuple
:Size: 1.295MB
:Package Requirements: * **sklearn**
Vec-colnames and neighbor matrix used in Substitute DCES. See :py:class:`.DCESSubstitute` for detail.
"""
import os
import pickle
from OpenAttack.utils import make_zip_downloader
NAME = "AttackAssist.DCES"
URL = "https://cdn.data.thunlp.org/TAADToolbox/DCES.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
with open(os.path.join(path, 'descs.pkl'), 'rb') as f:
descs = pickle.load(f)
from sklearn.neighbors import NearestNeighbors
neigh = NearestNeighbors(** {
'algorithm': 'auto',
'leaf_size': 30,
'metric': 'euclidean',
'metric_params': None,
'n_jobs': 1,
'n_neighbors': 5,
'p': 2,
'radius': 1.0
})
return descs, neigh
|
nilq/baby-python
|
python
|
#coding:utf-8
"""
@file: db_config
@author: lyn
@contact: tonylu716@gmail.com
@python: 3.3
@editor: PyCharm
@create: 2016-11-20 12:18
@description:
    SQLAlchemy database configuration
"""
from JsonConfig import DB_Config
for path in ['.','..']:
try:
db = DB_Config(
json_file_path=
'{}/pgdb_config.json'.format(path)
).to_dict()
    except Exception:  # config missing or unreadable at this path; try the next one
continue
pg_url = (
'{}://{}:{}@{}:{}/{}'
).format(
db['db_type'],db['user'],db['password'],
db['host'],db['port'],db['db_name'],
)
print(pg_url)
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
Session = sessionmaker()
engine = create_engine(
name_or_url = pg_url,
echo = False
)
Session.configure(bind=engine)
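
# Quick smoke test (assumes the database described in pgdb_config.json is
# reachable; `text` comes from the wildcard sqlalchemy import above):
if __name__ == '__main__':
    session = Session()
    print(session.execute(text('SELECT 1')).scalar())
    session.close()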
|
nilq/baby-python
|
python
|
import os
import datetime
import dropbox
from app import db
from app.models import SystemLog
from app.enums import LogStatus, LogType
from flask import current_app
def download(dbx, folder, subfolder, name):
"""Download a file.
Return the bytes of the file, or None if it doesn't exist.
"""
path = '/%s/%s/%s' % (folder, subfolder.replace(os.path.sep, '/'), name)
while '//' in path:
path = path.replace('//', '/')
try:
md, res = dbx.files_download(path)
except dropbox.exceptions.HttpError as err:
print('*** HTTP error', err)
return None
data = res.content
print(len(data), 'bytes; md:', md)
return data
def log_error_to_database(error):
current_app.logger.error(error)
db.session.rollback()
error_message = str(error)
log = SystemLog(message=error_message, status=LogStatus.OPEN, type=LogType.WEBSITE,
created_at=datetime.datetime.now())
db.session.add(log)
db.session.commit()
|
nilq/baby-python
|
python
|
NumOfRows = int(input("Enter number of rows: "))
coefficient = 1
for i in range(1, NumOfRows+1):
    for space in range(1, NumOfRows-i+1):
        print(" ", end="")
    for j in range(0, i):
        if j == 0 or i == 0:
            coefficient = 1
        else:
            coefficient = coefficient * (i - j) // j
        print(coefficient, end=" ")
print()
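
# Sample run: for an input of 4 the loop above prints (ignoring trailing spaces)
#    1
#   1 1
#  1 2 1
# 1 3 3 1
# since each entry follows from coefficient = coefficient * (i - j) // j.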
|
nilq/baby-python
|
python
|
import abc
class ORMDB(abc.ABC):
@abc.abstractmethod
def parse_sql(self, sql_str):
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""This python program converts various parts of glowscript from the most
convenient format for modification into the most convenient format for
deployment.
* Take shaders from shaders/*.shader and combine them into lib/glow/shaders.gen.js
* Extract glowscript libraries list from ``untrusted/run.js``.
This script requires ``slimit`` as a dependency::
$ pip install slimit
TODO
* Come up with a less painful model for development than running this after every change
* Combine and minify lib/*.js into ide.min.js, run.min.js, and embed.min.js
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
import subprocess
from functools import partial
from collections import namedtuple
from pprint import pprint
from slimit import ast
from slimit.parser import Parser as JSParser
from slimit.visitors import nodevisitor
version = "2.2dev"
src_dir = os.path.dirname(__file__)
def extract_glow_lib():
runjs = norm_path('untrusted/run.js')
parser = JSParser()
with open(runjs) as f:
tree = parser.parse(f.read())
for node in nodevisitor.visit(tree):
if (isinstance(node, ast.Assign) and
isinstance(node.left, ast.DotAccessor) and
node.left.identifier.value == 'glowscript_libraries' and
isinstance(node.right, ast.Object)):
break
else:
print('Parsing {} failed'.format(runjs))
exit(-1)
return preproc_lib_path({
prop.left.value:
[
eval(lib.value)
for lib in prop.right.items
if isinstance(lib, ast.String)
]
for prop in node.right.properties
})
def preproc_lib_path(libs):
pjoin = partial(os.path.join, src_dir, 'untrusted')
return {pkg: map(os.path.normpath, (map(pjoin, paths)))
for pkg, paths in libs.items()}
def build_shader():
shader_file = ["Export({shaders: {"]
shaders_dir = os.path.join(src_dir, 'shaders')
output_js = os.path.join(src_dir, 'lib', 'glow', 'shaders.gen.js')
for fn in os.listdir(shaders_dir):
if not fn.endswith('.shader'):
continue
name = fn.rpartition('.shader')[0]
with open(os.path.join(shaders_dir, fn), 'rt') as f:
shader_file.append('"{name}":{src!r},'.format(
name=name, src=f.read()))
shader_file.append('}});')
with open(output_js, 'w') as f:
f.writelines('\n'.join(shader_file))
print("Shader {!r} built successfully.".format(output_js))
def norm_path(p):
'''
:param p: path related to source dir
>>> norm_path('lib/glow/graph.js')
'path/to/src/dir/lib/glow/graph.js'
'''
return os.path.normpath(os.path.join(src_dir, p))
def combine(inlibs):
def gen():
yield (
"/*This is a combined, compressed file. "
"Look at https://github.com/BruceSherwood/glowscript "
"for source code and copyright information.*/"
)
yield ";(function(){})();"
for fn in inlibs:
with open(fn, 'r') as f:
yield f.read()
return "\n".join(gen())
def minify(inlibs, inlibs_nomin, outlib, no_min=False):
'''
Do unglify for ``inlibs``
:param inlibs: a list of paths which want to be minify
:param inlibs_nomin: a list of paths which do *not* want to be minify
:param no_min: if True, we build no minified libraries only.
Available environment variable:
:NODE_PATH: the path of nodejs exetuable
'''
node_cmd = os.environ.get('NODE_PATH', 'node')
uglifyjs = norm_path('build-tools/UglifyJS/bin/uglifyjs')
with open(outlib, 'w') as outf:
if not no_min:
uglify = subprocess.Popen(
[node_cmd, uglifyjs],
stdin=subprocess.PIPE,
stdout=outf,
)
uglify.communicate(combine(inlibs))
rc = uglify.wait()
if rc != 0:
print("Something went wrong on {}".format(outlib))
else:
print("Uglify {} successfully".format(outlib))
if inlibs_nomin:
outf.write(combine(inlibs_nomin))
def build_package(libs, no_min=False):
'''
:param libs: the dictionary contain all glowscript libraries::
{
"package_1": [
'lib 1'
...
],
"package_2": [
...
],
...
}
:param no_min: if True, we build no minified libraries only.
'''
Package = namedtuple('Package',
('inlibs', 'inlibs_nomin', 'outlib', 'comment'))
pkgs = (
Package(inlibs='run',
inlibs_nomin=[],
outlib='glow.{}.min.js'.format(version),
comment='glow run-time package'),
Package(inlibs='compile',
inlibs_nomin=[],
outlib='compiler.{}.min.js'.format(version),
comment='compiler package'),
Package(inlibs='RSrun',
inlibs_nomin=[],
outlib='RSrun.{}.min.js'.format(version),
comment='RapydScript run-time package'),
Package(inlibs='RScompile',
inlibs_nomin=[],
outlib='RScompiler.{}.min.js'.format(version),
comment='GlowScript package'),
)
for pkg in pkgs:
minify(libs[pkg.inlibs],
pkg.inlibs_nomin,
norm_path('package/{}'.format(pkg.outlib)),
no_min=no_min)
print('Finished {}'.format(pkg.comment))
def cmd_args():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--shader', action='store_true', default=False,
help="Build shader file 'lib/glow/shaders.gen.js' only")
parser.add_argument('--no-min', dest='no_min', action='store_true',
default=False, help="Build non-minified libraries only")
parser.add_argument('-l', '--libs', action='store_true', default=False,
help='Show glowscript libraries and exit')
return parser.parse_args()
if __name__ == '__main__':
glowscript_libraries = extract_glow_lib()
args = cmd_args()
if args.libs:
pprint(glowscript_libraries)
elif args.shader:
        build_shader()  # build_shader takes no arguments
else: # default: build all
build_shader()
build_package(glowscript_libraries, no_min=args.no_min)
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import argparse
import io
import numpy as np
import os
from PIL import Image
import requests
import time
import yacs
import torch
from poptorch import inferenceModel, Options
from models.detector import Detector
from models.yolov4_p5 import Yolov4P5
from utils.config import get_cfg_defaults
from utils.preprocessing import ResizeImage, Pad, ToTensor
from utils.tools import load_and_fuse_pretrained_weights, post_processing, StatRecorder
def get_cfg():
cfg = get_cfg_defaults()
cfg.model.image_size = 416
cfg.inference.nms = True
cfg.inference.class_conf_threshold = 0.001
cfg.inference.iou_threshold = 0.65
cfg.inference.nms_max_detections = 10
cfg.inference.pre_nms_topk_k = 1180
cfg.ipuopts.batches_per_step = 1
cfg.model.normalization = "batch"
cfg.model.activation = "mish"
cfg.model.half = False
cfg.model.uint_io = True
cfg.model.input_channels = 3
cfg.model.micro_batch_size = 1
cfg.model.mode = "test"
cfg.model.ipu = True
return cfg
def ipu_options(opt: argparse.ArgumentParser, cfg: yacs.config.CfgNode, model: Detector):
"""Configurate the IPU options using cfg and opt options.
Parameters:
opt: opt object containing options introduced in the command line
cfg: yacs object containing the config
model[Detector]: a torch Detector Model
Returns:
ipu_opts: Options for the IPU configuration
"""
batches_per_step = cfg.ipuopts.batches_per_step
half = cfg.model.half
ipu_opts = Options()
ipu_opts.deviceIterations(batches_per_step)
ipu_opts.autoRoundNumIPUs(True)
if half:
ipu_opts.Precision.setPartialsType(torch.float16)
model.half()
return ipu_opts
def get_image_and_label(cfg):
url_sample_image = 'http://images.cocodataset.org/val2017/000000100238.jpg'
img_data = requests.get(url_sample_image).content
image = Image.open(io.BytesIO(img_data)).convert('RGB')
height, width = image.size
image_sizes = torch.Tensor([[height, width]])
label = np.array([[39, 0.319508, 0.745573, 0.020516, 0.028479],
[0, 0.484391, 0.583271, 0.360031, 0.833458],
[0, 0.685664, 0.494917, 0.284422, 0.986458],
[0, 0.869086, 0.720719, 0.207766, 0.549563],
[0, 0.168453, 0.526521, 0.333531, 0.914208],
[29, 0.166422, 0.562135, 0.118313, 0.139687],
[29, 0.480703, 0.565990, 0.135906, 0.120813],
[26, 0.591977, 0.203583, 0.045234, 0.121958],
[26, 0.349672, 0.619479, 0.150000, 0.568833],
[29, 0.708734, 0.284302, 0.118188, 0.159854]])
resizer = ResizeImage(cfg.model.image_size)
padder = Pad(cfg.model.image_size)
to_tensor = ToTensor(int(cfg.dataset.max_bbox_per_scale), "uint")
item = (image, label)
item = resizer(item)
image, label = padder(item)
image, label = to_tensor((np.array(image), label))
return image.unsqueeze(axis=0), label.unsqueeze(axis=0), image_sizes
def prepare_model(cfg, debugging_nms=False):
opt = argparse.ArgumentParser()
opt.weights = os.environ['PYTORCH_APPS_DETECTION_PATH'] + '/weights/yolov4-p5-sd.pt'
model = Yolov4P5(cfg, debugging_nms=debugging_nms)
model.eval()
model = load_and_fuse_pretrained_weights(model, opt)
model.optimize_for_inference()
if cfg.model.ipu:
ipu_opts = ipu_options(opt, cfg, model)
return inferenceModel(model, ipu_opts)
else:
return model
def post_process_and_eval(cfg, y, image_sizes, transformed_labels):
stat_recorder = StatRecorder(cfg)
processed_batch = post_processing(cfg, y, image_sizes, transformed_labels)
pruned_preds_batch = processed_batch[0]
processed_labels_batch = processed_batch[1]
for idx, (pruned_preds, processed_labels) in enumerate(zip(pruned_preds_batch, processed_labels_batch)):
stat_recorder.record_eval_stats(processed_labels, pruned_preds, image_sizes[idx])
return stat_recorder.compute_and_print_eval_metrics(print)
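
# A hypothetical end-to-end run wiring the helpers above together (requires
# poptorch/IPU support and the weights under PYTORCH_APPS_DETECTION_PATH):
# cfg = get_cfg()
# model = prepare_model(cfg)
# image, labels, image_sizes = get_image_and_label(cfg)
# y = model(image)
# post_process_and_eval(cfg, y, image_sizes, labels)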
|
nilq/baby-python
|
python
|
from datetime import datetime
import bs4
from bs4 import BeautifulSoup
from django.contrib.auth.models import User
from bookmarks.models import Bookmark, parse_tag_string
from bookmarks.services.tags import get_or_create_tags
def import_netscape_html(html: str, user: User):
soup = BeautifulSoup(html, 'html.parser')
bookmark_tags = soup.find_all('dt')
for bookmark_tag in bookmark_tags:
_import_bookmark_tag(bookmark_tag, user)
def _import_bookmark_tag(bookmark_tag: bs4.Tag, user: User):
link_tag = bookmark_tag.a
if link_tag is None:
return
# Either modify existing bookmark for the URL or create new one
url = link_tag['href']
bookmark = _get_or_create_bookmark(url, user)
bookmark.url = url
bookmark.date_added = datetime.utcfromtimestamp(int(link_tag['add_date']))
bookmark.date_modified = bookmark.date_added
bookmark.unread = link_tag['toread'] == '1'
bookmark.title = link_tag.string
bookmark.owner = user
bookmark.save()
# Set tags
tag_string = link_tag['tags']
tag_names = parse_tag_string(tag_string)
tags = get_or_create_tags(tag_names, user)
bookmark.tags.set(tags)
bookmark.save()
def _get_or_create_bookmark(url: str, user: User):
try:
return Bookmark.objects.get(url=url, owner=user)
except Bookmark.DoesNotExist:
return Bookmark()
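
# Hypothetical usage from a view or management command (file path is a placeholder):
# with open('bookmarks.html', encoding='utf-8') as f:
#     import_netscape_html(f.read(), request.user)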
|
nilq/baby-python
|
python
|
### exercise 49
n = int(input('enter a number: '))
for c in range(1, 11):
    print(' {} * {} = {}'.format(n, c, n*c))
print('Done!!!')
|
nilq/baby-python
|
python
|
import cv2
import numpy as np
from PyQt5.QtCore import Qt, QObject, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap, QDropEvent
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel, QSizePolicy
class QMapCommunicate(QObject):
drop_event = pyqtSignal(QDropEvent)
class PsQMapWidget(QWidget):
def __init__(self, qmap, model=None):
super(PsQMapWidget, self).__init__()
self.qmap = qmap
self.model = model
self.setAcceptDrops(True)
self.c = QMapCommunicate()
self.canvas = QLabel()
# self.canvas.setMaximumSize(400, 400)
sp = self.canvas.sizePolicy()
sp.setHorizontalPolicy(QSizePolicy.Expanding)
sp.setVerticalPolicy(QSizePolicy.Expanding)
self.canvas.setSizePolicy(sp)
self.setContentsMargins(0,0,0,0)
self.canvas.setMinimumSize(20, 20)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# self.setStyleSheet("""
# border: 1px solid red;
# padding: 0px;
# border-radius: 8px;
# margin: 0px;
# """)
# title
self.image_info_label = QLabel(parent=self.canvas)
        self.image_info_label.setText(self.qmap.map_type)
self.image_info_label.setStyleSheet("""
background-color: rgba(31, 27, 36, .7);
padding: 4px;
border-radius: 4px;
margin: 1px;
font-weight: bold;
""")
    def get_affine_cv(self, scale_center, rot, scale, translate):
        # Rotation/scale about scale_center plus translation, in the same
        # layout as cv2.getRotationMatrix2D; the cross terms must mix x and y.
        sin_theta = np.sin(rot)
        cos_theta = np.cos(rot)
        a_11 = scale * cos_theta
        a_21 = -scale * sin_theta
        a_12 = scale * sin_theta
        a_22 = scale * cos_theta
        a_13 = scale_center.x() * (1 - scale * cos_theta) - scale * sin_theta * scale_center.y() + translate.x()
        a_23 = scale_center.y() * (1 - scale * cos_theta) + scale * sin_theta * scale_center.x() + translate.y()
        return np.array([[a_11, a_12, a_13],
                         [a_21, a_22, a_23]], dtype=float)
def update(self):
np_matrix_2d = self.qmap.get_matrix(dim=2)
# typically initial resize
if np_matrix_2d is None:
return
img_2d_cp = np_matrix_2d.astype(np.uint16)
# scale over min max of all slices
m_max = self.qmap.get_max_value()
m_min = self.qmap.get_min_value()
cv_image = (255 * ((img_2d_cp - m_min) / (m_max - m_min))).astype(np.uint8) # .copy()
# scale over min max of single slice
# cv_image = (255 * ((img_2d_cp - img_2d_cp.min()) / img_2d_cp.ptp())).astype(np.uint8) # .copy()
cv_image = cv2.applyColorMap(cv_image, cv2.COLORMAP_HOT) # COLORMAP_HOT
cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB, cv_image)
height, width, rgb = cv_image.shape
        q_img = QImage(cv_image.data, width, height, 3 * width, QImage.Format.Format_RGB888)  # pass the stride explicitly to avoid row misalignment
q_pix_map = QPixmap(q_img)
q_pix_map = q_pix_map.scaled(self.canvas.size(), Qt.KeepAspectRatio)
self.canvas.setAlignment(Qt.AlignCenter)
self.canvas.setPixmap(q_pix_map)
def dragEnterEvent(self, event):
if event.mimeData().hasText():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
self.c.drop_event.emit(event)
# text = event.mimeData().text()
# print(text)
|
nilq/baby-python
|
python
|
# coding: utf-8
import torch
import torch.nn.functional as F
from helpers.utils import int_type, add_noise_to_imgs
class NumDiffAutoGradFn(torch.autograd.Function):
"""
A custom backward pass for our [s, x, y] vector when using hard attention
grid = torch.Size([16, 32, 32, 2])
grad_output_shape = torch.Size([16, 1, 32, 32])
z_grad = torch.Size([48, 1, 32, 32])
expected shape [16, 3] but got [48, 1, 32, 32]
"""
@staticmethod
def forward(ctx, z, crops, window_size, delta):
"""
In the forward pass we receive a Tensor containing the input and return
a Tensor containing the output. ctx is a context object that can be used
to stash information for backward computation. You can cache arbitrary
objects for use in the backward pass using the ctx.save_for_backward method.
"""
window_size_tensor = int_type(z.is_cuda)([window_size])
delta_tensor = int_type(z.is_cuda)([delta])
ctx.save_for_backward(z, crops, window_size_tensor, delta_tensor) # save the full extra window
_, _, cw_b, cw_e, ch_b, ch_e = NumDiffAutoGradFn._get_dims(crops, window_size)
return crops[:, :, cw_b:cw_e, ch_b:ch_e].clone() # return center crop
@staticmethod
def _get_dims(crops, window_size):
W, H = crops.shape[2:]
assert (W - window_size) % 2 == 0, "width - window_size is not divisible by 2"
assert (H - window_size) % 2 == 0, "height - window_size is not divisible by 2"
cw_b, cw_e = [int((W - window_size) / 2.) ,
int((W - window_size) / 2. + window_size)]
ch_b, ch_e = [int((H - window_size) / 2.) ,
int((H - window_size) / 2. + window_size)]
return W, H, cw_b, cw_e, ch_b, ch_e
@staticmethod
def _numerical_grads(crops, window_size, delta=1):
''' takes an enlarged crop window and returns delta-px perturbed grads
eg for delta=1:
full = 34 34 | delta = 1 | center = 1 33 1 33
xmh = 0 32 1 33
xph = 2 34 1 33
ymh = 1 33 0 32
yph = 1 33 2 34
smh = 2 32 2 32
sph = 0 34 0 34
eg for delta=2:
full = 36 36 | delta = 1 | center = 2 34 2 34
xmh = 1 33 2 34
xph = 3 35 2 34
ymh = 2 34 1 33
yph = 2 34 3 35
smh = 3 33 3 33
sph = 1 35 1 35
full = 36 36 | delta = 2 | center = 2 34 2 34
xmh = 0 32 2 34
xph = 4 36 2 34
ymh = 2 34 0 32
yph = 2 34 4 36
smh = 4 32 4 32
sph = 0 36 0 36
'''
assert len(crops.shape) == 4, "num-grad needs 4d inputs"
assert window_size > 1, "window size needs to be larger than 1"
# get dims and sanity check
W, H, cw_b, cw_e, ch_b, ch_e = NumDiffAutoGradFn._get_dims(crops, window_size)
# print("full = ", W, H, " | delta =", delta, " | center = ", cw_b, cw_e, ch_b, ch_e)
assert cw_b - delta >= 0
assert cw_e + delta < W + 1
assert ch_b - delta >= 0
assert ch_e + delta < H + 1
# [f(x+h, y) - f(x-h, y)] / delta
fx_m_h = crops[:, :, cw_b-delta:cw_e-delta, ch_b:ch_e]
fx_p_h = crops[:, :, cw_b+delta:cw_e+delta, ch_b:ch_e]
# print('xmh = ',cw_b-delta,cw_e-delta, ch_b,ch_e)
# print('xph = ', cw_b+delta,cw_e+delta, ch_b,ch_e)
dfx = (fx_p_h - fx_m_h) / delta
# [f(x, y+h) - f(x, y-h)] / delta
fy_m_h = crops[:, :, cw_b:cw_e, ch_b-delta:ch_e-delta]
fy_p_h = crops[:, :, cw_b:cw_e, ch_b+delta:ch_e+delta]
# print('ymh = ', cw_b,cw_e, ch_b-delta,ch_e-delta)
# print('yph = ', cw_b,cw_e, ch_b+delta,ch_e+delta)
dfy = (fy_p_h - fy_m_h) / delta
# approximately this: [f(x, y, s*1.01) - f(x, y, s*0.99)] / delta
fs_m_h = crops[:, :, cw_b+delta:cw_e-delta, ch_b+delta:ch_e-delta]
fs_p_h = crops[:, :, cw_b-delta:cw_e+delta, ch_b-delta:ch_e+delta]
# print("fs_m_h = ", fs_m_h.shape, " | fs_p_h = ", fs_p_h.shape)
fs_m_h = F.interpolate(fs_m_h, size=(window_size, window_size), mode='bilinear')
fs_p_h = F.interpolate(fs_p_h, size=(window_size, window_size), mode='bilinear')
# print('smh = ', cw_b+delta,cw_e-delta, ch_b+delta,ch_e-delta)
# print('sph = ', cw_b-delta,cw_e+delta, ch_b-delta,ch_e+delta)
dfs = (fs_p_h - fs_m_h) / delta # TODO: is this delta right?
# expand 1'st dim and concat, returns [B, 3, C, W, H]
grads = torch.cat(
[dfs.unsqueeze(1), dfx.unsqueeze(1), dfy.unsqueeze(1)], 1
)
# memory cleanups
del dfx; del fx_m_h; del fx_p_h
del dfy; del fy_m_h; del fy_p_h
del dfs; del fs_m_h; del fs_p_h
return grads
@staticmethod
def backward(ctx, grad_output):
"""
In the backward pass we receive a Tensor containing the gradient of the loss
with respect to the output, and we need to compute the gradient of the loss
with respect to the input.
"""
z, crops, window_size, delta = ctx.saved_tensors
window_size, delta = window_size.item(), delta.item()
crops_perturbed = add_noise_to_imgs(crops) # add noise
# get the mean of the gradients over all perturbations
z_grad = torch.cat([NumDiffAutoGradFn._numerical_grads(
crops_perturbed, window_size, k+1).unsqueeze(0) for k in range(0, delta)], 0)
z_grad = torch.mean(z_grad, 0) # MC estimate over all possible perturbations
#z_grad = torch.sum(z_grad, 0) # MC estimate over all possible perturbations, TODO: try mean
z_grad = torch.matmul(grad_output.unsqueeze(1), z_grad) # connect the grads
z_grad = torch.mean(torch.mean(torch.mean(z_grad, -1), -1), -1) # reduce over y, x, chans
#z_grad = torch.sum(torch.sum(torch.sum(z_grad, -1), -1), -1) # reduce over y, x, chans TODO: try mean
del crops; del crops_perturbed # mem cleanups
return z_grad, None, None, None # no need for grads for crops, window_size and delta
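
# Usage sketch (shapes are assumptions based on the docstrings above): given a
# latent [s, x, y] tensor `z` and enlarged crops of shape [B, C, W, H] with
# W = H = window_size + 2 * delta, the differentiable center crop is
#   center = NumDiffAutoGradFn.apply(z, crops, window_size, delta)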
|
nilq/baby-python
|
python
|
import os
import shutil
# Create a new dummy store to run tests on
from tests._stores_for_tests import _TestFixStore
# recs = [
# {'fix_id': 'Fix1', 'operands': {'arg1': '1'}, 'ncml': '<NcML1>'},
# {'fix_id': 'Fix2', 'operands': {'arg2': '2'}, 'ncml': '<NcML2>'}
# ]
recs = [
{
"fix_id": "Fix1",
"title": "Apply Fix 1",
"description": "Applies fix 1",
"category": "test_fixes",
"reference_implementation": "daops.test.test_fix1",
"operands": {"arg1": "1"},
},
{
"fix_id": "Fix2",
"title": "Apply Fix 2",
"description": "Applies fix 2",
"category": "test_fixes",
"reference_implementation": "daops.test.test_fix2",
"operands": {"arg2": "2"},
},
]
store = None
def _clear_store():
dr = _TestFixStore.config["local.base_dir"]
if os.path.isdir(dr):
shutil.rmtree(dr)
def setup_module():
_clear_store()
global store
store = _TestFixStore()
def test_publish_fix_1():
_id = "ds.1.1.1.1.1.1"
store.publish_fix(_id, recs[0])
assert store.get(_id)["fixes"] == [recs[0]]
def test_publish_fix_2():
_id = "ds.1.1.1.1.1.2"
store.publish_fix(_id, recs[1])
assert store.get(_id)["fixes"] == [recs[1]]
_id = "ds.1.1.1.1.1.1"
store.publish_fix(_id, recs[1])
assert store.get(_id)["fixes"] == [recs[0], recs[1]]
def test_withdraw_fix_1():
_id = "ds.1.1.1.1.1.1"
store.withdraw_fix(_id, "Fix1")
assert store.get(_id)["fixes"] == [recs[1]]
store.withdraw_fix(_id, "Fix2")
assert store.exists(_id) is False
def teardown_module():
# pass
_clear_store()
|
nilq/baby-python
|
python
|
import os
import lldb
class ConcurrentLazyFormatter:
def __init__(self, valobj, dict):
self.valobj = lldb.value(valobj)
def get_child_index(self, name):
return 0 # There is only ever one child
def num_children(self):
if self.has_value:
return 1
return 0
def get_child_at_index(self, index):
if index != 0:
return None # There is only ever one child
if not self.has_value:
return None
return self.valobj.value_.storage_.value.sbvalue
def update(self):
self.has_value = True if (
self.valobj
.value_
.storage_
.init
.mutex_
.lock_
.sbvalue.GetValueAsUnsigned()
) != 0 else False
return False
def has_children(self):
return self.has_value
def ConcurrentLazySummary(valobj, _dict):
    computed = valobj.GetNumChildren() > 0
return f"Is Computed={'true' if computed else 'false'}"
def __lldb_init_module(debugger, _dict):
typeName = r"(^folly::ConcurrentLazy<.*$)"
moduleName = os.path.splitext(os.path.basename(__file__))[0]
debugger.HandleCommand(
'type synthetic add '
+ f'-x "{typeName}" '
+ f'--python-class {moduleName}.ConcurrentLazyFormatter'
)
debugger.HandleCommand(
'type summary add --expand --hide-empty --no-value '
+ f'-x "{typeName}" '
+ f'--python-function {moduleName}.ConcurrentLazySummary'
)
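
# To make the formatter available in a debug session, import this file from
# lldb (the path below is a placeholder); importing triggers __lldb_init_module:
#   (lldb) command script import /path/to/this_module.py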
|
nilq/baby-python
|
python
|
import os
import re

from .models import Photo

# The following names are used below but were missing from this snippet; the
# bindings here are assumptions made to keep the module importable.
from . import image_utils  # assumed local helper with fit_and_save()/generate_thumbnail()
rp, relp = os.path.realpath, os.path.relpath  # assumed aliases
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))  # assumed project root
def generate_photo(file_path):
photo_path, width, height = image_utils.fit_and_save(file_path)
thumb_path = image_utils.generate_thumbnail(photo_path)
photo_path, thumb_path = (relp(rp(p), PARENT_DIR) for p in (photo_path, thumb_path))
photo = Photo(image_path=photo_path, thumbnail_path=thumb_path, width=width, height=height)
photo.save()
return photo
def save_file(file, file_path):
with open(file_path, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
|
nilq/baby-python
|
python
|
# project/users/forms.py
from flask_wtf import Form
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Length, EqualTo, Email
class RegisterForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password', validators=[DataRequired(), Length(min=6, max=40)])
confirm = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
password = PasswordField('Password', validators=[DataRequired()])
class EmailForm(Form):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=6, max=40)])
class PasswordForm(Form):
password = PasswordField('Password', validators=[DataRequired()])
|
nilq/baby-python
|
python
|
from .libfm import (
RegressionCallback,
ClassificationCallback,
OrderedProbitCallback,
LibFMLikeCallbackBase,
)
|
nilq/baby-python
|
python
|
import tensorflow as tf
import numpy as np
from tensorflow import keras
# Use simple bilinear upsampling, 9x9, 1x1 and 5x5 convolutional layers. MSE: 0.0028712489
# Described in https://towardsdatascience.com/an-evolution-in-single-image-super-resolution-using-deep-learning-66f0adfb2d6b
# Article: https://arxiv.org/pdf/1501.00092.pdf
def createModel4(TILESIZE_INPUT):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(TILESIZE_INPUT * TILESIZE_INPUT, )))
model.add(tf.keras.layers.Reshape( (TILESIZE_INPUT, TILESIZE_INPUT, 1) ))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.Conv2D(64, (9, 9), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (1, 1), padding='same'))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(1, (5, 5), padding='same'))
model.add(tf.keras.layers.Flatten())
model.compile(optimizer='Adam', loss='mse', metrics=['MeanSquaredError'])
return model
# Use simple bilinear upsampling, 9x9, 1x1 and 5x5 convolutional layers, with dropout. MSE: 0.0028712489
# Described in https://towardsdatascience.com/an-evolution-in-single-image-super-resolution-using-deep-learning-66f0adfb2d6b
# Article: https://arxiv.org/pdf/1501.00092.pdf
DROPOUT = 0.2  # assumed rate; DROPOUT is used below but was never defined in this snippet
def createModel2(TILESIZE_INPUT):
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.InputLayer(input_shape=(TILESIZE_INPUT * TILESIZE_INPUT, )))
model.add(tf.keras.layers.Reshape( (TILESIZE_INPUT, TILESIZE_INPUT, 1) ))
model.add(tf.keras.layers.UpSampling2D(interpolation = 'bilinear'))
model.add(tf.keras.layers.Conv2D(64, (9, 9), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(32, (1, 1), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.Conv2D(1, (5, 5), padding='same'))
model.add(tf.keras.layers.Dropout(DROPOUT))
model.add(tf.keras.layers.Flatten())
model.compile(optimizer='Adam', loss='mse', metrics=['MeanSquaredError'])
return model
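
if __name__ == '__main__':
    # Smoke test with an assumed 16x16 input tile size (any positive size works).
    model = createModel4(16)
    model.summary()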
|
nilq/baby-python
|
python
|
import logging
import os
import re
from robobrowser import RoboBrowser
logger = logging.getLogger(__name__)
id_re = re.compile(r"\(([^)]+)\)")
def scrape_snotel_sites(url=None):
if not url:
url = "http://www.wcc.nrcs.usda.gov/nwcc/yearcount?network=sntl&counttype=statelist&state="
browser = RoboBrowser(parser="html5lib")
browser.open(url)
browser.response.raise_for_status()
table = browser.find_all("table")[4]
sites = [] # list of sites with name and code
cols = [t.text.strip() for t in table.tr.find_all("th")]
for row in table.find_all("tr"):
if row.td and row.td.text.strip() == 'SNTL':
items = [i.text.strip() for i in row.find_all("td")]
sites.append(dict(zip(cols, items)))
return sites
def build_id(listing):
number = id_re.findall(listing["site_name"])[0]
state = listing["state"]
return "{}:{}:{}".format(number, state, "SNTL")
|
nilq/baby-python
|
python
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import scipy.io
from torch.utils.data import Dataset
import pickle
import os
import sys
# Data Processing
def load_file(path_to_file):
return np.genfromtxt(path_to_file,delimiter='\t',skip_header=4).astype('float32')
class WalkDataset(Dataset):
"""Walking dataset"""
def __init__(self, root_dir, sequence_length = 30, transform=None):
"""
Args:
root_dir (string): Path to the database folder.
transform (callable, optional): Optional transform to be applied on
a sample.
"""
files = os.listdir(root_dir)
for i, f in enumerate(files):
files[i]=os.path.join(root_dir,f)
self.files = files
self.transform = transform
f = open('stats','rb')
self.mu, self.sigma = pickle.load(f)
f.close()
self.sl = sequence_length
self.len = None # Total number of fixed length sequences
self.file_len = [0]*len(files) # Number of fixed length sequences in each file
        self.len_cum = [0]*(len(files)+1) # Number of accumulated sequences
def __len__(self):
if self.len is not None:
return self.len
else:
# Calculate length of the entire fixed length dataset
for i, name in enumerate(self.files):
temp = load_file(name)
sl = temp.shape[0] # Number of timesteps
self.file_len[i] = sl//(self.sl+1) # Number of fixed length sequences in the file
self.len_cum[i+1] = np.sum(self.file_len)
self.len = np.sum(self.file_len)
return self.len
def __getitem__(self, idx):
data = []
target = []
#data_lengths = []
idxs = np.arange(len(self))
idxs = idxs.tolist()
if isinstance(idx, slice):
idxs = idxs[idx]
else:
idxs = [idxs[idx]]
last_file = -1
for i, n in enumerate(idxs):
if i>=self.len:
raise IndexError('The requested sequence does not exist')
top = self.len_cum[1]
file_n = 0
while top-1 < n:
file_n += 1
top = self.len_cum[file_n+1]
if last_file != file_n:
t = load_file(self.files[file_n])
t = np.delete(t, np.s_[-3:],1) # Delete the last 3 columns
#t = np.delete(t, np.s_[self.file_len[file_n]*(self.sl+1):],0) # Delete extra timesteps
t = np.divide((t-self.mu), self.sigma) # Normalize data
                out_t = np.delete(t, np.s_[:18],1) # Delete Right Leg Data
last_file = file_n
actual = n + 1 - self.len_cum[file_n]
input_t = t[(actual-1)*self.sl:actual*self.sl,:]
output_t = out_t[(actual-1)*self.sl+1:actual*self.sl+1,:]
#print('first file: '+self.files[0])
#print ('file name: '+self.files[file_n])
#print('data size: {}, target size {}'.format(input_t.shape, output_t.shape))
#sys.stdout.flush()
data.append(input_t)
target.append(output_t)
if len(data)>1:
data = np.stack(data, axis = 1) # Batch Dimension
target = np.stack(target, axis = 1)
else:
data = data[0]
target = target[0]
data = torch.from_numpy(data)
target = torch.from_numpy(target)
#data = Variable(data, requires_grad = False)
#target = Variable(target, requires_grad = False)
sample = {'data':data, 'target':target}
return sample
# for i in range(len(list_files)):
# t = load_file(list_files[i])
# t = np.delete(t,np.s_[-3:],1) # Delete the last 3 columns
# input_t = np.delete(t,np.s_[-1],0) # Delete last element
# input_t = np.divide((input_t-self.mu),self.sigma) # Normalize data
# output_t = np.delete(t,np.s_[0],0) # Delete first element
# output_t = np.divide((output_t -self.mu),self.sigma) # Normalize data
# output_t = np.delete(output_t,np.s_[:18],1) # Delete Right Leg data
# data.append(input_t)
# data_lengths.append(input_t.shape[0]) # Sequence length
# target.append(output_t)
#
# largest = max(data_lengths)
# container = torch.zeros((len(data),largest,36))
# target_container = torch.zeros((len(data),largest,18))
# for i in range(len(data)):
# input_t = data[i]
# output_t = target[i]
# extra = largest-input_t.shape[0]
# container[i] = torch.from_numpy(np.concatenate([input_t,np.zeros((extra,input_t.shape[1]),dtype=input_t.dtype)],0))
# target_container[i] = torch.from_numpy(np.concatenate([output_t,np.zeros((extra,output_t.shape[1]),dtype=output_t.dtype)],0))
# container = Variable(container, requires_grad = False)
# target_container = Variable(target_container, requires_grad = False)
# data_packed = nn.utils.rnn.pack_padded_sequence(container, data_lengths,
# batch_first=True)
# target_packed = nn.utils.rnn.pack_padded_sequence(target_container, data_lengths,
# batch_first=True)
#
# sample = {'data':data_packed, 'target':target_packed}
#
# return sample
# Main model
class Net(nn.Module):
def __init__(self, hidden_dim):
super(Net, self).__init__()
self.hidden_dim = hidden_dim
self.lstm = nn.LSTM(36, hidden_dim, 1, batch_first = True, dropout = 0.5)
self.fc1 = nn.Linear(hidden_dim, 18)
self.fc2 = nn.Linear(100,18)
self.dp = nn.Dropout()
def forward(self, x, hc):
#print('input:{}, h1: {}, h2: {}'.format(x.size(),hc[0].size(),hc[1].size()))
#sys.stdout.flush()
o, hc = self.lstm(x, hc)
#o_unpacked, o_unpacked_length = nn.utils.rnn.pad_packed_sequence(o, batch_first = True)
#x_unpacked, x_unpacked_length = nn.utils.rnn.pad_packed_sequence(x, batch_first = True)
#x_l = torch.chunk(x_unpacked, 2, dim = 2)
x_l = torch.chunk(x, 2, dim = 2)
x_o = x_l[1] # Left Leg data
#o = F.relu(self.fc1(o_unpacked))
#o = F.relu(self.fc1(o))
o = self.fc1(o)
#o = self.dp(o)
#o = self.fc2(o)
o = x_o + o
#print(o.size())
#sys.stdout.flush()
#o = nn.utils.rnn.pack_padded_sequence(o, o_unpacked_length, batch_first=True)
return o, hc
def init_hidden(self,x):
#batch_size = x.batch_sizes
#batch_size = batch_size[0]
batch_size = x.size()[0]
h_0 = torch.zeros(1, batch_size, self.hidden_dim)
c_0 = torch.zeros(1, batch_size, self.hidden_dim)
return (h_0, c_0)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You(donnyyou@163.com)
# Loss Manager for Object Detection.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from loss.modules.det_modules import SSDFocalLoss, SSDMultiBoxLoss
from loss.modules.det_modules import YOLOv3Loss
from loss.modules.det_modules import FRLoss
from utils.tools.logger import Logger as Log
DET_LOSS_DICT = {
'ssd_focal_loss': SSDFocalLoss,
'ssd_multibox_loss': SSDMultiBoxLoss,
'yolov3_loss': YOLOv3Loss,
'fr_loss': FRLoss
}
class DetLossManager(object):
def __init__(self, configer):
self.configer = configer
def get_det_loss(self, key):
if key not in DET_LOSS_DICT:
Log.error('Loss: {} not valid!'.format(key))
exit(1)
loss = DET_LOSS_DICT[key](self.configer)
return loss
|
nilq/baby-python
|
python
|
a = []
# append element at the end.
a.append(2)
a.append(3)
print(a)
# insert at a specific location.
a.insert(0, 5)
a.insert(10, 5)
print(a)
# when the position is beyond the end of the list, it inserts at the end.
a.insert(100, 6)
print(a)
# Deleting elements from a list.
a.remove(5) # removes the first occurrence of the value passed
print(a, len(a))
del a[0]
print(a, len(a))
# access the last element
print(a[-1])
# Printing a list
print(len(a))
for item in range(len(a)): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
for item in range(0, len(a), 1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Reverse printing a list
for item in range(len(a) - 1, -1, -1): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
# Jump a certain number of times.
for item in range(0, len(a), 2): # the len is not inclusive
print("(", item, ", ", a[item], ")")
print("-" * 30)
|
nilq/baby-python
|
python
|
from .Ticket import Ticket, StateTicket
################################################################################
################################################################################
################################################################################
################################################################################
class Single(Ticket):
def getStateTicket(self, diamondState):
stateTicket = None
if diamondState == "firstBase_secondBase_thirdBase":
stateTicket = BasesLoadedSingle()
elif diamondState == "secondBase_thirdBase":
stateTicket = SecondThirdSingle()
elif diamondState == "firstBase_thirdBase":
stateTicket = FirstThirdSingle()
elif diamondState == "firstBase_secondBase":
stateTicket = FirstSecondSingle()
elif diamondState == "thirdBase":
stateTicket = ThirdSingle()
elif diamondState == "secondBase":
stateTicket = SecondSingle()
elif diamondState == "firstBase":
stateTicket = FirstSingle()
else: #Bases Empty
stateTicket = EmptySingle()
return stateTicket
################################################################################
################################################################################
class BasesLoadedSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runners on second and third score
for base in ("thirdBase", "secondBase"):
runnerId, onHook = diamond.popBase(base)
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class SecondThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
# Runners on second and third score
for base in ("thirdBase", "secondBase"):
runnerId, onHook = diamond.popBase(base)
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
        # Runner on third scores
runnerId, onHook = diamond.popBase("thirdBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstSecondSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
        # Runner on second scores
runnerId, onHook = diamond.popBase("secondBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class ThirdSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
        # Runner on third scores
runnerId, onHook = diamond.popBase("thirdBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class SecondSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
        # Runner on second scores
runnerId, onHook = diamond.popBase("secondBase")
scoreKeeper.recordTeamRun()
scoreKeeper.recordBatterRun(runnerId)
scoreKeeper.recordBatterRbi(batterId)
scoreKeeper.recordPitcherRun(onHook)
if scoreKeeper.exOuts() < 3:
scoreKeeper.recordPitcherER(onHook)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class FirstSingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
def moveBases(self, diamond):
diamond.moveBase("firstBase", "secondBase")
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
class EmptySingle(StateTicket):
def recordOuts(self, umpire):
pass
def recordEvents(self, pitcherId, batterId, diamond, umpire, scoreKeeper):
scoreKeeper.recordBatterH(batterId)
scoreKeeper.recordPitcherH(pitcherId)
def moveBases(self, diamond):
pass
def reachedBase(self, pitcherId, batterId, diamond):
diamond.reachedBase("firstBase", batterId, pitcherId)
################################################################################
################################################################################
|
nilq/baby-python
|
python
|
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from copy import deepcopy
# image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream1.jpg'
# image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream2.jpg'
image_path = '/home/kshitij/PycharmProjects/Computer_Vision/Assignment_3/Question_2/iceCream3.jpg'
image = cv2.imread(image_path)
image = cv2.resize(image, (0, 0), fx=0.25, fy=0.25)
orig_img = deepcopy(image)
sh = image.shape
flat_image = image.reshape((image.shape[0] * image.shape[1], 3))
bandwidth2 = estimate_bandwidth(flat_image, quantile=.04, n_samples=1000)
ms = MeanShift(bandwidth2, bin_seeding=True)
ms.fit(flat_image)
labels = ms.labels_
for i in range(len(labels)):
label = labels[i]
flat_image[i] = ms.cluster_centers_[label]
print("DONE CLUSTERING")
res = flat_image.reshape(sh)
# cv2.imshow('orig', orig_img)
# cv2.imshow('res', res)
cv2.imwrite('clustered_iceCream3.jpg', res)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
import random
import networkx as nx
import matplotlib.pyplot as plt
class graph:
__dg = None
def __init__(self):
#self.__dg = nx.DiGraph()
self.__dg = nx.Graph()
def add_nodes(self, nodes):
for i in range(0, len(nodes)):
self.__dg.add_node(nodes[i])
def add_edges(self, edges):
for edge in edges:
for ele in edge['rel']:
self.__dg.add_edge(edge['word'], ele['to'])
def drawAndShow(self, size):
nx.draw(self.__dg, with_labels=True, node_size = size, node_color = self.randomcolor(size), edge_color = self.randomcolor(size))
plt.rcParams['font.sans-serif'] = ['simsun']
plt.show()
def drawAndShow1(self):
nx.draw(self.__dg, with_labels=True)
plt.rcParams['font.sans-serif'] = ['simsun']
plt.show()
def randomcolor(self, size):
rst = []
colorArr = ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
for ele in size:
color = ""
for i in range(6):
color += colorArr[random.randint(0, 14)]
rst.append('#' + color)
return rst
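
if __name__ == '__main__':
    # Demo with data shapes inferred from add_nodes/add_edges above; opens a
    # matplotlib window.
    g = graph()
    g.add_nodes(['alpha', 'beta', 'gamma'])
    g.add_edges([{'word': 'alpha', 'rel': [{'to': 'beta'}, {'to': 'gamma'}]}])
    g.drawAndShow1()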
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import json
import os
from collections import defaultdict
import yaml
from django.conf import settings
from django.core.management.base import BaseCommand
from apps.utils.generate_api_js import main
def esb_json2apigw_yaml(json_file_path: str):
with open(file=json_file_path, encoding="utf-8") as esb_json_file_stream:
esb_json = json.loads(esb_json_file_stream.read())
    # Aggregate API definitions that share the same path
api_info_gby_path = defaultdict(list)
for api_info in esb_json:
api_info_gby_path[api_info["path"]].append(api_info)
apigw_json = {
"swagger": "2.0",
"basePath": "/",
"info": {"version": "0.1", "title": "API Gateway Resources"},
"schemes": ["http"],
"paths": {},
}
for api_path, api_infos in api_info_gby_path.items():
http_method_api_info_map = {}
for api_info in api_infos:
http_method_api_info_map[api_info["registed_http_method"].lower()] = {
"operationId": f"{api_info['resource_classification'].lower()}_{api_info['resource_name']}",
"description": api_info["description"],
"tags": [api_info["resource_classification"]],
"x-bk-apigateway-resource": {
"isPublic": True,
"allowApplyPermission": True,
"matchSubpath": False,
"backend": {
"type": "HTTP",
"method": api_info["registed_http_method"].lower(),
"path": api_info["path"],
"matchSubpath": False,
"timeout": api_info.get("timeout", 0),
"upstreams": {},
"transformHeaders": {},
},
"authConfig": {"userVerifiedRequired": False},
"disabledStages": [],
},
}
apigw_json["paths"][api_path] = http_method_api_info_map
with open(
os.path.join(settings.BASE_DIR, settings.APP_CODE, "support-files", "nodeman.apigw.yaml"),
encoding="utf-8",
mode="w",
) as f:
yaml.dump(apigw_json, f, encoding="utf-8", allow_unicode=True)
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument("-g", "--is_apigw", action="store_true", help="whether for api_gateway")
parser.add_argument("--is_apigw_yaml", action="store_true", help="convert esb json to apigw yaml")
parser.add_argument("-f", type=str, help="json file path, required when select --is-apigw-yaml")
def handle(self, **kwargs):
if kwargs["is_apigw_yaml"]:
esb_json2apigw_yaml(kwargs["f"])
else:
main(kwargs["is_apigw"])
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from alphalogic_api.protocol import rpc_pb2
from alphalogic_api.attributes import Visible, Access
from alphalogic_api.multistub import MultiStub
from alphalogic_api import utils
from alphalogic_api.logger import log
from alphalogic_api.utils import Exit
class AbstractParameter(object):
"""
AbstractParameter implements ParameterService service (see `rpc.proto <https://github.com/Alphaopen/alphalogic_api/
blob/master/alphalogic_api/protocol/proto/rpc.proto>`_)
"""
def _call(self, func_name, *args, **kwargs):
return self.multi_stub.parameter_call(func_name, id=self.id, *args, **kwargs)
def name(self):
"""
Return parameter name
:rtype: unicode
"""
answer = self._call('name')
return answer.name
def display_name(self):
"""
Return parameter display name
:rtype: unicode
"""
answer = self._call('display_name')
return answer.display_name
def desc(self):
"""
Return parameter description
:rtype: unicode
"""
answer = self._call('desc')
return answer.desc
def set_display_name(self, display_name):
"""
Set parameter display name
:arg display_name: unicode
"""
answer = self._call('set_display_name', display_name=display_name)
def set_desc(self, desc):
"""
Set parameter description
:arg desc: unicode
"""
answer = self._call('set_desc', desc=desc)
def is_string(self):
"""
Return True if parameter value type is string
:rtype: bool
"""
answer = self._call('is_string')
return answer.yes
def is_long(self):
"""
Return True if parameter value type is long
:rtype: bool
"""
answer = self._call('is_long')
return answer.yes
def is_double(self):
"""
Return True if parameter value type is double
:rtype: bool
"""
answer = self._call('is_double')
return answer.yes
def is_datetime(self):
"""
Return True if parameter value type is datetime
:rtype: bool
"""
answer = self._call('is_datetime')
return answer.yes
def is_bool(self):
"""
Return True if parameter value type is bool
:rtype: bool
"""
answer = self._call('is_bool')
return answer.yes
def is_map(self):
"""
Return True if parameter value type is map
:rtype: bool
"""
answer = self._call('is_map')
return answer.yes
def is_runtime(self):
"""
Return True if parameter type is Visible.runtime
:rtype: bool
"""
answer = self._call('is_runtime')
return answer.yes
def is_setup(self):
"""
Return True if parameter type is Visible.setup
:rtype: bool
"""
answer = self._call('is_setup')
return answer.yes
def is_hidden(self):
"""
Return True if parameter type is Visible.hidden
:rtype: bool
"""
answer = self._call('is_hidden')
return answer.yes
def is_common(self):
"""
Return True if parameter type is Visible.common
:rtype: bool
"""
answer = self._call('is_common')
return answer.yes
def set_runtime(self):
"""
Set parameter type to Visible.runtime
"""
answer = self._call('set_runtime')
def set_setup(self):
"""
Set parameter type to Visible.setup
"""
answer = self._call('set_setup')
def set_hidden(self):
"""
Set parameter type to Visible.hidden
"""
answer = self._call('set_hidden')
def set_common(self):
"""
Set parameter type to Visible.common
"""
answer = self._call('set_common')
def is_read_only(self):
"""
Return True if parameter access type is Access.read_only
:rtype: bool
"""
answer = self._call('is_read_only')
return answer.yes
def is_read_write(self):
"""
Return True if parameter access type is Access.read_write
:rtype: bool
"""
answer = self._call('is_read_write')
return answer.yes
def set_read_only(self):
"""
Set parameter access type to Access.read_only
"""
answer = self._call('set_read_only')
def set_read_write(self):
"""
Set parameter access type to Access.read_write
"""
answer = self._call('set_read_write')
def is_licensed(self):
"""
Return True if parameter is the license key parameter
:rtype: bool
"""
answer = self._call('is_licensed')
return answer.yes
def set_licensed(self):
"""
Set the license key parameter
"""
answer = self._call('set_licensed')
def clear(self):
"""
Remove all predefined values from the 'choices' argument of the parameter
"""
answer = self._call('clear')
def get(self):
"""
Get parameter value
:rtype: long, float, datetime, bool or unicode
"""
answer = self._call('get')
return utils.value_from_rpc(answer.value)
def set(self, value):
"""
Set parameter value
:arg value: The value type: long, float, datetime, bool or unicode
"""
value_rpc = utils.get_rpc_value(self.value_type, value)
self._call('set', value=value_rpc)
def enums(self):
"""
Get the predefined enumeration of values from the 'choices' argument of the parameter
:rtype: List of values of long, float, datetime, bool or unicode type in a tuple as (value1, value2, value3 ….)
"""
answer = self._call('enums')
return [utils.value_from_rpc(key.value) for key in answer.enums]
def set_enum(self, value, enum_name):
"""
Add/replace enumeration member – a pair (value, name) – for the 'choices' argument of the parameter
:param value: The value type: long, float, datetime, bool or unicode
:param enum_name: enumeration member name
"""
value_rpc = rpc_pb2.Value()
utils.build_rpc_value(value_rpc, type(value), value)
answer = self._call('set_enum', enum_name=enum_name, value=value_rpc)
def set_enums(self, values):
"""
Add/replace multiple enumeration members for the 'choices' argument of the parameter
:param values: An array of values can be one of the following:
* List of values of long, float, datetime, bool or unicode type in a tuple as (value1, value2, value3 ….)
* List of enumeration members in a tuple of tuples as ((value1, 'enum_name1'), (value2, 'enum_name2'), ...)
"""
value_type = self.value_type
req = rpc_pb2.ParameterRequest(id=self.id)
for val in values:
e = req.enums.add()
if isinstance(val, tuple):
e.name = unicode(val[1])
utils.build_rpc_value(e.value, type(val[0]), val[0])
else:
e.name = unicode(val)
utils.build_rpc_value(e.value, type(val), val)
self.multi_stub.call_helper('set_enums', fun_set=MultiStub.parameter_fun_set, request=req,
stub=self.multi_stub.stub_parameter)
def has_enum(self, enum_name):
"""
Return True if parameter has a predefined enumeration of values
:rtype: bool
"""
answer = self._call('has_enum', enum_name=enum_name)
return answer.yes
def owner(self):
"""
Return ID of the parameter's owner
:rtype: uint64
"""
answer = self._call('owner')
return answer.owner
class Parameter(AbstractParameter):
"""
Class Parameter inherits all data elements and methods from :class:`~alphalogic_api.objects.parameter.AbstractParameter`.
"""
index_number = 0
def __init__(self, *args, **kwargs):
self.index_number = Parameter.index_number
Parameter.index_number += 1
for arg in kwargs:
self.__dict__[arg] = kwargs[arg]
self.visible = kwargs.get('visible', Visible.runtime)
self.access = kwargs.get('access', Access.read_write)
self.callback = kwargs.get('callback', None)
if 'value_type' not in kwargs:
raise Exception('value_type not found in Parameter')
if kwargs['value_type'] not in [bool, int, long, float, datetime.datetime, unicode, list, dict]:
raise Exception('value_type={0} is unknown'.format(kwargs['value_type']))
self.default = kwargs.get('default')
self.choices = kwargs.get('choices', None)
def set_multi_stub(self, multi_stub):
self.multi_stub = multi_stub
    def __getattr__(self, item):
        # Only called when normal attribute lookup fails; 'val' is a virtual
        # attribute backed by the remote parameter value. Any other unknown
        # attribute falls through and yields None.
        if item == 'val':
            return self.get()
        if item in self.__dict__:
            return self.__dict__[item]
def __setattr__(self, attr, value):
        if attr == 'val' and self.parameter_name.lower() == 'name':  # changing the device name is forbidden
            log.error('Attempt to change name of device')
            raise Exit
if attr == 'val':
if value is not None:
self.set(value)
elif attr in ['value_type', 'visible', 'access', 'default', 'choices', 'multi_stub', 'id',
'parameter_name', 'callback', 'index_number']:
self.__dict__[attr] = value
return self
def set_choices(self):
if isinstance(self.choices, tuple):
self.clear()
self.set_enums(self.choices)
def get_copy(self):
return Parameter(value_type=self.value_type, default=self.default, visible=self.visible,
access=self.access, callback=self.callback, choices=self.choices)
class ParameterBool(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=bool, **kwargs)
class ParameterLong(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=int, **kwargs)
class ParameterDouble(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=float, **kwargs)
class ParameterDatetime(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=datetime.datetime, **kwargs)
class ParameterString(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=unicode, **kwargs)
class ParameterList(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=list, **kwargs)
class ParameterDict(Parameter):
def __new__(cls, *args, **kwargs):
return Parameter(*args, value_type=dict, **kwargs)
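# Hypothetical usage sketch (Python 2, matching the module above): declare a
# string parameter with a default value and a set of predefined choices.
# mode = ParameterString(default=u'auto',
#                        choices=((u'auto', u'Automatic'), (u'manual', u'Manual')))
# mode.set_choices() would then push the choices once a multi_stub is attached.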
|
nilq/baby-python
|
python
|
from unittest.mock import ANY, patch
from arcsecond import ArcsecondAPI
from click.testing import CliRunner
from oort.cli.cli import upload
from oort.server.errors import InvalidOrgMembershipOortCloudError, UnknownOrganisationOortCloudError
from oort.shared.models import Organisation
from tests.utils import (
TEL_DETAILS,
TEL_UUID,
TEST_LOGIN_ORG_SUBDOMAIN,
TEST_LOGIN_USERNAME,
save_arcsecond_test_credentials,
use_test_database
)
@use_test_database
def test_cli_upload_missing_folders():
save_arcsecond_test_credentials()
runner = CliRunner()
result = runner.invoke(upload)
assert result.exit_code != 0 and result.exception
assert 'Missing argument \'FOLDER\'.' in result.output
@use_test_database
def test_cli_upload_unknown_organisation():
save_arcsecond_test_credentials()
runner = CliRunner()
error = {'detail': 'unknown organisation'}
with patch.object(ArcsecondAPI, 'read', return_value=(None, error)) as mock_method_read:
result = runner.invoke(upload, ['.', '-o', 'dummy_org'])
assert result.exit_code != 0
assert isinstance(result.exception, UnknownOrganisationOortCloudError)
mock_method_read.assert_called_once_with('dummy_org')
@use_test_database
def test_cli_upload_unknown_membership():
save_arcsecond_test_credentials(subdomain='saao')
Organisation.create(subdomain='saao')
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
    # Run the test
runner = CliRunner()
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN])
assert result.exit_code != 0
assert isinstance(result.exception, InvalidOrgMembershipOortCloudError)
@use_test_database
def test_cli_upload_missing_org_telescope():
save_arcsecond_test_credentials()
# Create the watch command org to pass the org check.
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
    # Run the test
runner = CliRunner()
with patch.object(ArcsecondAPI, 'list', return_value=([], None)) as mock_method_read:
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN])
assert result.exit_code == 0
assert f"Here is a list of existing telescopes for organisation {TEST_LOGIN_ORG_SUBDOMAIN}:" in result.output
mock_method_read.assert_called_once()
@use_test_database
def test_cli_upload_with_org_telescope_answer_nope():
# Prepare
save_arcsecond_test_credentials()
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
runner = CliRunner()
# Run
with patch.object(ArcsecondAPI, 'read', return_value=(TEL_DETAILS, None)) as mock_method_read, \
patch('oort.uploader.engine.walker.walk') as mock_method_walk, \
patch('builtins.input', return_value='Nope'):
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN, '-t', TEL_UUID])
# Assert
assert result.exit_code == 0
assert f"arcsecond username: @{TEST_LOGIN_USERNAME}" in result.output.lower()
assert f"uploading to organisation account '{TEST_LOGIN_ORG_SUBDOMAIN}'" in result.output.lower()
mock_method_walk.assert_not_called()
mock_method_read.assert_called_once()
@use_test_database
def test_cli_upload_with_org_telescope_answer_yep():
# Prepare
save_arcsecond_test_credentials()
Organisation.create(subdomain=TEST_LOGIN_ORG_SUBDOMAIN)
runner = CliRunner()
with patch.object(ArcsecondAPI, 'read', return_value=(TEL_DETAILS, None)) as mock_method_read, \
patch('oort.uploader.engine.walker.walk') as mock_method_walk, \
patch('builtins.input', return_value='\n'):
# Run
result = runner.invoke(upload, ['.', '-o', TEST_LOGIN_ORG_SUBDOMAIN, '-t', TEL_UUID])
# Assert
assert result.exit_code == 0
assert f"arcsecond username: @{TEST_LOGIN_USERNAME}" in result.output.lower()
assert f"uploading to organisation account '{TEST_LOGIN_ORG_SUBDOMAIN}'" in result.output.lower()
mock_method_read.assert_called_once()
mock_method_walk.assert_called_once_with('.', ANY, False, debug=False)
|
nilq/baby-python
|
python
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
import tensorflow as tf
x = tf.placeholder(tf.float32,[None,784])
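# The snippet ends here in the original; a hedged completion following the
# classic TF1 softmax-regression tutorial it appears to be based on:
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))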
|
nilq/baby-python
|
python
|
from aiogram import types
from asyncio import sleep
from typing import Union
import NekoGram
async def default_start_function(message: Union[types.Message, types.CallbackQuery]):
neko: NekoGram.Neko = message.conf['neko']
if not await neko.storage.check_user_exists(user_id=message.from_user.id):
lang = message.from_user.language_code if message.from_user.language_code in neko.texts.keys() \
else neko.storage.default_language
await neko.storage.create_user(user_id=message.from_user.id, language=lang)
await sleep(0.1) # Sleep a bit to make sure user is added to the database
else:
# Completely erase user data
await neko.storage.set_user_data(user_id=message.from_user.id)
data = await neko.build_text(text='start', user=message.from_user)
if isinstance(message, types.Message):
await message.reply(text=data.data.text, parse_mode=data.data.parse_mode,
disable_web_page_preview=data.data.no_preview, reply=False,
disable_notification=data.data.silent, reply_markup=data.data.markup)
await message.delete()
else:
await message.message.edit_text(text=data.data.text, disable_web_page_preview=data.data.no_preview,
reply_markup=data.data.markup, parse_mode=data.data.parse_mode)
|
nilq/baby-python
|
python
|
import os
import pandas as pd
import nltk
import re
import spacy
from sklearn.feature_extraction.text import CountVectorizer
from data_module.corpus import data_operations as do
from data_module.corpus.clean import remove_single_quotes
def get_top_n_words(corpus, n=None):
"""
List the top n words in a vocabulary according
to occurrence in a text corpus.
get_top_n_words(["I love Python", "Python is a language programming",
"Hello world", "I love the world"]) ->
[('python', 2),
('world', 2),
('love', 2),
('hello', 1),
('is', 1),
('programming', 1),
('the', 1),
('language', 1)]
"""
vec = CountVectorizer().fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [
(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
########################### EXECUTION STARTS BELLOW ############################
nlp = spacy.load('en_core_web_sm')
# nlp = spacy.load('en')
DATA_FOLDER = os.path.join(
os.path.dirname(__file__), 'data/microservices/')
RAW_ANSWERS_FILE = DATA_FOLDER + 'raw/answers.csv'
# change this data source to change the corpus
RAW_QUESTIONS_FILE = DATA_FOLDER + 'nontech_discussions.csv'
RAW_UNION_FILE = DATA_FOLDER + 'raw/relevance_union.csv'
CLEAN_UNION_FILE = DATA_FOLDER + 'clean/relevance_union.csv'
rawdata_answers = pd.read_csv(RAW_ANSWERS_FILE)
rawdata_questions = pd.read_csv(RAW_QUESTIONS_FILE)
rawdata_union = pd.read_csv(RAW_UNION_FILE)
open_tags = [
    # The first two patterns were garbled in the source; they appear to have
    # been newline matchers, reconstructed here as an assumption.
    '\n', '\r', r'<br>', r'<em>', r'</em>', r'<p>',
    r'</p>', r'<ul>', r'</ul>', r'<li>', r'</li>',
    r'<strong>', r'</strong>', r'<img src=[^>]*>',
    r'<blockquote>', r'</blockquote>', r'<ol>', r'</ol>', r'<hrs>',
    r'<sub>', r'</sub>', r'<h3>', r'</h3>', r'<h1>', r'</h1>', r'<h2>',
    r'</h2>', r'<h4>', r'</h4>', r'<h5>', r'</h5>', r'<div[^>]*>', r'</div>',
    r'<pre>', r'</pre>', r'<code>', r'</code>', r'<a href=[^>]*>', r'(</a>)',
    r'<br>', r'<br/>'
]
closed_tags = [
(r'<a href=[^>]*>',r'(</a>)'),
(r'<div[^>]*>',r'(</div>)'),
(r'<code>', r'</code>'),
(r'<blockquote>',r'</blockquote>')
]
stop_words = set(open('stopword_list.txt', 'r').read().split("\n"))
dscs = rawdata_questions
punctuation_rgx = r"[^()[\]<>+\-_=\*|\^{}$&%#@!?.,:;/\"]+"
for idx, question in dscs.iterrows():
file_name = 'instance_' + str(question["Id"]) + ".txt"
file_path = DATA_FOLDER + 'clean/nontech/' + file_name
with open(file_path, '+w') as fh:
        # Cleaning the question body from HTML
for closed_tag in closed_tags:
question["Body"] = do.remove_block_tag(closed_tag, question["Body"])
for open_tag in open_tags:
question["Body"] = do.remove_single_tag(open_tag, question["Body"])
# Cleaning question title
stage_one = re.findall(punctuation_rgx, question['Title'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_title = nlp(" ".join(stage_three))
leemed_title = " ".join(
[word.lemma_ for word in leemed_title
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
# Cleaning question body
stage_one = re.findall(punctuation_rgx, question['Body'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_body = nlp(" ".join(stage_three))
leemed_body = " ".join(
[word.lemma_ for word in leemed_body
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
fh.write(leemed_title)
fh.write('\n\n')
fh.write(leemed_body)
# Cleaning answers
answers = rawdata_answers.loc[
rawdata_answers.ParentId == question["Id"]]
for idx, answer in answers.iterrows():
for closed_tag in closed_tags:
answer["Body"] = do.remove_block_tag(closed_tag, answer["Body"])
for open_tag in open_tags:
answer["Body"] = do.remove_single_tag(open_tag, answer["Body"])
# Cleaning answer body
stage_one = re.findall(punctuation_rgx, answer['Body'].lower())
stage_one = [word for line in stage_one for word in line.split()]
stage_one = list(map(remove_single_quotes, stage_one))
stage_two = re.findall(r"[^\d]+", " ".join(stage_one))
stage_two = [word for line in stage_two for word in line.split()]
words_to_remove = stop_words.intersection(set(stage_two))
stage_three = [
word for word in stage_two if word not in words_to_remove]
leemed_answer = nlp(" ".join(stage_three))
leemed_answer = " ".join(
[word.lemma_ for word in leemed_answer
if word.lemma_ != "-PRON-" and word.lemma_ != "'s"])
fh.write('\n\n')
fh.write(leemed_answer)
print("Discussion %d printed" % question['Id'])
|
nilq/baby-python
|
python
|
import unittest
from config import TEST_DB_PATH
from repositories.item_repository import ItemRepository
from utilities.csv_utilities import clear_csv, read_csv
class TestItemRepository(unittest.TestCase):
def setUp(self):
clear_csv(TEST_DB_PATH)
self.item_repo = ItemRepository(TEST_DB_PATH)
self.book = ['Patrick Ness', 'The Knife of Never Letting Go', '2008', '0001']
self.blog = [
'Eero Tarmo', 'Soundi.fi',
'Androgyyniä laulua ja irtonaista kävelyä – tältä kuulostaa Arto Tuunelan kevät',
'https://www.soundi.fi/jutut/pariisin-kevat-nokkamies-kasasi-kevat-aiheisen-soittolistan/',
'13.3.2016', '0002'
]
self.video = [
'Christian Duenas', 'Pygame Menu System Tutorial Part 2: Building the Menu and States',
'https://youtu.be/bmRFi7-gy5Y', '24.7.2020', '0003'
]
self.item = ["Pablo Picasso", "Ls Demoiselles d'Avignon", "1907"]
def test_initialises_repo(self):
self.assertTrue(isinstance(self.item_repo._items, dict))
def test_create_book(self):
book = self.item_repo.create('book', self.book)
self.assertTrue(book)
def test_create_blog(self):
blog = self.item_repo.create('blog', self.blog)
self.assertTrue(blog)
def test_create_video(self):
video = self.item_repo.create('video', self.video)
self.assertTrue(video)
def test_create_nonexisting_type(self):
item = self.item_repo.create('painting', self.item)
self.assertFalse(item)
def test_create_duplicate_item(self):
self.item_repo.create('book', self.book)
new_item = self.item_repo.create('book', self.book)
self.assertFalse(new_item)
def test_list_items_empty(self):
items = self.item_repo.list_items()
self.assertEqual(len(items), 0)
def test_list_items_not_empty(self):
self.item_repo.create('book', self.book)
items = self.item_repo.list_items()
self.assertEqual(len(items), 1)
def test_duplicate_not_added_to_items(self):
self.item_repo.create('book', self.book)
self.item_repo.create('book', self.book)
items = self.item_repo.list_items()
self.assertEqual(len(items), 1)
def test_delete_item(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_item('0001')
items = self.item_repo.list_items()
for item in items:
self.assertNotEqual(item[1], '0001')
def test_save_file_not_empty(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.save()
data = read_csv(TEST_DB_PATH)
self.assertEqual(len(data), 3)
def test_delete_all(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_all_items()
items = self.item_repo.list_items()
self.assertFalse(items)
def test_delete_all_clear_csv(self):
self.item_repo.create('book', self.book)
self.item_repo.create('blog', self.blog)
self.item_repo.create('video', self.video)
self.item_repo.delete_all_items()
self.item_repo.save()
data = read_csv(TEST_DB_PATH)
        self.assertEqual(len(data), 0)
def test_find_existing_item(self):
self.item_repo.create('book', self.book)
item = self.item_repo.find_by_id('0001')
self.assertEqual(item['id'], '0001')
def test_find_nonexisting_item_empty_repo(self):
self.assertIsNone(self.item_repo.find_by_id('0004'))
def test_find_nonexisting_item_nonempty_repo(self):
self.item_repo.create('book', self.book)
self.assertIsNone(self.item_repo.find_by_id('0004'))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import subprocess
import requests
from datetime import datetime
def main(args):
if len(args) < 2:
print('usage: sync-cloudflare.py [output path]')
return
output = args[1]
now = datetime.utcnow().isoformat()
ips = []
resp = requests.get('https://www.cloudflare.com/ips-v4')
resp.raise_for_status()
    ips.extend(resp.text.strip().split('\n'))
resp = requests.get('https://www.cloudflare.com/ips-v6')
resp.raise_for_status()
    ips.extend(resp.text.strip().split('\n'))
new_ips = '\n'.join(ips)
try:
with open('/tmp/cloudflare_origin_pulls.cache', 'r') as f:
cached_ips = f.read()
except FileNotFoundError:
cached_ips = ''
if new_ips == cached_ips:
return
lines = []
lines.append('#')
lines.append(f'# Cloudflare Origin Pulls ({now})')
lines.append('#')
lines.append('')
for ip in ips:
lines.append(f'set_real_ip_from {ip};')
lines.append('')
lines.append('real_ip_header CF-Connecting-IP;')
lines.append('')
content = '\n'.join(lines)
with open(output, 'w') as f:
f.write(content)
print(content)
subprocess.run(['/usr/sbin/nginx', '-t'], check=True)
subprocess.run(['/usr/bin/systemctl', 'reload', 'nginx'], check=True)
with open('/tmp/cloudflare_origin_pulls.cache', 'w') as f:
f.write(new_ips)
if __name__ == '__main__':
main(sys.argv)
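# Example of the nginx snippet this script writes (the CIDR below is a
# documentation placeholder; the real entries come from
# https://www.cloudflare.com/ips-v4 and /ips-v6):
#
#   #
#   # Cloudflare Origin Pulls (2021-01-01T00:00:00)
#   #
#   set_real_ip_from 203.0.113.0/24;
#   real_ip_header CF-Connecting-IP;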
|
nilq/baby-python
|
python
|
import os
import sys
from urllib2 import urlopen
import json
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open(r'config.txt'))
apikey = config.get('Sonarr Config', 'apikey')
host = config.get('Sonarr Config', 'host')
port = config.get('Sonarr Config', 'port')
url = 'http://'+host+':'+port+'/api/series?apikey='+apikey
response = urlopen(url)
shows = json.loads(response.read())
shownames = []
for show in shows:
    # each show is a dictionary of series metadata
shownames.append(show['title'])
for name in shownames:
    print name
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from kivy.app import App
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics import Line
from kivy.gesture import Gesture, GestureDatabase
from kivy.vector import Vector
from kivy.properties import NumericProperty,BooleanProperty
from museolib.my_gestures import squares
from museolib.widgets.validation import Valid
def simplegesture(name, point_list):
"""
A simple helper function
"""
g = Gesture()
g.add_stroke(point_list)
g.normalize()
g.name = name
return g
class GestureBoard(FloatLayout):
"""
Our application main widget, derived from touchtracer example, use data
constructed from touches to match symboles loaded from my_gestures.
"""
edge_size = NumericProperty(0)
exists=BooleanProperty(False)
def __init__(self, *args, **kwargs):
super(GestureBoard, self).__init__()
self.gdb = GestureDatabase()
# add pre-recorded gestures to database
for square in squares:
self.gdb.add_gesture(square)
def on_touch_down(self, touch):
super(GestureBoard,self).on_touch_down(touch)
if self.collide_point(*touch.pos):
            if App.get_running_app().config.getboolean('museotouch', 'validation'):
# start collecting points in touch.ud
# create a line to display the points
userdata = touch.ud
userdata['line'] = Line(points=(touch.x, touch.y))
return True
def on_touch_move(self, touch):
if self.collide_point(*touch.pos):
super(GestureBoard,self).on_touch_move(touch)
# store points of the touch movement
try:
touch.ud['line'].points += [touch.x, touch.y]
return True
            except KeyError:
                pass
def on_touch_up(self, touch):
super(GestureBoard,self).on_touch_up(touch)
# touch is over, display informations, and check if it matches some
# known gesture.
        try:
            g = simplegesture(
                '',
                list(zip(touch.ud['line'].points[::2], touch.ud['line'].points[1::2]))
            )
            self.edge_size = (self.stroke_length(list(zip(touch.ud['line'].points[::2], touch.ud['line'].points[1::2]))))/4
            if self.edge_size < 150:
                self.edge_size = 150
        except KeyError:
            # No stroke was recorded for this touch.
            return
# use database to find the more alike gesture, if any
g2 = self.gdb.find(g, minscore=0.9)
if g2:
for index,square in enumerate(squares) :
if (g2[1] == square):
if index in [0,1]:
square_pos=[touch.x,touch.y-self.edge_size]
elif index in [2,3]:
square_pos=[touch.x-self.edge_size,touch.y-self.edge_size]
elif index in [4,5]:
square_pos=[touch.x-self.edge_size,touch.y]
elif index in [6,7]:
square_pos=[touch.x,touch.y]
valid = Valid(pos=(0,0),size=[self.edge_size,self.edge_size],rotation=180,scale_min=0.5)
self.add_widget(valid)
Animation(pos=square_pos,d=.3,rotation=0,transition='out_sine').start(valid)
self.exists=True
break
def stroke_length(self,l):
distance = 0
for index, point in enumerate(l) :
if index < len(l)-1:
distance += Vector(point).distance(l[index+1])
return distance
|
nilq/baby-python
|
python
|
import numpy as np
from sympy import simplify, integrate, zeros, S, Matrix, symbols, pi, cos, sin
from .funcs_aproximacion import producto_asecas
def producto_escalar_trigono(f, g, var=symbols('x'), a=-pi, b=pi, I=None, numeric=False):
"""Aplica el producto escalar <f,g> = 1/(2pi) ∫_[-pi]^[pi] f.g
Args:
f (funcion): f
g (funcion): g
var (variable): variable de integración
a (int, optional): limite inferior de integracion. Defaults to 0.
b (int, optional): limite superior de integracion. Defaults to 1.
I (list, optional): Si no es None, lista de valores sobre los que hacer un sumatorio discreto. Defaults to None.
numeric (bool, optional): si True, realiza una aproximación numérica de la integral usando un método de sympy.
Returns:
funcion, float: Valor del producto escalar. Se devuelve como funcion si tiene variables.
"""
prod = producto_asecas(f, g, var, a, b, I, numeric)
return simplify(prod / (2 * pi))
def coefs_fourier(f, var=symbols('x'), I=[0, 1], n_coefs=2):
"""Genera los coeficientes de la serie de fourier. Esta es la versión continua, donde los coeficientes se calculan usando la expresión de la integral.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
Returns:
dict_coefs: {a_0, a_1, b_1, a_2, b_2, ...}
"""
dict_coefs = {}
dict_coefs['a0'] = simplify(1 / pi * integrate(f, (var, I[0], I[1])))
for i in range(1, n_coefs):
dict_coefs[f'a{i}'] = simplify(1 / pi * integrate(f * cos(i * var), (var, I[0], I[1])))
dict_coefs[f'b{i}'] = simplify(1 / pi * integrate(f * sin(i * var), (var, I[0], I[1])))
return dict_coefs
def coefs_fourier_discr(f, var=symbols('x'), I=[0, 1], n_coefs=2, m=10):
"""Genera los coeficientes de la serie de fourier. Esta es la versión donde la integral se aproxima como un sumatorio discreto de m términos sobre I.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
m (int, optional): Número de elementos en los que dividir I para el sumatorio.
Returns:
dict_coefs: {a_0, a_1, b_1, a_2, b_2, ...}
"""
dict_coefs = {}
lista_xk = np.linspace(I[0], I[1], 2 * m)
dict_coefs['a0'] = np.sum([f.subs(var, xk) * cos(0 * xk) for xk in lista_xk]) / m
for i in range(1, n_coefs):
dict_coefs[f'a{i}'] = np.sum([f.evalf(subs={var: S(xk)}) * cos(S(i) * xk) for xk in lista_xk]) / m
dict_coefs[f'b{i}'] = np.sum([f.evalf(subs={var: S(xk)}) * sin(S(i) * xk) for xk in lista_xk]) / m
return dict_coefs
def serie_fourier(f, var=symbols('x'), I=[0, 1], n_coefs=3, discreto=False, m=10):
"""Genera la serie de Fourier para la función f sobre un intervalo.
Args:
f (funcion): Función a aproximar
var (variable, optional): Variable de la función. Defaults to symbols('x').
I (list, optional): Intervalo de aproximación de la función. Defaults to [0, 1].
n_coefs (int, optional): Número de coeficientes de la serie a generar. Defaults to 2.
discreto (bool, optional): Si True, genera una aproximación discreta de los coeficientes empleando m términos.
m (int, optional): Número de elementos en los que dividir I para el sumatorio.
Returns:
funcion: Función polinómica con la serie de Fourier.
"""
if discreto:
dict_coefs = coefs_fourier_discr(f, var, I, n_coefs, m)
else:
dict_coefs = coefs_fourier(f, var, I, n_coefs)
serie_fourier = dict_coefs['a0'] / 2
for i in range(1, n_coefs):
serie_fourier += dict_coefs[f'a{i}'] * cos(i * var) + dict_coefs[f'b{i}'] * sin(i * var)
return simplify(serie_fourier)
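# Hypothetical usage sketch (not part of the original module): for f(x) = x on
# [-pi, pi] the classical expansion is 2*(sin(x) - sin(2*x)/2 + sin(3*x)/3 - ...),
# so the continuous version should reproduce its first terms:
# x = symbols('x')
# print(serie_fourier(x, var=x, I=[-pi, pi], n_coefs=4))
# -> 2*sin(x) - sin(2*x) + 2*sin(3*x)/3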
|
nilq/baby-python
|
python
|
from ..util.conversion import physical_compatible
from ..util import config, conversion
class df(object):
"""Top-level class for DF classes"""
def __init__(self,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DF object
INPUT:
ro= (None) distance scale
vo= (None) velocity scale
OUTPUT:
HISTORY:
2016-02-28 - Written - Bovy (UofT)
"""
# Parse ro and vo
if ro is None:
self._ro= config.__config__.getfloat('normalization','ro')
self._roSet= False
else:
self._ro= conversion.parse_length_kpc(ro)
self._roSet= True
if vo is None:
self._vo= config.__config__.getfloat('normalization','vo')
self._voSet= False
else:
self._vo= conversion.parse_velocity_kms(vo)
self._voSet= True
return None
def _check_consistent_units(self):
"""Internal function to check that the set of units for this object is consistent with that for the potential"""
assert physical_compatible(self,self._pot), 'Physical conversion for the DF object is not consistent with that of the Potential given to it'
def turn_physical_off(self):
"""
NAME:
turn_physical_off
PURPOSE:
turn off automatic returning of outputs in physical units
INPUT:
(none)
OUTPUT:
(none)
HISTORY:
2017-06-05 - Written - Bovy (UofT)
"""
self._roSet= False
self._voSet= False
return None
def turn_physical_on(self,ro=None,vo=None):
"""
NAME:
turn_physical_on
PURPOSE:
turn on automatic returning of outputs in physical units
INPUT:
ro= reference distance (kpc; can be Quantity)
vo= reference velocity (km/s; can be Quantity)
OUTPUT:
(none)
HISTORY:
2016-06-05 - Written - Bovy (UofT)
2020-04-22 - Don't turn on a parameter when it is False - Bovy (UofT)
"""
        if ro is not False: self._roSet= True
        if vo is not False: self._voSet= True
        if ro is not None and ro:
            self._ro= conversion.parse_length_kpc(ro)
        if vo is not None and vo:
            self._vo= conversion.parse_velocity_kms(vo)
return None
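# Hypothetical usage sketch (an assumption; df is normally subclassed by
# concrete distribution-function classes rather than used directly):
# d = df(ro=8., vo=220.)   # scales in kpc and km/s
# d.turn_physical_off()    # outputs revert to internal units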
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# Copyright (c) 2021 by Fred Morris Tacoma WA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This is the RPZ view.
We try to keep what's in the telemetry view and what's actually being served by
the zone in sync.
"""
import traceback
import logging
from time import time
import asyncio
from asyncio import Queue
import socket
import dns.message
import dns.rdatatype as rdatatype
import dns.rcode as rcode
import dns.query
from dns.exception import DNSException
# The class has a different name (UpdateMessage) in dnspython 2.x. This is for
# version 1.x.
from dns.update import Update as Updater
PRINT_COROUTINE_ENTRY_EXIT = None
TTL = 600
class Connection(object):
"""Manages a queue of requests and replies."""
def __init__(self, event_loop, server, rpz, statistics):
self.event_loop = event_loop
self.server = server
self.rpz = rpz
self.keep_open = False
self.reader_ = None
self.writer_ = None
if statistics:
self.request_stats = statistics.Collector('dns request')
else:
self.request_stats = None
return
def close(self):
if self.writer_ is None:
return
self.writer_.close()
self.reader_ = self.writer_ = None
return
def timer(self, collection):
"""Helper for marshalling coroutines."""
collection = getattr(self, collection)
return collection and collection.start_timer() or None
async def make_request(self, request=None, timer=None):
"""Sends the request and returns the response.
Context is a TCP connection. Request and response are the naked
request / response bytes respectively. Over the wire, this method
handles the prepended length bytes.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.Connection.make_request()')
# Open a connection if necessary.
if self.writer_ is None and request is not None:
self.reader_, self.writer_ = await asyncio.open_connection(self.server, 53)
# Send the request, and await a response.
if request is not None:
self.writer_.write( len(request).to_bytes(2, byteorder='big') + request )
await self.writer_.drain()
response_length = int.from_bytes( await self.reader_.read(2), byteorder='big')
response = b''
while response_length:
resp = await self.reader_.read(response_length)
if not len(resp):
break
response += resp
response_length -= len(resp)
# Close it? Ok, close it.
if not self.keep_open:
self.close()
if self.request_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.Connection.make_request()')
return response
class ZoneEntry(object):
"""All data for an FQDN.
This means the PTR record.
"""
TXT_RECORD_REFRESH_MINUTES = 30
def __init__(self, name):
self.name = name
self.ptr = None
self.last_refresh = time()
return
def update(self, rtype, rval):
if rtype == rdatatype.PTR:
self.ptr = rval
self.last_refresh = time()
return
    def needs_refresh(self):
        """Returns True if the TXT record needs to be refreshed."""
        # The constant is expressed in minutes; time() is seconds, so convert.
        return time() - self.last_refresh > self.TXT_RECORD_REFRESH_MINUTES * 60
class ZoneContents(dict):
"""This is a dictionary of zone entries.
The key is the name and the value is a ZoneEntry.
"""
def update_entry(self, rname, rtype, rval):
rname = rname.split('.in-addr.arpa')[0] + '.in-addr.arpa'
if rname not in self:
self[rname] = ZoneEntry( rname )
self[rname].update(rtype, rval)
return
class EndOfZone(EOFError):
pass
class TelemetryPackage(dict):
"""When we load from the RPZ this is what we get."""
CONVERSIONS = dict(
ptr = lambda x:x,
depth = lambda x:int(x),
first = lambda x:float(x),
last = lambda x:float(x),
count = lambda x:int(x),
trend = lambda x:float(x),
score = lambda x:float(x)
)
COMPLETE = set(CONVERSIONS.keys())
def complete(self):
return self.COMPLETE <= set(self.keys())
def set(self, k, v):
self[k] = self.CONVERSIONS[k](v)
return
def reverse_to_address(reverse_ref):
"""Take the reverse lookup qname format and extract the address."""
return '.'.join(reversed(reverse_ref.split('.in-addr.arpa')[0].split('.')))
def address_to_reverse(address):
"""Take the address and construct the reverse lookup format."""
return '{}.in-addr.arpa'.format('.'.join(reversed(address.split('.'))))
class RPZ(object):
RDTYPES = set((rdatatype.PTR, rdatatype.TXT))
def __init__(self, event_loop, server, rpz, statistics):
self.event_loop = event_loop
self.server = server
self.rpz = rpz.lower().rstrip('.') + '.'
self.task_queue = Queue(loop=event_loop)
self.processor_ = self.event_loop.create_task(self.queue_processor())
self.conn_ = Connection(event_loop, server, rpz, statistics)
self.contents = ZoneContents()
if statistics:
self.axfr_stats = statistics.Collector("rpz axfr")
self.delete_stats = statistics.Collector("rpz delete")
self.update_stats = statistics.Collector("rpz update")
else:
self.axfr_stats = self.delete_stats = self.update_stats = None
return
async def close(self):
"""Cleanup, such as cancelling the queue processor."""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.close()')
self.conn_.close()
self.processor_.cancel()
await self.processor_
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.close()')
return
def timer(self, collection):
"""Helper for marshalling coroutines."""
collection = getattr(self, collection)
return collection and collection.start_timer() or None
def create_task(self, task):
"""Create a task in the RPZ queue."""
self.task_queue.put_nowait(task)
return
def process_zone_rec(self, qname, rtype, rval, telemetry_view):
"""Updates the memory view from a zone rec.
This updates both the RPZ view and the telemetry view.
"""
self.contents.update_entry(qname, rtype, rval)
# For telemetry updates, wait until we have all of the info for an update.
if qname not in self.telemetry_data_cache:
self.telemetry_data_cache[qname] = TelemetryPackage()
if rtype == rdatatype.PTR:
self.telemetry_data_cache[qname].set( 'ptr', rval )
elif rtype == rdatatype.TXT:
for kv in rval.strip('"').split(','):
self.telemetry_data_cache[qname].set( *kv.split('=',1) )
if not self.telemetry_data_cache[qname].complete():
return
# We have all of the requisite data...
telemetry_view.update_resolution_from_rpz(
reverse_to_address(qname.replace(self.rpz, '').lower()),
self.telemetry_data_cache[qname]
)
# Done.
del self.telemetry_data_cache[qname]
return
async def load_axfr_(self, associations):
"""Internal method."""
keep_open = self.conn_.keep_open
self.conn_.keep_open = True
# Construct the AXFR request and send it.
req = dns.message.make_query(self.rpz, 'AXFR')
wire_req = req.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
resp = dns.message.from_wire(wire_resp, xfr=True)
if resp.rcode() != rcode.NOERROR:
self.global_error('axfr - rcode', resp)
return
answer = resp.answer
# First record has to be an SOA record.
if answer[0].rdtype != rdatatype.SOA:
self.global_error('axfr - no soa', resp)
return
if answer[0].name.to_text().lower() != self.rpz:
self.global_error('axfr - wrong soa', resp)
return
answer = answer[1:]
self.telemetry_data_cache = {}
# Process and update the in-memory view.
try:
while True:
for rrset in answer:
name = rrset.name.to_text().lower()
if rrset.rdtype == rdatatype.SOA and name == self.rpz:
raise EndOfZone
if rrset.rdtype not in self.RDTYPES:
continue
for rr in rrset:
self.process_zone_rec(name, rrset.rdtype, rr.to_text(), associations)
wire_resp = await self.conn_.make_request(None, self.conn_.timer('request_stats')) # Get another response, no question asked.
resp = dns.message.from_wire(wire_resp, xfr=True)
if resp.rcode() != rcode.NOERROR:
self.global_error('axfr - rcode 2', resp)
break
answer = resp.answer
except EndOfZone:
pass
self.telemetry_data_cache = None
# Close the connection if we jimmied it open.
self.conn_.keep_open = keep_open
if not keep_open and self.task_queue.empty():
self.conn_.close()
return
async def load_axfr(self, associations, timer):
"""Use AXFR to load the RPZ context and populate associations.
associations is a db.Associator object.
An AXFR results in one or more query responses being sent by the server.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.load_axfr()')
await self.load_axfr_(associations)
if self.axfr_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.load_axfr()')
return
async def delete_(self, address):
"""Internal method."""
qname = address_to_reverse(address)
if qname not in self.contents:
return
# Remove it from the memory view.
del self.contents[qname]
# Remove it from the zone.
qname += '.' + self.rpz
update = Updater(self.rpz)
update.delete(qname)
wire_req = update.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
resp = dns.message.from_wire(wire_resp)
if resp.rcode() != rcode.NOERROR:
self.global_error('delete', resp)
return
async def delete(self, address, timer):
"""Remove the specified address from the RPZ.
The address is a string.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.delete()')
await self.delete_(address)
if self.delete_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.delete()')
return
async def update_(self, address, score):
"""Internal method."""
# Get the expected resolution. When this is called by RearView.solve() the
# best resolution has been determined.
if not address.best_resolution:
logging.error(
'update_(): best_resolution is None for address:{} with resolutions:{}'.format(
address.address, [ k for k in address.resolutions.keys() ]
)
)
return
qname = address_to_reverse(address.address)
ptr_value = address.best_resolution.chain[-1].rstrip('.') + '.'
zone_entry = self.contents.get(qname)
if ( zone_entry is not None
and zone_entry.ptr is not None
and ptr_value == zone_entry.ptr
and not zone_entry.needs_refresh()
):
return
self.contents.update_entry(qname, rdatatype.PTR, ptr_value)
qname = qname + '.' + self.rpz
update = Updater(self.rpz)
update.delete(qname)
update.add(qname, TTL, rdatatype.PTR, ptr_value)
update.add(qname, TTL, rdatatype.TXT,
','.join((
'{}={}'.format( k, v )
for k, v in
( ('depth', len(address.best_resolution.chain)),
('first', address.best_resolution.first_seen),
('last', address.best_resolution.last_seen),
('count', address.best_resolution.query_count),
('trend', address.best_resolution.query_trend),
('score', score)
)
))
)
wire_req = update.to_wire()
wire_resp = await self.conn_.make_request(wire_req, self.conn_.timer('request_stats'))
try:
resp = dns.message.from_wire(wire_resp)
except DNSException as e:
            logging.error('Invalid DNS response to ({} -> {}): {}'.format(address.address, ptr_value, e))
self.conn_.close()
return
if resp.rcode() != rcode.NOERROR:
self.global_error('update', resp)
return
async def update(self, address, score, timer):
"""Add / update the specified address in the RPZ.
The address is a db.Address object.
"""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.update()')
await self.update_(address, score)
if self.update_stats:
timer.stop()
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.update()')
return
async def queue_processor(self):
"""Processes the task queue, in coordination with the Connection."""
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('> rpz.RPZ.queue_processor()')
while True:
task = await self.task_queue.get()
self.conn_.keep_open = not self.task_queue.empty()
try:
await task
self.task_queue.task_done()
except Exception as e:
traceback.print_exc()
self.event_loop.stop()
return
# This actually never exits.
if PRINT_COROUTINE_ENTRY_EXIT:
PRINT_COROUTINE_ENTRY_EXIT('< rpz.RPZ.queue_processor()')
return
def global_error(self, text, response):
"""Called when an error related to processing DNS requests occurs.
All this does at the moment is log the text, but it can be overridden
if needed.
"""
logging.error(text)
return
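# Round-trip behaviour of the two address helpers above, shown as a sketch:
# address_to_reverse('192.0.2.1')              -> '1.2.0.192.in-addr.arpa'
# reverse_to_address('1.2.0.192.in-addr.arpa') -> '192.0.2.1'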
|
nilq/baby-python
|
python
|
from enum import Enum
class Emoji(Enum):
PLAY = "*play*"
FACE_HEARTS = "<3"
FACE_TONGUE = ":P"
FACE_SMILE = ":D"
FACE_CRY_LAUGH = "xD"
FACE_HALO = "=D"
FACE_NERDY = "*nerdy*"
FACE_TEAR = "*cry*"
FACE_SAD = ":("
FACE_ZZZ = "*sleep*"
FACE_ROLLING_EYES = "*rolling-eyes*"
FILM = "*watch*"
POPCORN = "*popcorn*"
FACE_KISS = "*kiss*"
FACE_BLUSH_SMILE = "*smiling*"
FACE_THINK = "*thinking*"
THUMBS_UP = ":thumbsup:"
THUMBS_DOWN = ":thumbsdown:"
PIZZA = "*pizza*"
PARTY = "*party*"
FOLDED_HANDS = "*folded-hands*"
FIRE = "*hot*"
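# Minimal usage sketch (an addition, not part of the original module):
# Emoji.THUMBS_UP.value evaluates to ":thumbsup:", and Emoji(":P") looks a
# member up by its shortcode, returning Emoji.FACE_TONGUE.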
|
nilq/baby-python
|
python
|
"""Dependency injector resource provider unit tests."""
import asyncio
import unittest2 as unittest
from dependency_injector import containers, providers, resources, errors
# Runtime import to get asyncutils module
import os
_TOP_DIR = os.path.abspath(
os.path.sep.join((
os.path.dirname(__file__),
'../',
)),
)
import sys
sys.path.append(_TOP_DIR)
from asyncutils import AsyncTestCase
def init_fn(*args, **kwargs):
return args, kwargs
class ResourceTests(unittest.TestCase):
def test_is_provider(self):
self.assertTrue(providers.is_provider(providers.Resource(init_fn)))
def test_provided_instance_provider(self):
provider = providers.Resource(init_fn)
self.assertIsInstance(provider.provided, providers.ProvidedInstance)
def test_injection(self):
resource = object()
def _init():
_init.counter += 1
return resource
_init.counter = 0
class Container(containers.DeclarativeContainer):
resource = providers.Resource(_init)
dependency1 = providers.List(resource)
dependency2 = providers.List(resource)
container = Container()
list1 = container.dependency1()
list2 = container.dependency2()
self.assertEqual(list1, [resource])
self.assertIs(list1[0], resource)
self.assertEqual(list2, [resource])
self.assertIs(list2[0], resource)
self.assertEqual(_init.counter, 1)
def test_init_function(self):
def _init():
_init.counter += 1
_init.counter = 0
provider = providers.Resource(_init)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(_init.counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(_init.counter, 1)
provider.shutdown()
def test_init_generator(self):
def _init():
_init.init_counter += 1
yield
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_init_class(self):
class TestResource(resources.Resource):
init_counter = 0
shutdown_counter = 0
def init(self):
self.__class__.init_counter += 1
def shutdown(self, _):
self.__class__.shutdown_counter += 1
provider = providers.Resource(TestResource)
result1 = provider()
self.assertIsNone(result1)
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 1)
result2 = provider()
self.assertIsNone(result2)
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 2)
def test_init_not_callable(self):
provider = providers.Resource(1)
with self.assertRaises(errors.Error):
provider.init()
def test_init_and_shutdown(self):
def _init():
_init.init_counter += 1
yield
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = provider.init()
self.assertIsNone(result1)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
provider.shutdown()
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = provider.init()
self.assertIsNone(result2)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
provider.shutdown()
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_shutdown_of_not_initialized(self):
def _init():
yield
provider = providers.Resource(_init)
result = provider.shutdown()
self.assertIsNone(result)
def test_initialized(self):
provider = providers.Resource(init_fn)
self.assertFalse(provider.initialized)
provider.init()
self.assertTrue(provider.initialized)
provider.shutdown()
self.assertFalse(provider.initialized)
def test_call_with_context_args(self):
provider = providers.Resource(init_fn, 'i1', 'i2')
self.assertEqual(provider('i3', i4=4), (('i1', 'i2', 'i3'), {'i4': 4}))
def test_fluent_interface(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.add_kwargs(a3=3, a4=4)
self.assertEqual(provider(), ((1, 2), {'a3': 3, 'a4': 4}))
def test_set_args(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.set_args(3, 4)
self.assertEqual(provider.args, tuple([3, 4]))
def test_clear_args(self):
provider = providers.Resource(init_fn) \
.add_args(1, 2) \
.clear_args()
self.assertEqual(provider.args, tuple())
def test_set_kwargs(self):
provider = providers.Resource(init_fn) \
.add_kwargs(a1='i1', a2='i2') \
.set_kwargs(a3='i3', a4='i4')
self.assertEqual(provider.kwargs, {'a3': 'i3', 'a4': 'i4'})
def test_clear_kwargs(self):
provider = providers.Resource(init_fn) \
.add_kwargs(a1='i1', a2='i2') \
.clear_kwargs()
self.assertEqual(provider.kwargs, {})
def test_call_overridden(self):
provider = providers.Resource(init_fn, 1)
overriding_provider1 = providers.Resource(init_fn, 2)
overriding_provider2 = providers.Resource(init_fn, 3)
provider.override(overriding_provider1)
provider.override(overriding_provider2)
instance1 = provider()
instance2 = provider()
self.assertIs(instance1, instance2)
self.assertEqual(instance1, ((3,), {}))
self.assertEqual(instance2, ((3,), {}))
def test_deepcopy(self):
provider = providers.Resource(init_fn, 1, 2, a3=3, a4=4)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.args, provider_copy.args)
self.assertEqual(provider.kwargs, provider_copy.kwargs)
self.assertIsInstance(provider, providers.Resource)
def test_deepcopy_initialized(self):
provider = providers.Resource(init_fn)
provider.init()
with self.assertRaises(errors.Error):
providers.deepcopy(provider)
def test_deepcopy_from_memo(self):
provider = providers.Resource(init_fn)
provider_copy_memo = providers.Resource(init_fn)
provider_copy = providers.deepcopy(
provider,
memo={id(provider): provider_copy_memo},
)
self.assertIs(provider_copy, provider_copy_memo)
def test_deepcopy_args(self):
provider = providers.Resource(init_fn)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_args(dependent_provider1, dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.args[0]
dependent_provider_copy2 = provider_copy.args[1]
self.assertNotEqual(provider.args, provider_copy.args)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_kwargs(self):
provider = providers.Resource(init_fn)
dependent_provider1 = providers.Factory(list)
dependent_provider2 = providers.Factory(dict)
provider.add_kwargs(d1=dependent_provider1, d2=dependent_provider2)
provider_copy = providers.deepcopy(provider)
dependent_provider_copy1 = provider_copy.kwargs['d1']
dependent_provider_copy2 = provider_copy.kwargs['d2']
self.assertNotEqual(provider.kwargs, provider_copy.kwargs)
self.assertIs(dependent_provider1.cls, dependent_provider_copy1.cls)
self.assertIsNot(dependent_provider1, dependent_provider_copy1)
self.assertIs(dependent_provider2.cls, dependent_provider_copy2.cls)
self.assertIsNot(dependent_provider2, dependent_provider_copy2)
def test_deepcopy_overridden(self):
provider = providers.Resource(init_fn)
object_provider = providers.Object(object())
provider.override(object_provider)
provider_copy = providers.deepcopy(provider)
object_provider_copy = provider_copy.overridden[0]
self.assertIsNot(provider, provider_copy)
self.assertEqual(provider.args, provider_copy.args)
self.assertIsInstance(provider, providers.Resource)
self.assertIsNot(object_provider, object_provider_copy)
self.assertIsInstance(object_provider_copy, providers.Object)
def test_deepcopy_with_sys_streams(self):
provider = providers.Resource(init_fn)
provider.add_args(sys.stdin, sys.stdout, sys.stderr)
provider_copy = providers.deepcopy(provider)
self.assertIsNot(provider, provider_copy)
self.assertIsInstance(provider_copy, providers.Resource)
self.assertIs(provider.args[0], sys.stdin)
self.assertIs(provider.args[1], sys.stdout)
self.assertIs(provider.args[2], sys.stderr)
def test_repr(self):
provider = providers.Resource(init_fn)
self.assertEqual(
repr(provider),
'Resource({0}, initialized={1})'.format(
init_fn,
provider.initialized,
)
)
class AsyncResourceTest(AsyncTestCase):
def test_init_async_function(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.counter += 1
return resource
_init.counter = 0
provider = providers.Resource(_init)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(_init.counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(_init.counter, 1)
self._run(provider.shutdown())
def test_init_async_generator(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.init_counter += 1
yield resource
await asyncio.sleep(0.001)
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_init_async_class(self):
resource = object()
class TestResource(resources.AsyncResource):
init_counter = 0
shutdown_counter = 0
async def init(self):
await asyncio.sleep(0.001)
self.__class__.init_counter += 1
return resource
async def shutdown(self, resource_):
await asyncio.sleep(0.001)
self.__class__.shutdown_counter += 1
assert resource_ is resource
provider = providers.Resource(TestResource)
result1 = self._run(provider())
self.assertIs(result1, resource)
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(TestResource.init_counter, 1)
self.assertEqual(TestResource.shutdown_counter, 1)
result2 = self._run(provider())
self.assertIs(result2, resource)
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(TestResource.init_counter, 2)
self.assertEqual(TestResource.shutdown_counter, 2)
def test_init_with_error(self):
async def _init():
raise RuntimeError()
provider = providers.Resource(_init)
future = provider()
self.assertTrue(provider.initialized)
self.assertTrue(provider.is_async_mode_enabled())
# Disable default exception handling to prevent output
asyncio.get_event_loop().set_exception_handler(lambda loop, context: ...)
with self.assertRaises(RuntimeError):
self._run(future)
# Restore default exception handling
asyncio.get_event_loop().set_exception_handler(None)
self.assertFalse(provider.initialized)
self.assertTrue(provider.is_async_mode_enabled())
def test_init_and_shutdown_methods(self):
async def _init():
await asyncio.sleep(0.001)
_init.init_counter += 1
yield
await asyncio.sleep(0.001)
_init.shutdown_counter += 1
_init.init_counter = 0
_init.shutdown_counter = 0
provider = providers.Resource(_init)
self._run(provider.init())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 0)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 1)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.init())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 1)
self._run(provider.shutdown())
self.assertEqual(_init.init_counter, 2)
self.assertEqual(_init.shutdown_counter, 2)
def test_shutdown_of_not_initialized(self):
async def _init():
yield
provider = providers.Resource(_init)
provider.enable_async_mode()
result = self._run(provider.shutdown())
self.assertIsNone(result)
def test_concurrent_init(self):
resource = object()
async def _init():
await asyncio.sleep(0.001)
_init.counter += 1
return resource
_init.counter = 0
provider = providers.Resource(_init)
result1, result2 = self._run(
asyncio.gather(
provider(),
provider()
),
)
self.assertIs(result1, resource)
self.assertEqual(_init.counter, 1)
self.assertIs(result2, resource)
self.assertEqual(_init.counter, 1)
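
# The tests above exercise the Resource lifecycle end to end. As a minimal
# usage sketch (assuming dependency_injector's providers module, as in the
# tests, and a hypothetical generator initializer):
#
#   def _open_file():
#       f = open('data.txt')   # hypothetical resource
#       yield f
#       f.close()              # runs when shutdown() is called
#
#   provider = providers.Resource(_open_file)
#   f = provider()        # runs the generator up to the yield
#   provider.shutdown()   # resumes it to execute the cleanup code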
# coding: utf-8
# This block fetches the MNIST data and defines a function that splits it
# into train/test DataLoaders with the requested batch sizes.
# Note that the data is only downloaded once; subsequent calls load it
# from the hard drive.
import torch
from torchvision import datasets, transforms
def MNIST_Loaders(train_batch_size, test_batch_size=None):
if test_batch_size is None:
test_batch_size = train_batch_size
normalize = transforms.Normalize((0.1307,), (0.3081,))
Clean = transforms.Compose([transforms.ToTensor(), normalize])
    # Alternative manual download (notebook shell commands), in case the
    # default MNIST mirror is unavailable:
    # !wget www.di.ens.fr/~lelarge/MNIST.tar.gz
    # !tar -zxvf MNIST.tar.gz
train_data = datasets.MNIST('./', train=True,
download=True, transform=Clean)
test_data = datasets.MNIST('./', train=False,
download=True, transform=Clean)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=train_batch_size)
test_loader = torch.utils.data.DataLoader(test_data,
batch_size=test_batch_size)
return train_loader, test_loader
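
# Example usage (a sketch; the batch size of 64 is illustrative):
if __name__ == '__main__':
    train_loader, test_loader = MNIST_Loaders(64)
    images, labels = next(iter(train_loader))
    print(images.shape)  # expected: torch.Size([64, 1, 28, 28])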
import sqlite3

# Open (or create) the database file
connection = sqlite3.connect('map.db')
# Cursor for executing DB commands
c = connection.cursor()
# One-time table creation (commented out; run once)
# c.execute("""CREATE TABLE map (
# id integer,
# lat real,
# lng real,
# comment text
# )""")
# Example insert (commented out)
# c.execute("INSERT INTO map VALUES ('3','35.276718482995214','136.25179933602564','Hikone Castle has Hikonyan')")
# connection.commit()
# Query all rows
c.execute("SELECT * FROM map")
print(c.fetchall())
# No commit is needed for a read-only SELECT
connection.close()
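
# A safer variant of the commented-out INSERT above (a sketch): parameterized
# queries let sqlite3 handle escaping and typing, rather than embedding
# values in the SQL string.
# with sqlite3.connect('map.db') as conn:
#     conn.execute(
#         "INSERT INTO map VALUES (?, ?, ?, ?)",
#         (3, 35.276718482995214, 136.25179933602564, 'Hikone Castle has Hikonyan'),
#     )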
from .models import Language
from rest_framework import serializers
class LanguageSerializer(serializers.ModelSerializer):
    """Serializes Language model instances for the REST API."""

    class Meta:
        model = Language
        fields = ('id', 'name', 'paradigm')
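
# Minimal usage sketch (field values are illustrative, not from the project):
# language = Language(name='Python', paradigm='multi-paradigm')
# LanguageSerializer(language).data
# # -> {'id': None, 'name': 'Python', 'paradigm': 'multi-paradigm'}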