content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
"""Demo script: two independent slide shows embedded side by side."""
import sys
from tkinter import *
from slideShow import SlideShow

# Picture directory comes from the command line, falling back to ../gifs.
picdir = sys.argv[1] if len(sys.argv) == 2 else '../gifs'

root = Tk()
Label(root, text="Two embedded slide shows: each side uses after() loop").pack()
# One show per side; each runs its own after() timer loop.
for side in (LEFT, RIGHT):
    show = SlideShow(msecs=200, parent=root, picdir=picdir, bd=3, relief=SUNKEN)
    show.pack(side=side)
root.mainloop()
|
nilq/baby-python
|
python
|
class Transform(object):
    """Map points between an arbitrary box and the (-1,-1),(1,1) box.

    The forward direction translates then scales; backward applies the
    scaling first and then translates back.
    """

    def __init__(self, scale, translate):
        """Store per-axis scale factors and the translation offset."""
        self.scale = scale
        self.translate = translate

    def forward(self, pt):
        """Transform *pt* from the original box into the unit box."""
        sx, sy = self.scale
        tx, ty = self.translate
        return (pt[0] - tx) / sx, (pt[1] - ty) / sy

    def backward(self, pt):
        """Transform *pt* from the unit box back into the original box."""
        sx, sy = self.scale
        tx, ty = self.translate
        return pt[0] * sx + tx, pt[1] * sy + ty
def get_transform(box):
    """Build a Transform mapping *box* onto the (-1,-1),(1,1) box.

    A single uniform scale (the larger of the two half-extents) is used
    for both axes, so the box keeps its aspect ratio.
    """
    (xmin, ymin), (xmax, ymax) = box
    # The centre of the source box becomes the translation offset.
    cx = (xmin + xmax) * 0.5
    cy = (ymin + ymax) * 0.5
    # The target box is 2 units wide and high, so divide extents by 2.
    scale = max((xmax - xmin) / 2., (ymax - ymin) / 2.)
    return Transform((scale, scale), (cx, cy))
def get_box(pts):
    """Return the tight axis-aligned bounding box around *pts*.

    The result is ``(lower_left, upper_right)`` as coordinate tuples.
    *pts* must be a non-empty sized sequence of 2D points.
    """
    assert len(pts)
    xs = [pt[0] for pt in pts]
    ys = [pt[1] for pt in pts]
    return (min(xs), min(ys)), (max(xs), max(ys))
def _test_transform():
    """Exercise forward/backward mapping on a known bounding box."""
    box = ((70000., 100000.), (75000., 125000.))
    t = get_transform(box)
    # Corners map to the tall edges of the unit box, centre to the origin.
    assert t.forward(box[0]) == (-0.2, -1)
    assert t.forward((72500., 112500.)) == (0, 0)
    assert t.forward(box[1]) == (0.2, 1)
    # The inverse restores the original coordinates.
    assert t.backward((-0.2, -1)) == box[0]
    assert t.backward((0.2, 1)) == box[1]
    assert t.backward((0, 0)) == (72500, 112500)
def _test_box():
    """get_box must return the tight box regardless of point order."""
    pts = [(78000., 100000.), (75000., 125000.)]
    expected = ((75000.0, 100000.0), (78000.0, 125000.0))
    assert get_box(pts) == expected
if __name__ == "__main__":
    # Run the self-tests when executed as a script (silent on success).
    _test_transform()
    _test_box()
|
nilq/baby-python
|
python
|
from facenet_pytorch import InceptionResnetV1
class SiameseNet(nn.Module):
    """Siamese face-pair classifier built on a shared InceptionResnetV1.

    Both inputs are embedded by the same pretrained encoder; four
    symmetric combinations of the two 512-d embeddings are concatenated
    and fed through a small MLP that produces a single logit.
    """

    def __init__(self):
        super().__init__()
        # Shared encoder pretrained on VGGFace2; emits 512-d embeddings.
        self.encoder = InceptionResnetV1(pretrained='vggface2')
        emb_len = 512
        self.last = nn.Sequential(
            nn.Linear(4 * emb_len, 200, bias=False),
            nn.BatchNorm1d(200, eps=0.001, momentum=0.1, affine=True),
            nn.ReLU(),
            nn.Linear(200, 1),
        )

    def forward(self, input1, input2):
        left = self.encoder(input1)
        right = self.encoder(input2)
        # Four feature combinations of the two embeddings, concatenated
        # along the channel dimension.
        features = torch.cat(
            (
                torch.pow(left, 2) - torch.pow(right, 2),
                torch.pow(left - right, 2),
                left * right,
                left + right,
            ),
            dim=1,
        )
        return self.last(features)
|
nilq/baby-python
|
python
|
# SCons/PlatformIO build hooks: convert images before the program build
# and build the web UI before the filesystem upload target.
Import("env")
from SCons.Script import COMMAND_LINE_TARGETS
def buildWeb(source, target, env):
    # Build the web frontend with pnpm (run from the web/ directory).
    env.Execute("cd web; pnpm build")
    print("Successfully built webui")
def convertImages(source, target, env):
    # Run the Go-based image converter (run from the converter/ directory).
    env.Execute("cd converter; go run .")
    print("Successfully converted images")
# NOTE(review): COMMAND_LINE_TARGETS is imported but unused here — confirm
# whether it is still needed before removing.
env.AddPreAction("buildprog", convertImages)
env.AddPreAction("uploadfs", buildWeb)
|
nilq/baby-python
|
python
|
from . import tables
from . unquote import unquote as _unquote
# Public API of this module.
__all__ = 'Quote', 'unquote'
# \uXXXX escape templates: a single code unit, and a surrogate pair.
SHORT_ASCII = '\\u{0:04x}'
LONG_ASCII = '\\u{0:04x}\\u{1:04x}'
def Quote(b):
    """Return the quote handler for truthy *b* (index 1) or falsy (index 0)."""
    return _QUOTES[1] if b else _QUOTES[0]
def unquote(s, strict=True):
    """Unquote *s* using the handler matching its first character."""
    handler = _QUOTES[s[0] == tables.SINGLE]
    return handler.remove(s, strict)
class _Quote:
    """One quoting style, configured from a table dict.

    Every entry of the table (quote character, escape regexes, escape
    dict, ...) is promoted to an instance attribute.
    """

    def __init__(self, table):
        for key, value in table.items():
            setattr(self, key, value)

    def add(self, s, ensure_ascii=False):
        """Quote *s*, escaping characters (ASCII-only when requested)."""
        if ensure_ascii:
            pattern, repl = self.escape_ascii_re, self._replace_ascii
        else:
            pattern, repl = self.escape_re, self._replace_unicode
        return self.quote + pattern.sub(repl, s) + self.quote

    def remove(self, s, strict=False):
        """Inverse of add(): strip quotes and unescape."""
        return _unquote(self, s, strict)

    def _replace_unicode(self, match):
        # Straight table lookup for the matched character.
        return self.escape_dict[match.group(0)]

    def _replace_ascii(self, match):
        s = match.group(0)
        try:
            return self.escape_dict[s]
        except KeyError:
            pass
        n = ord(s) if isinstance(s, str) else s
        if n < 0x10000:
            return SHORT_ASCII.format(n)
        # Character outside the BMP: encode as a UTF-16 surrogate pair.
        n -= 0x10000
        high = 0xD800 | ((n >> 10) & 0x3FF)
        low = 0xDC00 | (n & 0x3FF)
        return LONG_ASCII.format(high, low)
_QUOTES = tuple(_Quote(t) for t in tables.QUOTES)
|
nilq/baby-python
|
python
|
""" predict.py
Predict flower name from an image with predict.py along with the probability of
that name. That is, you'll pass in a single image /path/to/image and return the
flower name and class probability.
Basic usage:
python predict.py /path/to/image checkpoint
Args:
--img
--checkpoint
--top_k
--category_names
--gpu
Returns:
Most likely flower names and class probabilities
Examples:
Return top K most likely classes:
python predict.py input checkpoint --top_k 3
Use a mapping of categories to real names:
python predict.py input checkpoint --category_names cat_to_name.json
Use GPU for inference:
python predict.py input checkpoint --gpu
"""
__author__ = "Ken Norton <ken@kennethnorton.com>"
import json
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms, models
def main():
    """Command-line entry point.

    Parses arguments, rebuilds a model from a checkpoint, runs inference
    on one image and prints the top-K class names with probabilities.
    """

    def str2bool(value):
        # argparse's ``type=bool`` treats every non-empty string as True
        # (so ``--gpu False`` used to *enable* the GPU); parse explicitly.
        return str(value).strip().lower() in ('1', 'true', 'yes', 'y')

    parser = argparse.ArgumentParser(
        description='Predict flower name from an image with along with the \
            probability of that name. ')
    parser.add_argument('--img',
                        type=str,
                        dest='image_path',
                        default='flowers/train/1/image_06770.jpg',
                        action='store',
                        help='File path to an image input')
    parser.add_argument('--checkpoint',
                        default='model_checkpoint.pth',
                        type=str,
                        dest='checkpoint',
                        action='store',
                        help='Model checkpoint')
    parser.add_argument('--top_k',
                        dest='top_k',
                        type=int,
                        default=5,
                        action='store',
                        help='Number of top classes to return')
    parser.add_argument('--category_names',
                        type=str,
                        dest='category_names',
                        default='cat_to_name.json',
                        action='store',
                        help='JSON file containing category-name mapping')
    parser.add_argument('--gpu',
                        type=str2bool,
                        dest='gpu',
                        default=True,
                        action='store',
                        help='Use GPU for inference when available')
    pa = parser.parse_args()
    path_to_img = pa.image_path
    checkpoint = pa.checkpoint
    top_k = pa.top_k
    category_names = pa.category_names
    # Fall back to CPU unless CUDA is available *and* the user asked for GPU.
    gpu = bool(torch.cuda.is_available() and pa.gpu)
    device = torch.device('cuda:0') if gpu else torch.device('cpu')

    with open(category_names, 'r') as f:
        cat_to_name = json.load(f)

    def load_checkpoint(filepath):
        """Rebuild the trained model saved at *filepath*."""
        cp = torch.load(filepath)
        if cp['architecture'] == 'vgg16':
            model = models.vgg16(pretrained=True)
            model.name = 'vgg16'
        elif cp['architecture'] == 'alexnet':
            model = models.alexnet(pretrained=True)
            model.name = 'alexnet'
        else:
            # Previously an unknown architecture crashed later with an
            # UnboundLocalError; fail fast with a clear message instead.
            raise ValueError(
                "Unsupported architecture: {}".format(cp['architecture']))
        # Freeze parameters so we don't backprop through them
        for param in model.parameters():
            param.requires_grad = False
        model.class_to_idx = cp['class_to_idx']
        model.classifier = cp['classifier']
        model.load_state_dict(cp['state_dict'])
        return model

    def process_image(image):
        ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
            returns a NumPy array
        '''
        img_handler = Image.open(image)
        process_img = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        return process_img(img_handler)

    def predict(image_path, model):
        """Return (probabilities, class indices) arrays for the top-K classes."""
        model.to(device)
        img_torch = process_image(image_path).unsqueeze_(0).float()
        with torch.no_grad():
            # Move the input to the same device as the model; the old code
            # called .cuda() unconditionally and crashed on CPU-only hosts.
            output = model(img_torch.to(device))
        probability = F.softmax(output.data, dim=1)
        pb, cl = probability.topk(top_k)
        # .cpu() is a no-op for tensors already on the CPU.
        return pb.cpu().numpy(), cl.cpu().numpy()

    def print_predict(image_path, model):
        """Pretty-print the top-K predictions for *image_path*."""
        probs, classes = predict(image_path, model)
        probabilities = probs[0]
        # Invert class_to_idx so model outputs map back to category keys.
        class_names = {val: key for key, val in model.class_to_idx.items()}
        c_names = [cat_to_name[class_names[x]] for x in classes[0]]
        print('Predictions for ', image_path, ':')
        for i in range(len(c_names)):
            prob = "{0:.2f}".format(probabilities[i] * 100)
            print(prob, '% -- ', c_names[i])

    model = load_checkpoint(checkpoint)
    print_predict(path_to_img, model)
if __name__ == "__main__":
    # Script entry point.
    main()
|
nilq/baby-python
|
python
|
import accounting.config
import pytransact.testsupport
def pytest_configure(config):
    """Pytest hook: point the accounting config at a unique test database."""
    # use_unique_database() returns a teardown callable; stash it in a
    # module global so pytest_unconfigure can invoke it after the session.
    global unconfigure
    unconfigure = pytransact.testsupport.use_unique_database()
    accounting.config.config.set('accounting', 'mongodb_dbname',
                                 pytransact.testsupport.dbname)
def pytest_unconfigure(config):
    """Pytest hook: tear down the unique test database via the stored callable."""
    unconfigure()
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
import math
# Generate the OVSF (Orthogonal Variable Spreading Factor) code tree.
def ovsfGenerator(numberOfMobile):
    """Return the OVSF codes of the shallowest tree level with at least
    *numberOfMobile* codes.

    Codes are encoded as integers whose binary digits are the chips; the
    returned level contains ``2**ceil(log2(numberOfMobile))`` codes.

    Raises ValueError if numberOfMobile < 1 (the original raised the same
    exception type via math.log on 0/negative input).
    """
    if numberOfMobile < 1:
        raise ValueError("numberOfMobile must be >= 1")
    # Tree depth, computed exactly in integer arithmetic. The original
    # math.ceil(math.log(n, 2)) relies on float rounding and can give the
    # wrong depth for large exact powers of two.
    numberOfColumn = (numberOfMobile - 1).bit_length()
    column = [1]
    # Each iteration expands every code C of length 2**i into the pair
    # (C|C, C|~C), doubling both the code length and the column size.
    for i in range(numberOfColumn):
        newColumn = []
        xornumber = pow(2, pow(2, i)) - 1  # mask to bit-invert a code of this level
        for j in column:
            codesize = j.bit_length()
            code = (j << codesize) + j  # duplicate the previous code
            newColumn.append(code)
            code = (j << codesize) + (j ^ xornumber)  # duplicate, inverting the second half
            newColumn.append(code)
        column = newColumn
    return column
|
nilq/baby-python
|
python
|
from pprint import pprint as pp
from funcs import *
import time
import random
import ConfigParser
# Python 2 script: ConfigParser was renamed configparser in Python 3.
config = ConfigParser.ConfigParser()
config.read('config.cfg')
# Polling window in minutes (see wait()).
PERIOD = int(config.get('Run', 'period'))
## currently set to make it so we don't hit window
## rate limits on friendship/show
## should be able to increase to 200 when using
## friendship/lookup
# NOTE(review): LIMIT stays a string (no int() like PERIOD above) —
# confirm get_mentioners accepts a string limit.
LIMIT = config.get('Run', 'limit')
def wait(period):
    """Sleep a uniformly random number of seconds within *period* minutes."""
    max_seconds = period * 60
    delay = random.randint(0, max_seconds)
    pp("Waiting %d minutes" % (float(delay) / 60))
    time.sleep(delay)
def tweetserve():
    """Pick a random follower who mentioned us, follow them back if needed,
    and retweet one of their recent tweets; then wait and repeat forever.
    """
    # get the latest mentioners
    mentioners = get_mentioners(LIMIT)
    pp("# mentioners: %d" % len(mentioners))
    # filter out those whose tweets are protected
    mentioners = [m for m in mentioners if not m['protected']]
    pp("# unprotected mentioners: %d" % len(mentioners))
    ids = list(set([m['id'] for m in mentioners]))
    pp("# unique ids: %d" % len(ids))
    friendships = lookup_friendships(ids)
    # filter out people that don't follow
    friendships = [f for f
                   in friendships
                   if 'followed_by' in f['connections']]
    pp("# following mentioners: %d" % len(friendships))
    # NOTE(review): random.sample raises ValueError when no mentioner
    # qualifies — confirm crashing on an empty list is acceptable here.
    selected = random.sample(friendships, 1)[0]
    pp("Selected friend: %s / @%s" % (selected['name'], selected['screen_name']))
    pp("Connections: %s" % (",".join(selected['connections'])))
    sn = selected['screen_name']
    # selects last 20 tweets by default but could have this be a setting
    tweets = t.statuses.user_timeline(screen_name=sn)
    if 'following' not in selected['connections']:
        new_friend = t.friendships.create(screen_name=sn)
        pp("Created new friendship")
    # Retweet the first tweet the API lets us retweet, oldest-first pop.
    rt = None
    while rt is None and len(tweets) > 0:
        lt = tweets.pop(0)
        try:
            rt = t.statuses.retweet(id=lt['id'])
            pp("RT: @%s: %s" % (lt['user']['screen_name'], lt['text']))
        except:
            # NOTE(review): bare except hides real failures (auth, rate
            # limit); the loop simply tries the next tweet.
            pp("Unable to RT tweet: @%s: %s" % (lt['user']['screen_name'], lt['text']))
            pass
    wait(PERIOD)
    # NOTE(review): this tail call never unwinds — each cycle adds a stack
    # frame and will eventually hit the recursion limit; a while-loop
    # would be safer.
    tweetserve()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3.6
#
# uloop_check.py
#
# Gianna Paulin <pauling@iis.ee.ethz.ch>
# Francesco Conti <f.conti@unibo.it>
#
# Copyright (C) 2019-2021 ETH Zurich, University of Bologna
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# See LICENSE.sw.txt for details.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from uloop_common import *
import math
# high-level loop
def iterate_hl_loop(TP, oh, ow, nof, nif, fs, qa, qw):
    """Golden-model generator for the accelerator's tiled loop nest.

    For every (i, j, k_out, k_in, qa) tile iteration it yields the
    expected stream offsets (W_idx, x_idx, NQ_idx, y_idx) and the current
    tile-index tuple, so uloop_check can compare them against the uloop
    register values.
    """
    # Input spatial size implied by output size and filter size.
    ih = (oh - 1) + fs
    iw = (ow - 1) + fs
    FB = 5  # filter buffer size (FB*FB)
    BS = 4  # block size
    output_buffer_size = 3
    # Tile counts along each tensor dimension.
    n_tiles_K_in = int(math.ceil(nif/TP))
    n_tiles_K_out = int(math.ceil(nof/TP))
    n_tiles_Hout = int(math.ceil(ih/FB))
    n_tiles_Wout = int(math.ceil(iw/FB))
    n_tiles_qa = int(math.ceil(qa/BS))
    n_xpatches = n_tiles_Hout * n_tiles_Wout  #* n_tiles_qa
    # Sizes (in elements) of one activation patch and one output patch.
    xpatch_size = FB*FB*BS*TP
    ypatch_size = output_buffer_size*output_buffer_size*TP
    # reset idices
    y_idx = 0
    W_idx = 0
    NQ_idx = 0
    x_idx = 0
    curr_idx = (0, 0, 0, 0, 0)
    # Weight stream layout differs between the 3x3 case and other filter
    # sizes (qw streams of fs*fs*TP vs fs streams of qw*TP).
    if fs == 3:
        n_w_stream_per_x_patch = qw
        w_stream_size = fs*fs*TP
        total_w_stream = n_w_stream_per_x_patch*w_stream_size
    else:
        n_w_stream_per_x_patch = fs
        w_stream_size = qw*TP
        total_w_stream = n_w_stream_per_x_patch*w_stream_size
    # Loop nest mirrors the hardware tiling order (spatial outermost,
    # activation-bit tile innermost).
    for i_major in range(n_tiles_Hout):
        for j_major in range(n_tiles_Wout):
            for k_out_major in range(n_tiles_K_out):
                for k_in_major in range(n_tiles_K_in):
                    for qa_major in range(n_tiles_qa):
                        # print(i_major, j_major, k_out_major, k_in_major, qa_major)
                        # print(n_tiles_Hout, n_tiles_Wout, n_tiles_K_out, n_tiles_K_in, n_tiles_qa)
                        # Stream offsets for the current tile; the "+2"/"-2"
                        # terms account for per-tile weight-stream headers.
                        W_idx = k_out_major*(n_tiles_K_in*(2+total_w_stream))-k_out_major*2 + k_in_major*(total_w_stream+2)
                        y_idx = i_major*n_tiles_Wout*n_tiles_K_out*ypatch_size + j_major*n_tiles_K_out*ypatch_size + k_out_major*ypatch_size
                        x_idx = i_major*n_tiles_Wout*(n_tiles_K_in*n_tiles_qa*xpatch_size) + j_major*(n_tiles_K_in*n_tiles_qa*xpatch_size) + k_in_major*(n_tiles_qa*xpatch_size) + qa_major*xpatch_size
                        NQ_idx = k_out_major * 32 *(32+16)
                        curr_idx = i_major, j_major, k_out_major, k_in_major, qa_major
                        yield W_idx, x_idx, NQ_idx, y_idx, curr_idx
VERBOSE = True
def uloop_check(TP, oh, ow, nof, nif, fs, qa, qw, verbose=VERBOSE):
    """Run the uloop program and compare every emitted address set against
    the high-level golden model.

    Returns the number of mismatches between the uloop registers
    (W, x, NQ, y) and the values produced by iterate_hl_loop.
    """
    print("> Config TP=%d, oh=%d, ow=%d, nof=%d, nif=%d, fs=%d, qa=%d, qw=%d" % (TP, oh, ow, nof, nif, fs, qa, qw))
    ih = (oh - 1) + fs
    iw = (ow - 1) + fs
    FB = 5  # filter buffer size (FB*FB)
    BS = 4  # block size
    n_tiles_K_in = int(math.ceil(nif/TP))
    n_tiles_K_out = int(math.ceil(nof/TP))
    n_tiles_Hout = int(math.ceil(ih/FB))
    n_tiles_Wout = int(math.ceil(iw/FB))
    n_tiles_qa = int(math.ceil(qa/BS))
    n_xpatches = n_tiles_Hout * n_tiles_Wout
    print("n_xpatches: ", n_xpatches)
    # Loop ranges, innermost first (must match the loop order in code.yml).
    loops_range = [
        n_tiles_qa,
        n_tiles_K_in,
        n_tiles_K_out,
        n_xpatches
    ]
    # Weight stream size depends on the filter-size mode (cf. iterate_hl_loop).
    if fs == 3:
        stream_size_fs = TP*fs*qw
    else:  # fs==1:
        stream_size_fs = TP*fs*fs*qw
    # Initial uloop register file; entries 0-3 are the W/x/NQ/y pointers.
    registers = [
        0,
        0,
        0,
        0,
        0,
        0,
        nif,
        nof,
        TP*FB*FB*4,
        TP*9,
        stream_size_fs,  #TP*fs*qw, # or TP*fs*fs*qw
        TP*fs*fs*qw+2,
        32*(32+16),
        0
    ]
    loops_ops, code, mnem = uloop_load("code.yml")
    loops = uloop_get_loops(loops_ops, loops_range)
    err = 0
    idx = []
    for j in range(NB_LOOPS):
        idx.append(0)
    state = (0, 0, 0, idx)
    busy = False
    execute = True
    # uloop_print_idx(state, registers)
    hidx = 0, 0, 0, 0, 0
    hl_loop = iterate_hl_loop(TP, oh, ow, nof, nif, fs, qa, qw)
    hW, hx, hNQ, hy, hidx = next(hl_loop)
    for i in range(0, 1000000):
        new_registers = uloop_execute(state, code, registers)
        execute, end, busy, state = uloop_state_machine(loops, state, verbose=verbose)
        if execute:
            registers = new_registers
        if not busy:
            try:
                # BUGFIX: original line duplicated the assignment target
                # ("a = a = next(...)") — a harmless but confusing typo.
                hW, hx, hNQ, hy, hidx = next(hl_loop)
            except StopIteration:
                # The golden model may finish first; keep the last values.
                pass
        if verbose:
            uloop_print_idx(state, registers)
        uW, ux, uNQ, uy = registers[0:4]
        if (hW != uW or hx != ux or hNQ != uNQ or hy != uy):
            if verbose:
                print(" ERROR!!!")
                print(" High-level: W=%d x=%d NQ=%d y=%d" % (hW, hx, hNQ, hy))
                print(" uLoop: W=%d x=%d NQ=%d y=%d" % (uW, ux, uNQ, uy))
            err += 1
        if end:
            break
    print(err, " errors", "!!!" if err > 0 else "")
    return err
# Exhaustive parameter sweep; stops at the first failing configuration.
for oh in range(3,12,3):
    for ow in range(3,12,3):
        for fs in (1,0):
            # NOTE(review): uloop_check only special-cases fs==3 vs. the
            # generic branch, yet the sweep uses fs in (1, 0) — confirm
            # that fs=0 is intentional and fs=3 is deliberately skipped.
            for nif in range(32, 64+32, 32):
                for qa in range(1,9):
                    for qw in range(1,9):
                        for nof in range(32, 64+32, 32):
                            err = uloop_check(
                                TP = 32,
                                fs = fs,
                                nof = nof,
                                nif = nif,
                                oh = oh,
                                ow = ow,
                                qa = qa,
                                qw = qw,
                                verbose = False
                            )
                            # Break cascade: unwind every loop level as
                            # soon as one configuration reports errors.
                            if err>0:
                                break
                        if err>0:
                            break
                    if err>0:
                        break
                if err>0:
                    break
            if err>0:
                break
        if err>0:
            break
    if err>0:
        break
# Re-run the failing configuration verbosely for debugging.
if err>0:
    uloop_check(
        TP = 32,
        fs = fs,
        nof = nof,
        nif = nif,
        oh = oh,
        ow = ow,
        qa = qa,
        qw = qw,
        verbose = True
    )
|
nilq/baby-python
|
python
|
"""Cron job code."""
from google.appengine.ext import ndb
import cloudstorage
import datetime
import logging
from api import PermissionDenied
from model import (Email, ErrorChecker, Indexer, User)
import config
import util
class Cron:
    """A collection of functions and a permission scheme for running cron jobs.
    Very similar to api.Api."""

    def __init__(self, api):
        """Requires an admin api object."""
        # Crons run with full permissions, so refuse anything non-admin.
        if not api.user.is_admin:
            raise PermissionDenied("Crons must be run as admin.")
        self.api = api

    def check_for_errors(self):
        """Check for new errors - email on error.
        Must be called with internal_api for full permissions.
        See named_model@Index for full description.
        """
        # Singleton entity keyed by a fixed id; persists its own state.
        checker = ErrorChecker.get_or_insert('the error checker')
        result = checker.check()
        checker.put()
        return result

    def send_pending_email(self):
        """Send any email in the queue.
        Must be called with internal_api for full permissions.
        See id_model@Email for full description.
        """
        return Email.send_pending_email()

    def assign_usernames(self):
        """Assigns usernames to all existing users without a
        current username
        """
        query = User.query()
        changed_users = []
        for user in query:
            user_dict = user.to_dict()
            if user_dict.get('username') is None:
                user.username = User.create_username(**user_dict)
                changed_users.append(user)
            # Removes bad first_names
            elif 'first_name' in user_dict:
                # A first_name containing '@' looks like an email address;
                # keep only the local part and force a username regeneration.
                if '@' in user_dict['first_name']:
                    user.first_name = user_dict['first_name'].split('@')[0]
                    user.username = None
                    changed_users.append(user)
            # Temporary limiting for acceptance
            if len(changed_users) > 40:
                break
        # Single batched write for all modified users.
        ndb.put_multi(changed_users)
        return changed_users

    def index_all(self):
        """Cron job to index all content. Should only be run as
        a job because it will likely timeout otherwise. Comes in handy
        for production where updates are constantly being made.
        """
        indexer = Indexer.get_or_insert('the-indexer')
        index = indexer.get_index()
        entities = indexer.get_all_content_entities()
        indexed_count = 0
        for entity in entities:
            # Added redundancy (we really don't want these!!)
            if getattr(entity, 'listed') is True:
                index.put(entity.to_search_document())
                indexed_count += 1
        # Update last_check on indexer to now
        now = datetime.datetime.now()
        indexer.last_check = (
            now
        )
        indexer.put()
        return indexed_count

    def index(self):
        """Index content entities for text search.
        Must be called with internal_api for full permissions.
        See named_model@Index for full description.
        """
        indexer = Indexer.get_or_insert('the-indexer')
        index = indexer.get_index()
        # Now and the max modified time of indexed entites
        # will be used to update last_check. Basically the
        # last check should either be now time if no items
        # were found to update or the age of the last item
        # that was updated.
        #
        # The reason that we cannot always use now time is
        # because we may not index all of the enties between
        # the last check and now if there are many of them.
        now = datetime.datetime.now()
        max_modified_time_of_indexed_entity = None
        # get changes
        changed_entities = indexer.get_changed_content_entities()
        # post changes
        for entity in changed_entities:
            # Added redundancy (we really don't want these!!)
            if getattr(entity, 'listed') is True:
                index.put(entity.to_search_document())
            # Update the most recent modification time for an
            # indexed entity
            if max_modified_time_of_indexed_entity is None:
                max_modified_time_of_indexed_entity = entity.modified
            else:
                max_modified_time_of_indexed_entity = max(
                    max_modified_time_of_indexed_entity,
                    entity.modified
                )
        # Update last_check so that future calls to index no longer
        # try to index these same items. The logic of what to set
        # last_check to is articulated above.
        any_updates = max_modified_time_of_indexed_entity is not None
        indexer.last_check = (
            now if not
            any_updates
            else max_modified_time_of_indexed_entity
        )
        indexer.put()
        return changed_entities

    def clean_gcs_bucket(self, bucket):
        """Deletes all files in a given GCS bucket.
        Used for emptying out cluttered buckets, like our backup buckets."""
        filenames = [f.filename for f in cloudstorage.listbucket('/' + bucket)]
        files_deleted = []
        for filename in filenames:
            try:
                cloudstorage.delete(filename)
                files_deleted.append(filename)
            except cloudstorage.NotFoundError:
                # We don't care, as long as the bucket winds up empty.
                logging.warning("NotFoundError on file {}".format(filename))
        logging.info("Files deleted: {}".format(files_deleted))
        return files_deleted
|
nilq/baby-python
|
python
|
######################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
# !/bin/python
from lib.state_machine import StateMachine
from lib.ssm import SSM
from lib.cloud_watch_events import CloudWatchEvents
from lib.metrics import Metrics
from lib.string_manipulation import convert_string_to_list
from os import environ
import time
import inspect
from lib.helper import sanitize, get_region
from lib.s3 import S3
import os
import json
from uuid import uuid4
class StepFunctions(object):
    """Starts an execution of the solution's state machine for an event."""

    def __init__(self, event, logger):
        self.logger = logger
        self.event = event
        self.logger.info("State Machine Event")
        self.logger.info(event)

    def trigger_state_machine(self):
        """Trigger the configured state machine, embedding this event."""
        try:
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            sm = StateMachine(self.logger)
            account_id = self.event.get('account')
            # Console-originated events carry no resource-type detail.
            detail_type = self.event.get('detail', {}).get('resource-type')
            if detail_type is None:
                resource_type = 'stno-console'
            else:
                resource_type = account_id + '-' + detail_type + '-tagged'
            state_machine_arn = environ.get('STATE_MACHINE_ARN')
            # Execute State Machine — execution names must be unique, so a
            # timestamp is embedded in the name.
            exec_name = "%s-%s-%s" % ('event-from', resource_type, time.strftime("%Y-%m-%dT%H-%M-%S-%s"))
            self.event.update({'StateMachineArn': state_machine_arn})
            self.logger.info("Triggering {} State Machine".format(state_machine_arn.split(":", 6)[6]))
            response = sm.trigger_state_machine(state_machine_arn, self.event, sanitize(exec_name))
            self.logger.info("State machine triggered successfully, Execution Arn: {}".format(response))
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise
class CWEventPermissions(object):
    """Manages the resource policy (allowed principals) of a CloudWatch
    event bus for a CloudFormation custom resource (create/update/delete)."""

    def __init__(self, event, logger):
        self.event = event
        self.params = event.get('ResourceProperties')
        self.event_bus_name = self.params.get('EventBusName')
        self.logger = logger
        self.logger.info("CloudWatch Event Permissions Handler Event")
        self.logger.info(event)

    def _print_policy(self, cwe):
        """Log the event bus's current resource policy (debug aid)."""
        self.logger.info("Describe Event Bus")
        response = cwe.describe_event_bus(self.event_bus_name)
        policy = 'Policy Not Found' if response.get('Policy') is None else json.loads(response.get('Policy'))
        self.logger.info("Printing Policy")
        self.logger.info(policy)

    def _is_valid_account_length(self, principal):
        """Raise unless *principal* is exactly 12 characters (AWS account id)."""
        account_id_length = 12
        if len(principal) == account_id_length:
            self.logger.info('The AWS Account ID is 12-digit number. Continuing... ')
        else:
            raise Exception('The AWS Account ID should be 12-digit number.')

    def _create(self, principal_list):
        """Grant each principal (account id or organization ARN) permission
        on the event bus; returns the last put_permission response."""
        cwe = CloudWatchEvents(self.logger)
        # identify if principal is list of account IDs or organization arn
        response = None
        self.logger.info("Adding following principals to the policy: {}".format(principal_list))
        for principal in principal_list:
            if 'arn:aws:organizations' in principal:
                self.logger.info('Adding Organization ID to the policy: {}'.format(principal))
                # Last ARN path segment is the organization id.
                split_value = principal.split('/')[-1]
                condition = {
                    'Type': 'StringEquals',
                    'Key': 'aws:PrincipalOrgID',
                    'Value': split_value
                }
                # Once we specify a condition with an AWS organization ID, the recommendation is we use "*" as the value
                # for Principal to grant permission to all the accounts in the named organization.
                response = cwe.put_permission('*', split_value, self.event_bus_name, condition)
            else:
                self._is_valid_account_length(principal)
                self.logger.info('Adding spoke account ID to the policy: {}'.format(principal))
                response = cwe.put_permission(principal, principal, self.event_bus_name)
        self._print_policy(cwe)
        return response

    def create_permissions(self):
        """Custom-resource CREATE handler: add all requested principals."""
        try:
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            # put permissions
            # analyze if the principals is a list of accounts or Org Arn
            self._create(self.params.get('Principals'))
            return None
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def update_permissions(self):
        """Custom-resource UPDATE handler: diff old vs. new principals and
        remove/add only what changed."""
        try:
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            # update permissions
            response = None
            principal_list = self.params.get('Principals')
            old_params = self.event.get('OldResourceProperties')
            old_principal_list = old_params.get('Principals')
            # Generate add and remove lists for update process
            delete_list = list(set(old_principal_list) - set(principal_list))
            self.logger.info('Remove permission for following principal(s): {}'.format(delete_list))
            # if list is not empty
            if delete_list:
                response = self._delete(delete_list)
            add_list = list(set(principal_list) - set(old_principal_list))
            self.logger.info('Put permission for following principal(s): {}'.format(add_list))
            # if list is not empty
            if add_list:
                response = self._create(add_list)
            return response
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    def _delete(self, principal_list):
        """Revoke each principal's permission on the event bus; returns the
        last remove_permission response."""
        cwe = CloudWatchEvents(self.logger)
        self.logger.info("Removing following principals from the policy: {}".format(principal_list))
        # identify if principal is list of account IDs or organization arn
        response = None
        for principal in principal_list:
            if 'arn:aws:organizations' in principal:
                self.logger.info('Deleting Organization ID from the policy: {}'.format(principal))
                split_value = principal.split('/')[-1]
                response = cwe.remove_permission(split_value, self.event_bus_name)
            else:
                self.logger.info('Deleting spoke account ID from the policy: {}'.format(principal))
                response = cwe.remove_permission(principal, self.event_bus_name)
        self._print_policy(cwe)
        return response

    def delete_permissions(self):
        """Custom-resource DELETE handler: remove all requested principals."""
        try:
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            # delete permissions
            # analyze if the principals is a list of accounts or Org Arn
            response = self._delete(self.params.get('Principals'))
            return response
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise
# Deploy all the files needed for console to customer's S3 bucket and
# update the configuration file with customer configurations
class S3ConsoleDeploy(object):
    def __init__(self, event, logger):
        self.event = event
        self.params = event.get('ResourceProperties')
        self.logger = logger
        self.logger.info("Upload console content to s3")
        self.logger.info(event)

    # Upload console content listed in the manifest file to customer's s3 bucket
    def upload_console_files(self):
        """Copy every file named in the bundled console-manifest.json from
        the source bucket to the customer's console bucket."""
        try:
            s3 = S3(self.logger)
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            # Manifest ships next to this module.
            file_path = os.path.join(os.path.dirname(__file__), "console-manifest.json")
            if os.path.exists(file_path):
                with open(file_path, 'r') as json_data:
                    data = json.load(json_data)
                destination_bucket = self.params.get('ConsoleBucket')
                source_bucket = self.params.get('SrcBucket')
                key_prefix = self.params.get('SrcPath') + '/'
                for file in data["files"]:
                    key = 'console/' + file
                    s3.copy_object(source_bucket, key_prefix, key, destination_bucket)
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise

    # Upload the configuration file having customer configurations to customer's s3 bucket
    def upload_config_file(self):
        """Render the Amplify-style stno_config.js from the resource
        properties and write it to the console bucket."""
        try:
            s3 = S3(self.logger)
            self.logger.info("Executing: " + self.__class__.__name__ + "/" + inspect.stack()[0][3])
            stno_config = {
                "aws_project_region": self.params.get("AwsProjectRegion"),
                "aws_cognito_region": self.params.get("AwsCognitoRegion"),
                "aws_user_pools_id": self.params.get("AwsUserPoolsId"),
                "aws_user_pools_web_client_id": self.params.get("AwsUserPoolsWebClientId"),
                "aws_cognito_identity_pool_id": self.params.get("AwsCognitoIdentityPoolId"),
                "oauth": {},
                "aws_appsync_graphqlEndpoint": self.params.get("AwsAppsyncGraphqlEndpoint"),
                "aws_appsync_region": self.params.get("AwsAppsyncRegion"),
                "aws_appsync_authenticationType": "AMAZON_COGNITO_USER_POOLS",
                "aws_content_delivery_bucket": self.params.get("AwsContentDeliveryBucket"),
                "aws_content_delivery_bucket_region": self.params.get("AwsContentDeliveryBucketRegion"),
                "aws_content_delivery_url": self.params.get("AwsContentDeliveryUrl")
            }
            # The console loads this as a plain JS global, not JSON.
            configurations = 'const stno_config = ' + json.dumps(stno_config) + ';'
            console_bucket = self.params.get('ConsoleBucket')
            key = 'console/assets/stno_config.js'
            s3.put_object(console_bucket, key, configurations)
        except Exception as e:
            message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,
                       'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}
            self.logger.exception(message)
            raise
class PrefixListIdToArnConverter(object):
    """Custom-resource helper: expand prefix list ids into full EC2 ARNs."""

    def __init__(self, event, logger):
        self.event = event
        self.params = event.get('ResourceProperties')
        self.logger = logger
        self.logger.info(event)

    def get_prefix_list_arns(self) -> dict:
        """
        Converts the list of prefix list ids to list of prefix list ARNs
        :return: list of arns for the customer provided prefix lists
        """
        prefix_list = self.params.get('PrefixListIds')
        account_id = self.params.get('AccountId')
        prefix_list_ids = convert_string_to_list(prefix_list)
        self.logger.info(f"Processing prefix list ids:"
                         f" {prefix_list_ids}")
        if not prefix_list_ids:
            raise ValueError("STNO CFN Parameter Missing: You must "
                             "provide at least one valid prefix list id.")
        region = environ.get('AWS_REGION')
        list_of_prefix_list_arns = [
            "%s%s:%s%s%s" % ("arn:aws:ec2:",
                             region,
                             account_id,
                             ":prefix-list/",
                             prefix_list_id)
            for prefix_list_id in prefix_list_ids
        ]
        return {"PrefixListArns": list_of_prefix_list_arns}
# Send anonymous metrics
class CFNMetrics(object):
    def __init__(self, event, logger):
        self.event = event
        self.params = event.get('ResourceProperties')
        self.logger = logger
        self.logger.info(event)

    def put_ssm_parameter(self, key, value):
        """Create the SSM parameter *key* only if it does not exist yet."""
        try:
            ssm = SSM(self.logger)
            response = ssm.describe_parameters(key)
            self.logger.info(response)
            # put parameter if key does not exist
            if not response:
                ssm.put_parameter(key, value)
        except Exception as e:
            # Best-effort: metrics bookkeeping must never fail the deployment.
            self.logger.info(e)
            pass

    # put metrics_flag and uuid in the parameter store
    def put_ssm(self):
        """Persist the opt-in flag and a random customer UUID in SSM."""
        try:
            # create SSM parameters to send anonymous data if opted in
            flag_value = self.params.get('MetricsFlag')
            self.put_ssm_parameter('/solutions/stno/metrics_flag', flag_value)
            self.put_ssm_parameter('/solutions/stno/customer_uuid', str(uuid4()))
        except Exception as e:
            # Best-effort: see put_ssm_parameter.
            self.logger.info(e)
            pass

    # Upload the configuration file having customer configurations to customer's s3 bucket
    def send_metrics(self):
        """Send anonymous deployment metrics (best-effort, never raises)."""
        try:
            self.put_ssm()
            self.logger.info(self.params)
            data = {
                "PrincipalType": self.params.get('PrincipalType'),
                "ApprovalNotificationFlag": self.params.get('ApprovalNotification'),
                "AuditTrailRetentionPeriod": self.params.get('AuditTrailRetentionPeriod'),
                "DefaultRoute": self.params.get('DefaultRoute'),
                "Region": get_region(),
                "SolutionVersion": self.params.get('SolutionVersion'),
                "CreatedNewTransitGateway": self.params.get(
                    'CreatedNewTransitGateway')
            }
            send = Metrics(self.logger)
            send.metrics(data)
        except Exception as e:
            # Best-effort: metrics must never break stack operations.
            self.logger.info(e)
            pass
|
nilq/baby-python
|
python
|
"""
Operators with arity 2.
Take 2 colors and mix them in some way. Owns two 0/1 arity operators,
which calculate the initial colors.
Adds a shift by colors, that is, for two colors [rgb] + [RGB] can mix as
([rR gG bB], [rG gB bR], [rB gR bG]).
"""
from abc import ABC, abstractmethod
from .base import Operator, operator_subclass_names, COLOR_TYPE
from .arity_1_operators import ZERO_ONE_OPERATOR
class TwoArityOperator(Operator, ABC):
    """
    This is a two-level operator.

    Modifies and mixes the original values using the formula from the
    `formula` method.

    Has two colors that were originally generated.
    """

    arity = 2
    suboperators: tuple[ZERO_ONE_OPERATOR]

    def __self_init__(self):
        # Random channel offset (0..2) used to rotate the second color's
        # channels before mixing.
        self.shift = self.random.randint(0, 2)

    def __str_extra_args__(self) -> list[str]:
        return [f"shift={self.shift}"]

    @abstractmethod
    def formula(self, col_1: float, col_2: float) -> float:
        """
        The formula by which the two channels of colors are mixed.
        """
        pass

    def func(self, first_col: COLOR_TYPE, second_col: COLOR_TYPE) -> COLOR_TYPE:
        """
        Color generation function.  Mixes the channels of the two input
        colors pairwise, rotating the second color by `self.shift`.
        """
        channels = (
            self.formula(first_col[index], second_col[(index + self.shift) % 3])
            for index in range(3)
        )
        return tuple(channels)
# ======================================================================
class Sum(TwoArityOperator):
    """
    Mixes two colors by taking their arithmetic mean.

    Slightly decreases the brightness of the color because extremes
    average out.
    """

    def formula(self, col_1, col_2):
        return (col_1 + col_2) * 0.5
class Product(TwoArityOperator):
    """
    Mixes two colors by multiplying the channel values.
    """

    def formula(self, col_1, col_2):
        mixed = col_1 * col_2
        return mixed
class Mod(TwoArityOperator):
    """
    Mixes two colors by taking the first channel modulo the second.

    A zero second channel maps to 0, keeping the result well defined.
    Pulls the result toward gray (0.0), since it is bounded by the
    magnitude of the second channel.
    """

    def formula(self, col_1, col_2):
        return col_1 % col_2 if col_2 else 0
class Exponentiation(TwoArityOperator):
    """
    Raises the magnitude of the first color to the power of the second
    color's magnitude; the result's sign follows the second color's sign.

    It increases the brightness of the color, almost always giving
    brightness (< -0.5) | (> 0.5).
    """

    def formula(self, col_1, col_2):
        magnitude = abs(col_1) ** abs(col_2)
        return -magnitude if col_2 < 0 else magnitude
# Union of all 0-, 1- and 2-arity operator types defined so far.
ZERO_ONE_TWO_OPERATOR = ZERO_ONE_OPERATOR | TwoArityOperator

# Export every Operator subclass defined in this module.
__all__ = operator_subclass_names(locals())
|
nilq/baby-python
|
python
|
from urllib.parse import urlparse
def parse_link(link):
    """Return the parsed ``href`` of a tag-like object.

    `link` must expose an ``attrs`` mapping.  When no href is present the
    missing value (None or empty string) is returned unchanged, so the
    result stays falsy.
    """
    href = link.attrs.get("href")
    if not href:
        return href
    return urlparse(href)
|
nilq/baby-python
|
python
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/python-logo-notext.svg"
# style="display:block;margin:auto;width:10%"/>
# <h1 style="text-align:center;">Convolutional Neural Nets</h1>
# <h2 style="text-align:center;">Dr. Matthias Hölzl</h2>
# %% [markdown] slideshow={"slide_type": "subslide"}
# # Darstellung von Bildern
#
# <img src="img/ag/Figure-21-001.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# # Filter für gelbe Pixel
# <img src="img/ag/Figure-21-002.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Funktionsweise des Gelbfilters
# <img src="img/ag/Figure-21-003.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## "Ausstanzen" der Werte
# <img src="img/ag/Figure-21-004.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Verschieben des Filters
# <img src="img/ag/Figure-21-005.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Beispiel
# <img src="img/ag/Figure-21-006.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Parallele Verarbeitung
# <img src="img/ag/Figure-21-007.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Konvolution
#
# <img src="img/ag/Figure-21-008.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Konvolution: Anker
#
# <img src="img/ag/Figure-21-009.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Konvolution: Funktionsweise
# <img src="img/ag/Figure-21-010.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Input und Gewichte haben die gleiche Größe
# <img src="img/ag/Figure-21-011.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Verschieben des Filters
# <img src="img/ag/Figure-21-013.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Beispiel
#
# <img src="img/ag/Figure-21-014.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
#
# <br/>
# <div style="display: block; width: 30%; float: left">
# <ul>
# <li> Rote Felder: -1</li>
# <li>Gelbe Felder: 1</li>
# <li>Schwarze Felder: 0</li>
# <li>Weiße Felder: 1</li>
# </ul>
# </div>
#
# <div style="display: block; width: 50%; float: left;">
# <ul>
# <li>Minimalwert: -6</li>
# <li>Maximalwert: 3</li>
# </ul>
# </div>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-015.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-016.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Andere Betrachtungsweise: Zerschneiden von Bildern
# <img src="img/ag/Figure-21-017.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Hierarchische Features
# <img src="img/ag/Figure-21-018.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-019.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-020.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-021.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-022.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-023.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Randwerte
# <img src="img/ag/Figure-21-024.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-025.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Verkleinerung des Resultats
#
# <img src="img/ag/Figure-21-026.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Padding
# <img src="img/ag/Figure-21-027.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # ConvNet für MNIST
# <img src="img/ag/Figure-21-048.png" style="width: 60%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Performance
# <img src="img/ag/Figure-21-049.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-050.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% slideshow={"slide_type": "slide"}
from fastai.vision.all import *
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
# %%
# Simple ConvNet for MNIST: two conv layers, max pooling, dropout, and two
# fully connected layers.  LogSoftmax output pairs with F.nll_loss below.
model = nn.Sequential(
    nn.Conv2d(1, 32, 3, 1),
    nn.ReLU(),
    nn.Conv2d(32, 64, 3, 1),
    nn.MaxPool2d(2),
    nn.Dropout2d(0.25),
    nn.Flatten(1),
    nn.Linear(9216, 128),  # 64 channels * 12 * 12 spatial positions (28->26->24, pooled to 12)
    nn.ReLU(),
    nn.Dropout2d(0.5),  # NOTE(review): input is 2-D after Flatten; plain Dropout seems intended — confirm
    nn.Linear(128, 10),
    nn.LogSoftmax(dim=1)
)

# %%
# Data transform and training hyperparameters.
transform = transforms.Compose([transforms.ToTensor()])
batch_size = 256
test_batch_size = 512
epochs = 5
learning_rate = 0.001

# %%
# MNIST train/test loaders (downloads the dataset on first run).
train_loader = DataLoader(
    datasets.MNIST('../data', train=True, download=True, transform=transform),
    batch_size=batch_size, shuffle=True)
test_loader = DataLoader(
    datasets.MNIST('../data', train=False, transform=transform),
    batch_size=test_batch_size, shuffle=True)

# %%
# Wrap the plain torch loaders for use with the fastai Learner API.
data = DataLoaders(train_loader, test_loader)

# %%
learn = Learner(data, model, loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)

# %%
# Learning-rate finder (plots loss vs. learning rate).
learn.lr_find()

# %%
# Train with the one-cycle policy.
learn.fit_one_cycle(epochs, learning_rate)
# %% [markdown] slideshow={"slide_type": "slide"}
# # Stride 1
# <img src="img/ag/Figure-21-028.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride 3 $\times$ 2
# <img src="img/ag/Figure-21-029.png" style="width: 20%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride 3 $\times$ 2
# <img src="img/ag/Figure-21-030.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Gleichförmige Strides: 2, 3
# <img src="img/ag/Figure-21-031.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Stride = Filtergröße
# <img src="img/ag/Figure-21-032.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Farbbilder: Mehrere Layer
# <img src="img/ag/Figure-21-033.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-034.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-035.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Stacks von Konvolutionen
# <img src="img/ag/Figure-21-036.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-037.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-038.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1D-Konvolution
# <img src="img/ag/Figure-21-039.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1$\times$1-Konvolution
# <img src="img/ag/Figure-21-040.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # 1$\times$1-Konvolution: Dimensionsreduktion
# <img src="img/ag/Figure-21-041.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Padding und Upsampling (Fractional Convolution)
#
# <img src="img/ag/Figure-21-042.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Kein Padding
# <img src="img/ag/Figure-21-043.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## 1 Pixel Padding
# <img src="img/ag/Figure-21-044.png" style="width: 30%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## 2 Pixel Padding
# <img src="img/ag/Figure-21-045.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Upsampling durch Konvolution
# <img src="img/ag/Figure-21-046.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-047.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # VGG 16
# <img src="img/ag/Figure-21-051.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-052.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-053.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-054.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-055.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-056.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# ## Beispiel-Klassifizierung
# <img src="img/ag/Figure-21-057.png" style="width: 40%; margin-left: auto; margin-right: auto;"/>
# %% [markdown]
# # Klassifizierung von Bildern mit VGG16
# %%
# Download and unpack the image dataset (URLs.DOGS — presumably the fastai
# dogs/cats image set; confirm against fastai's dataset registry).
path = untar_data(URLs.DOGS)

# %%
path.ls()

# %%
# Collect all image files and check how many there are.
files = get_image_files(path/'images')
len(files)

# %%
files[0]
# %%
def label_func(f):
    """Return True when the first character of *f* is upper case.

    The class is encoded in the capitalisation of the file name
    (presumably cats vs. dogs — confirm against the data).
    """
    first_char = f[0]
    return first_char.isupper()
# %%
# Build fastai dataloaders straight from the file names, resizing to 224x224.
dls = ImageDataLoaders.from_name_func(path, files, label_func, item_tfms=Resize(224))

# %%
dls.show_batch()

# %%
# Transfer learning: VGG16 (batch-norm variant) pretrained backbone,
# fine-tuned for one epoch.
learn = cnn_learner(dls, vgg16_bn, metrics=error_rate)
learn.fine_tune(1)

# %%
learn.predict(files[0]), files[0]

# %%
learn.show_results()
# %% [markdown] slideshow={"slide_type": "slide"}
# ## Visualisierung von VGG16 (Gradient Ascent)
# <img src="img/ag/Figure-21-058.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-059.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-060.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-061.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-062.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Visualisierung (Effekt einzelner Layer)
# <img src="img/ag/Figure-21-063.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-064.png" style="width: 35%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-065.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-066.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-067.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# <img src="img/ag/Figure-21-068.png" style="width: 70%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "slide"}
# # Adversarial Examples
# <img src="img/ag/Figure-21-069.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-070.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %% [markdown] slideshow={"slide_type": "subslide"}
# <img src="img/ag/Figure-21-071.png" style="width: 50%; margin-left: auto; margin-right: auto;"/>
# %%
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def ani_easy(tas, cmap='BrBG'):
    """Animate a (time, y, x) data array as a sequence of colormesh frames.

    tas: presumably an xarray DataArray with a leading time dimension —
    it is indexed as tas[0, :, :] and must provide .plot, .shape and
    .coords['time'] (confirm against callers).
    cmap: matplotlib colormap name for the plot.
    Returns a matplotlib.animation.FuncAnimation object.
    """
    # Get a handle on the figure and the axes
    fig, ax = plt.subplots(figsize=(12,6))

    # Plot the initial frame.
    cax = tas[0,:,:].plot(
        add_colorbar=True,
        cmap=cmap,
        #cmap='BrBG',
        #cmap='magma',
        # vmin=-40, vmax=40,
        cbar_kwargs={
            'extend':'neither'
        }
    )

    # One animation frame per time step.
    num_frames = tas.shape[0]

    # Next we need to create a function that updates the values for the colormesh, as well as the title.
    def animate(frame):
        # Update the existing mesh in place instead of re-plotting.
        cax.set_array(tas[frame,:,:].values.flatten())
        ax.set_title("Time = " + str(tas.coords['time'].values[frame])[:13])

    # Finally, we use the animation module to create the animation.
    ani = animation.FuncAnimation(
        fig,             # figure
        animate,         # name of the function above
        frames=num_frames,  # Could also be iterable or list
        interval=200     # ms between frames
    )
    return ani
|
nilq/baby-python
|
python
|
from collections import ChainMap, defaultdict
from itertools import chain
import pathlib
import re
from cldfbench import Dataset as BaseDataset, CLDFSpec
from pybtex.database import parse_string
from pydictionaria.sfm_lib import Database as SFM
from pydictionaria.preprocess_lib import marker_fallback_sense, merge_markers
from pydictionaria import sfm2cldf
# Markers whose values must never be scanned for in-line cross references.
CROSSREF_BLACKLIST = {'xv', 'lg'}
# Markers whose whole value is a (list of) cross reference(s) to other entries.
CROSSREF_MARKERS = {'cf', 'mn', 'sy', 'an', 'cont', 'lv'}
def _unhtml(pair):
marker, value = pair
if marker == 'mr':
return marker, re.sub(r'<compo>([^<]+)</compo>', r'|fv{\1}', value)
else:
return pair
def unhtml_mr(entry):
    """Return `entry` with HTML markup in its 'mr' field converted to markers."""
    if not entry.get('mr'):
        return entry
    return entry.__class__(map(_unhtml, entry))
def prepreprocess(entry):
    """First preprocessing pass applied to every raw SFM entry."""
    return unhtml_mr(entry)
class MySFM(SFM):
    """SFM database built from an in-memory sequence of entries.

    Deliberately skips SFM.__init__ (which is constructed from a file path
    elsewhere in this module) and only populates the underlying list.
    """

    def __init__(self, entries):
        self.extend(entries)
class EntrySplitter:
    """Splits raw SFM entries into separate top-level entries.

    Group markers ('gp') and subentry markers ('se') are hoisted into
    entries of their own.  Homonym numbers are regenerated, and mappings
    from old ids (and citation forms) to the new ids are recorded so that
    cross references can be rewritten afterwards.
    """

    def __init__(self):
        # Per-lemma counter used to regenerate homonym numbers.
        self._homonyms = defaultdict(int)
        # Maps old entry ids to the newly generated ids.
        self.id_map = {}
        # Maps citation forms ('la') and 'la.group' keys to new ids.
        self.la_index = {}

    def _split_groups(self, entry):
        """Partition `entry` at each 'gp' marker.

        Returns (part before the first group, list of group parts).
        """
        groups = [entry.__class__()]
        for marker, value in entry:
            if marker == 'gp':
                groups.append(entry.__class__())
            groups[-1].append((marker, value))
        return groups[0], groups[1:]

    def _split_senses(self, entry):
        """Partition `entry` at each 'sn' marker (pre-'sn' part included)."""
        if not entry:
            return []
        senses = [entry.__class__()]
        for marker, value in entry:
            if marker == 'sn':
                senses.append(entry.__class__())
            senses[-1].append((marker, value))
        return senses

    def _extract_subentries(self, entry):
        """Pull 'se' subentries out of every sense of `entry`.

        Returns (entry without subentries, list of subentry chunks).
        """
        main_entry = entry.__class__()
        subentries = []
        senses = self._split_senses(entry)
        for sense in senses:
            sense_subentries = []
            for marker, value in sense:
                if marker == 'se':
                    sense_subentries.append(entry.__class__())
                # Everything after the first 'se' of a sense belongs to
                # the most recent subentry.
                if sense_subentries:
                    sense_subentries[-1].append((marker, value))
                else:
                    main_entry.append((marker, value))
            subentries.extend(sense_subentries)
        return main_entry, subentries

    def _generate_subentry(self, subentry, parent_id, ps):
        """Turn a subentry chunk into a full entry linked to its parent.

        The 'se' value becomes the lemma, a fresh homonym number is
        assigned, and 'cont' records the (old) parent id.
        """
        lx = subentry.get('se')
        hm = subentry.get('hm') or ''
        old_id = '{}_{}'.format(lx, hm) if hm else lx
        self._homonyms[lx] += 1
        new_hm = str(self._homonyms[lx])
        new_id = '{}{}'.format(lx, new_hm)
        self.id_map[old_id] = new_id
        new_entry = subentry.__class__()
        new_entry.append(('lx', lx))
        new_entry.append(('hm', new_hm))
        new_entry.append(('cont', parent_id))
        # Some subentries override the part of speech of the entry
        if ps and not subentry.get('ps'):
            new_entry.append(('ps', ps))
        new_entry.extend((m, v) for m, v in subentry if m not in ('se', 'hm'))
        return new_entry

    def split_entry(self, entry):
        """Yield one or more new entries generated from a raw entry.

        Entries with groups yield one entry per group (plus subentries);
        otherwise a single renumbered entry (plus subentries) is yielded.
        """
        lx = entry.get('lx')
        la = entry.get('la')
        hm = entry.get('hm') or ''
        # Old ids are based on the citation form when present.
        citation_form = la or lx
        old_id = '{}_{}'.format(citation_form, hm) if hm else citation_form
        ps = entry.get('ps')
        main_entry, groups = self._split_groups(entry)
        if groups:
            new_entries = []
            subentries = []
            for group in groups:
                # NOTE(review): result unused — looks like dead code; confirm.
                group_entries = self._split_groups(group)
                gp = group.get('gp') or ''
                old_gid = '{}.{}'.format(old_id, gp) if gp else old_id
                self._homonyms[lx] += 1
                new_hm = str(self._homonyms[lx])
                new_id = '{}{}'.format(lx, new_hm)
                self.id_map[old_gid] = new_id
                # First mapping wins for citation-form lookups.
                if la:
                    if la not in self.la_index:
                        self.la_index[la] = new_id
                    la_gid = '{}.{}'.format(la, gp) if gp else la
                    if la_gid not in self.la_index:
                        self.la_index[la_gid] = new_id
                group_entry, group_subentries = self._extract_subentries(group)
                group_ps = group_entry.get('ps')
                # Rebuild the entry: shared head material + group material.
                new_entry = entry.__class__(
                    (m, v) for m, v in main_entry if m not in ('hm', 'ps'))
                new_entry.append(('hm', new_hm))
                # Some groups override the part of speech of the entry
                if ps and not group_ps:
                    new_entry.append(('ps', ps))
                new_entry.extend(
                    (m, v) for m, v in group_entry if m != 'gp')
                new_entries.append(new_entry)
                subentries.extend(
                    self._generate_subentry(subentry, old_gid, group_ps or ps)
                    for subentry in group_subentries)
            # Cross-link entries that came from the same original entry.
            if len(new_entries) > 1:
                for entry in new_entries:
                    heterosemes = [
                        '{}{}'.format(e.get('lx'), e.get('hm'))
                        for e in new_entries
                        if e is not entry]
                    entry.append(('heterosemes', ' ; '.join(heterosemes)))
            for e in new_entries:
                yield e
            for e in subentries:
                yield e
        else:
            main_entry, subentries = self._extract_subentries(main_entry)
            self._homonyms[lx] += 1
            new_hm = str(self._homonyms[lx])
            new_id = '{}{}'.format(lx, new_hm)
            self.id_map[old_id] = new_id
            if la and la not in self.la_index:
                self.la_index[la] = new_id
            new_entry = entry.__class__(
                (m, v) for m, v in main_entry if m != 'hm')
            # Keep the homonym number right after the lemma.
            new_entry.insert(1, ('hm', new_hm))
            yield new_entry
            for subentry in subentries:
                yield self._generate_subentry(subentry, old_id, ps)
def _fix_single_ref(ref, id_map):
# Shave off sense numbers
ref = re.sub(r'–\d+$', '', ref.strip())
return (
id_map.get(ref)
or id_map.get('{}_1'.format(ref))
or id_map.get('{}.A'.format(ref))
or id_map.get('{}_1.A'.format(ref))
or ref)
def _fix_crossref_field(value, id_map):
    """Map every ';'-separated reference in `value` to its new id."""
    fixed = (_fix_single_ref(part, id_map) for part in value.split(';'))
    return ';'.join(fixed)
def fix_crossrefs(entry, id_map):
    """Rewrite all cross references in `entry` using `id_map`.

    Dedicated cross-reference markers (CROSSREF_MARKERS) are mapped
    wholesale; in other markers (except those in CROSSREF_BLACKLIST),
    in-line |fv{...}/|vl{...} links — optionally followed by a |hm{N}
    homonym number — are rewritten in place.
    """
    def fix_inline_crossref(match):
        # Re-attach the homonym number (if any) before the id lookup.
        new_link = _fix_crossref_field(
            '{}{}'.format(match.group(2), match.group(3) or ''),
            id_map)
        return '|{}{{{}}}'.format(match.group(1), new_link)

    new_entry = entry.__class__()
    for marker, value in entry:
        if marker in CROSSREF_MARKERS:
            value = _fix_crossref_field(value, id_map)
        elif marker not in CROSSREF_BLACKLIST:
            value = re.sub(
                r'\|(fv|vl)\{([^}]+)\}(?:\|hm\{(\d+)\})?',
                fix_inline_crossref,
                value)
        new_entry.append((marker, value))
    return new_entry
def reorganize(sfm):
    """Use this function if you need to manually add or remove entries from the
    SFM data.

    Takes an SFM database as an argument and returns a modified SFM database.
    """
    splitter = EntrySplitter()
    sfm = MySFM(
        new_entry
        for old_entry in sfm
        for new_entry in splitter.split_entry(old_entry))
    # ChainMap: explicit id mappings take precedence over citation-form index.
    sfm.visit(lambda e: fix_crossrefs(e, ChainMap(splitter.id_map, splitter.la_index)))
    return sfm
def _convert_before_sn(mapping, entry):
found_sn = False
for m, v in entry:
if found_sn:
yield m, v
elif m == 'sn':
found_sn = True
yield m, v
else:
yield mapping.get(m, m), v
def convert_before_sn(mapping, entry):
    """Rename markers occurring before the first 'sn'; no-op without senses."""
    if not entry.get('sn'):
        return entry
    return entry.__class__(_convert_before_sn(mapping, entry))
def remove_markers(markers, entry):
    """Return a copy of `entry` without any pairs whose marker is in `markers`."""
    kept = ((marker, value) for marker, value in entry if marker not in markers)
    return entry.__class__(kept)
def move_images_into_sense(entry):
    r"""Sometimes there are \pc tags in the entry -- move those to the first sense."""
    # Nothing to do unless the entry has both senses and pictures.
    if not entry.get('sn') or not entry.get('pc'):
        return entry
    new_entry = entry.__class__()
    found_sn = None
    images = []  # each element: [pc value, following lg caption values...]
    for m, v in entry:
        if found_sn:
            new_entry.append((m, v))
        elif m == 'pc':
            images.append([v])
        elif m == 'lg':
            # NOTE(review): assumes every pre-sense 'lg' follows a 'pc';
            # a leading 'lg' would raise IndexError — confirm input shape.
            images[-1].append(v)
        elif m == 'sn':
            # jump out early if the entry did not contain any pictures
            if not images:
                return entry
            found_sn = True
            new_entry.append((m, v))
            # Re-emit the collected images right after the first sense marker.
            for image in images:
                new_entry.append(('pc', image[0]))
                for lg in image[1:]:
                    new_entry.append(('lg', lg))
        else:
            new_entry.append((m, v))
    return new_entry
def _box_markers(box):
if 'conf' in box:
conf = '{}: {}'.format(box['tie'], box['conf']) if 'tie' in box else box['conf']
yield 'conf', conf
if 'cona' in box:
cona = '{}: {}'.format(box['tin'], box['cona']) if 'tin' in box else box['cona']
yield 'cona', cona
if 'conv' in box:
conv = '{}: {}'.format(box['tiv'], box['conv']) if 'tiv' in box else box['conv']
yield 'conv', conv
def merge_infobox_titles(entry):
    """Fold info-box title markers (tie/tin/tiv) into their content markers.

    An info box opens at an 'enc' marker; box markers are collected until
    the first non-box marker appears, at which point the box is flushed as
    conf/cona/conv pairs with titles prepended (see _box_markers).

    NOTE(review): the 'enc' value itself is collected but never emitted by
    _box_markers — dropped by design? Confirm.
    """
    box_markers = {'enc', 'tie', 'tin', 'tiv', 'conv', 'conf', 'cona'}
    box = {}
    new_entry = entry.__class__()
    for marker, value in entry:
        if marker == 'enc':
            box['enc'] = value
        elif box:
            if marker in box_markers:
                box[marker] = value
            else:
                # First non-box marker closes the box: flush it.
                new_entry.extend(_box_markers(box))
                box = {}
                new_entry.append((marker, value))
        else:
            new_entry.append((marker, value))
    # Flush a box that ran to the end of the entry.
    if box:
        new_entry.extend(_box_markers(box))
    return new_entry
def merge_etymology(marker_dict):
    """Combine etymology markers into one string: "el: et 'eg'".

    el (source language) and eg (gloss) are optional; et is expected to
    be present (a missing et would render as the string "None").
    """
    el = marker_dict.get('el') or ''
    eg = marker_dict.get('eg')
    sep1 = ': ' if el and len(marker_dict) > 1 else ''
    sep2 = ' ' if el and eg else ''
    gloss = "'{}'".format(eg) if eg else ''
    return '{}{}{}{}{}'.format(el, sep1, marker_dict.get('et'), sep2, gloss)
def generate_link_label(entry):
    """Insert a 'link_label' marker right after the lemma.

    The label is the citation form ('la') when present, else the lemma
    ('lx'), else the empty string.
    """
    label = entry.get('la') or entry.get('lx') or ''
    result = entry.__class__(entry)
    result.insert(1, ('link_label', label))
    return result
def preprocess(entry):
    """Use this function if you need to change the contents of an entry before
    any other processing.

    This is run on every entry in the SFM database.
    """
    # Order matters: images must be moved before sense fall-backs, and the
    # marker merges must happen before the link label is generated.
    entry = remove_markers(('dnu',), entry)
    entry = move_images_into_sense(entry)
    entry = marker_fallback_sense(entry, 'de', 'gn')
    entry = marker_fallback_sense(entry, 'gxx', 'ge')
    entry = marker_fallback_sense(entry, 'gxy', 'gr')
    entry = merge_infobox_titles(entry)
    entry = merge_markers(entry, ['ue', 'ee'], 'ee')
    entry = merge_markers(entry, ['un', 'en'], 'en')
    entry = merge_markers(entry, ['pdl', 'pdv'], 'pdv')
    entry = merge_markers(entry, ['el', 'et', 'eg'], 'et', format_fn=merge_etymology)
    entry = generate_link_label(entry)
    return entry
def _remove_inline_markers(val):
if isinstance(val, str):
return re.sub(r'\|\w+\{([^}]+)\}', r'\1', val)
else:
return val
def _warn_about_table(table_name, table, columns, link_regex, cldf_log):
if not columns:
return
for row in table:
row_id = row.get('ID')
for colname, value in row.items():
if colname not in columns:
continue
for link_match in re.finditer(link_regex, value):
link = link_match.group(0)
if re.fullmatch(r'\s*\[.*\]\s*\(.*\)\s*', link):
continue
msg = '{}:{}:{}:unknown in-line cross reference `{}`'.format(
table_name, row.get('ID'), colname, link)
cldf_log.warn(msg)
def warn_about_inline_references(
    entries, senses, examples, props, cldf_log
):
    """Warn about unconverted in-line cross references in all three tables.

    Scans the entry, sense, and example tables for in-line references that
    `process_links_in_markers` should have turned into markdown links.
    No-op unless both 'link_regex' and 'process_links_in_markers' are set.
    """
    props = sfm2cldf._add_property_fallbacks(props)
    if not props.get('link_regex') or not props.get('process_links_in_markers'):
        return
    # One data-driven loop instead of three copy-pasted call sites.
    tables = (
        ('EntryTable', entries, 'entry_map'),
        ('SenseTable', senses, 'sense_map'),
        ('ExampleTable', examples, 'example_map'),
    )
    for table_name, table, map_name in tables:
        marker_map = props[map_name]
        columns = {
            marker_map[m]
            for m in props['process_links_in_markers']
            if m in marker_map}
        _warn_about_table(
            table_name, table, columns, props['link_regex'], cldf_log)
def remove_inline_markers(val):
    """Strip in-line markers from a value, or from each element of a list."""
    if isinstance(val, list):
        return list(map(_remove_inline_markers, val))
    return _remove_inline_markers(val)
def clean_table(table):
    """Return `table` with in-line markers stripped from every cell."""
    cleaned_rows = []
    for row in table:
        cleaned_rows.append(
            {key: remove_inline_markers(value) for key, value in row.items()})
    return cleaned_rows
def authors_string(authors):
    """Return formatted string of all authors.

    Plain strings and dicts without ``primary: False`` count as primary
    authors; the rest are appended after "with".
    """
    def display_name(author):
        return author['name'] if isinstance(author, dict) else author

    def is_primary(author):
        return not isinstance(author, dict) or author.get('primary', True)

    primary = ' and '.join(
        display_name(author) for author in authors if is_primary(author))
    secondary = ' and '.join(
        display_name(author) for author in authors if not is_primary(author))
    if primary and secondary:
        return '{} with {}'.format(primary, secondary)
    return primary or secondary
class Dataset(BaseDataset):
    """cldfbench dataset converting the Teanu SFM dictionary to CLDF."""

    dir = pathlib.Path(__file__).parent
    id = "teanu"

    def cldf_specs(self):  # A dataset must declare all CLDF sets it creates.
        return CLDFSpec(
            dir=self.cldf_dir,
            module='Dictionary',
            metadata_fname='cldf-metadata.json')

    def cmd_download(self, args):
        """
        Download files to the raw/ directory. You can use helpers methods of `self.raw_dir`, e.g.

        >>> self.raw_dir.download(url, fname)
        """
        pass

    def cmd_makecldf(self, args):
        """
        Convert the raw data to a CLDF dataset.

        >>> args.writer.objects['LanguageTable'].append(...)
        """
        # read data
        md = self.etc_dir.read_json('md.json')
        properties = md.get('properties') or {}
        language_name = md['language']['name']
        isocode = md['language']['isocode']
        # NOTE(review): language_id reuses the ISO code rather than a
        # dedicated id field — confirm this is intentional.
        language_id = md['language']['isocode']
        glottocode = md['language']['glottocode']
        # Dataset-specific marker map overrides the pydictionaria defaults.
        marker_map = ChainMap(
            properties.get('marker_map') or {},
            sfm2cldf.DEFAULT_MARKER_MAP)
        entry_sep = properties.get('entry_sep') or sfm2cldf.DEFAULT_ENTRY_SEP
        sfm = SFM(
            self.raw_dir / 'db.sfm',
            marker_map=marker_map,
            entry_sep=entry_sep)
        examples = sfm2cldf.load_examples(self.raw_dir / 'examples.sfm')
        # Bibliography and media catalog are optional inputs.
        if (self.raw_dir / 'sources.bib').exists():
            sources = parse_string(self.raw_dir.read('sources.bib'), 'bibtex')
        else:
            sources = None
        if (self.etc_dir / 'cdstar.json').exists():
            media_catalog = self.etc_dir.read_json('cdstar.json')
        else:
            media_catalog = {}

        # preprocessing
        sfm.visit(prepreprocess)
        sfm = reorganize(sfm)
        sfm.visit(preprocess)

        # processing
        with open(self.dir / 'cldf.log', 'w', encoding='utf-8') as log_file:
            log_name = '%s.cldf' % language_id
            cldf_log = sfm2cldf.make_log(log_name, log_file)
            entries, senses, examples, media = sfm2cldf.process_dataset(
                self.id, language_id, properties,
                sfm, examples, media_catalog=media_catalog,
                glosses_path=self.raw_dir / 'glosses.flextext',
                examples_log_path=self.dir / 'examples.log',
                glosses_log_path=self.dir / 'glosses.log',
                cldf_log=cldf_log)

            # Note: If you want to manipulate the generated CLDF tables before
            # writing them to disk, this would be a good place to do it.
            warn_about_inline_references(
                entries, senses, examples, properties, cldf_log)
            entries = clean_table(entries)
            senses = clean_table(senses)
            examples = clean_table(examples)
            media = clean_table(media)

            # cldf schema
            sfm2cldf.make_cldf_schema(
                args.writer.cldf, properties,
                entries, senses, examples, media)
            sfm2cldf.attach_column_titles(args.writer.cldf, properties)

            # Blank separator line in the log file.
            print(file=log_file)

            entries = sfm2cldf.ensure_required_columns(
                args.writer.cldf, 'EntryTable', entries, cldf_log)
            senses = sfm2cldf.ensure_required_columns(
                args.writer.cldf, 'SenseTable', senses, cldf_log)
            examples = sfm2cldf.ensure_required_columns(
                args.writer.cldf, 'ExampleTable', examples, cldf_log)
            media = sfm2cldf.ensure_required_columns(
                args.writer.cldf, 'media.csv', media, cldf_log)
            entries = sfm2cldf.remove_senseless_entries(
                senses, entries, cldf_log)

        # output
        if sources:
            args.writer.cldf.add_sources(sources)
        args.writer.cldf.properties['dc:creator'] = authors_string(
            md.get('authors') or ())
        language = {
            'ID': language_id,
            'Name': language_name,
            'ISO639P3code': isocode,
            'Glottocode': glottocode,
        }
        args.writer.objects['LanguageTable'] = [language]
        args.writer.objects['EntryTable'] = entries
        args.writer.objects['SenseTable'] = senses
        args.writer.objects['ExampleTable'] = examples
        args.writer.objects['media.csv'] = media
|
nilq/baby-python
|
python
|
import json

import requests
from django.conf import settings
# Mirror Django's DEBUG flag; default to False so a missing setting does not
# raise AttributeError at import time.
DEBUG = getattr(settings, 'DEBUG', False)
class AppleService(object):
    """Thin HTTP client for a push-notification relay service.

    All notify_* methods PUT a JSON payload to an endpoint below *base_url*.
    """

    def __init__(self, base_url):
        self.base_url = base_url

    def notify_all(self, resources, message):
        """Broadcast *message* for each resource to every user."""
        for resource in resources:
            # json.dumps escapes quotes/newlines; the previous hand-built
            # concatenation produced invalid JSON for such messages.
            data = json.dumps({"resource": resource, "message": message})
            self._send("/notification/all", data)

    def notify_user(self, user, resources, message):
        """Notify a single owner about each resource."""
        for resource in resources:
            data = json.dumps({"owner": user, "resource": resource, "message": message})
            self._send("/notification", data)

    def notify_users(self, users, resources, message):
        """Notify each user in *users* about each resource."""
        for user in users:
            self.notify_user(user, resources, message)

    def notify_end_users(self, users, resources, message):
        """Notify the given users, or broadcast when *users* is empty/None."""
        if users:
            self.notify_users(users, resources, message)
        else:
            self.notify_all(resources, message)

    def _send(self, relative_url, data):
        # PUT the JSON payload; in DEBUG mode dump the response body for
        # inspection. The context manager guarantees the file is closed
        # even if the write fails.
        url = self.base_url + relative_url
        r = requests.put(url, data)
        if DEBUG:
            with open('log.html', 'w') as f:
                f.write(r.text)

    def get_registered_users(self):
        """Return the list of registered owners, or None on HTTP error."""
        url = self.base_url + "/pii/registeredusers"
        r = requests.get(url)
        if r.status_code != requests.codes.ok:
            return None
        dictionary = r.json()
        return dictionary.get("owners")
|
nilq/baby-python
|
python
|
from keepkeylib.client import proto, BaseClient, ProtocolMixin
from ..trezor.clientbase import TrezorClientBase
class KeepKeyClient(TrezorClientBase, ProtocolMixin, BaseClient):
    """KeepKey hardware-wallet client.

    Combines the plugin's shared Trezor-style wrapper (TrezorClientBase)
    with keepkeylib's ProtocolMixin/BaseClient transport stack. The init
    order below matters: the keepkeylib layers are set up before the
    shared wrapper.
    """

    def __init__(self, transport, handler, plugin):
        BaseClient.__init__(self, transport)
        ProtocolMixin.__init__(self, transport)
        TrezorClientBase.__init__(self, handler, plugin, proto)

    def recovery_device(self, *args):
        # First positional flag of keepkeylib's recovery_device is pinned to
        # False here — presumably a recovery-mode option; confirm against
        # the keepkeylib ProtocolMixin signature.
        ProtocolMixin.recovery_device(self, False, *args)

# Install TrezorClientBase's method wrappers (error handling, etc.) on this class.
TrezorClientBase.wrap_methods(KeepKeyClient)
|
nilq/baby-python
|
python
|
# Convert the string literal "12" to an integer and echo it.
userInput = int('12')
print(userInput)
|
nilq/baby-python
|
python
|
import json
import os
from typing import Any, Dict, List, Union
here = os.path.abspath(os.path.dirname(__file__))
def _load_list(paths: List[str]) -> dict:
    """Merge the JSON objects stored at *paths* into a single dict.

    Later files win on key collisions (plain dict.update semantics).
    """
    merged: Dict[str, Any] = {}
    for path in paths:
        with open(path) as handle:
            merged.update(json.load(handle))
    return merged
def load_json(path_or_dir: Union[str, List[str]]) -> dict:
    """Load JSON from a file path, a directory of *.json files, or a list of paths.

    Args:
        path_or_dir: path to a ``.json`` file, path to a directory containing
            ``.json`` files, or a list of ``.json`` file paths.

    Returns:
        The parsed content; directory and list inputs are merged into one dict.

    Raises:
        ValueError: if a string path is neither a ``.json`` file nor a directory.
        TypeError: if *path_or_dir* is neither a string nor a list.
    """
    path_error = (
        "replacy.db.load_json expects a valid path to a json file, "
        "a list of (valid) paths to json files, "
        "or the (valid) path to a directory with json files"
        f", but received {path_or_dir}"
    )
    if isinstance(path_or_dir, str):
        # isinstance (rather than type(...) ==) also accepts str subclasses
        # and narrows for mypy; os.path.isfile already implies existence.
        if os.path.isfile(path_or_dir) and path_or_dir.endswith(".json"):
            with open(path_or_dir) as h:
                content = json.load(h)
        elif os.path.isdir(path_or_dir):
            paths = [
                os.path.join(path_or_dir, f)
                for f in os.listdir(path_or_dir)
                if f.endswith(".json")
            ]
            content = _load_list(paths)
        else:
            raise ValueError(path_error)
    elif isinstance(path_or_dir, list):
        content = _load_list(path_or_dir)
    else:
        raise TypeError(path_error)
    return content
def get_forms_lookup(forms_path="resources/forms_lookup.json"):
    """Load the bundled forms lookup table."""
    return load_json(os.path.join(here, forms_path))


def get_match_dict(match_path="resources/match_dict.json"):
    """Load the bundled match dictionary."""
    return load_json(os.path.join(here, match_path))


def get_match_dict_schema(schema_path="resources/match_dict_schema.json"):
    """Load the JSON schema used to validate match dictionaries."""
    return load_json(os.path.join(here, schema_path))


def get_patterns_test_data(data_path="resources/patterns_test_data.json"):
    """Load the bundled pattern test data."""
    return load_json(os.path.join(here, data_path))
def load_lm(model_path):
    """Load a KenLM language model from *model_path*.

    kenlm is imported lazily here since it is an optional dependency.
    """
    import kenlm

    model = kenlm.Model(model_path)
    return model
|
nilq/baby-python
|
python
|
'''
Copyright (c) 2018 Modul 9/HiFiBerry
2020 Christoffer Sawicki
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import time
import logging
import struct
from threading import Thread
from hifiberrydsp.filtering.volume import percent2amplification
from hifiberrydsp import datatools
try:
    from hifiberrydsp.hardware.adau145x import Adau145x
    from hifiberrydsp.hardware.spi import SpiHandler
    # depends on spidev and is not required to run tests
except Exception:
    # Hardware support is optional (e.g. when running tests without spidev);
    # the bare `except:` was narrowed so KeyboardInterrupt/SystemExit are no
    # longer swallowed.
    pass
class SoundSync(Thread):
    '''
    Implements reverse-engineered LG Sound Sync to set main volume control

    Polls the DSP's SPDIF status registers for the LG-specific volume
    signature and, when present, converts the 0-100 volume into a DSP
    amplification value and writes it to the configured volume register.
    '''

    def __init__(self):
        self.dsp = Adau145x
        self.spi = SpiHandler
        self.finished = False  # set by finish() to stop the polling loop
        self.detected = False  # True while a Sound Sync signal is being seen
        self.volume_register = None
        self.spdif_active_register = None
        Thread.__init__(self)

    def set_registers(self, volume_register, spdif_active_register):
        """Configure DSP register addresses; spdif_active_register may be None."""
        logging.info("LG Sound Sync: Using volume register at %s and SPDIF active register at %s",
                     volume_register, spdif_active_register)
        self.volume_register = volume_register
        self.spdif_active_register = spdif_active_register

    def update_volume(self):
        """Read the TV's volume and apply it to the DSP.

        Returns True when a volume value was read and written, False when
        no volume register is configured, SPDIF is inactive, or no
        Sound Sync signature is present.
        """
        if self.volume_register is None:
            return False
        if (self.spdif_active_register is not None) and (not self.is_spdif_active()):
            return False
        volume = self.try_read_volume()
        if volume is None:
            return False
        self.write_volume(volume)
        return True

    def is_spdif_active(self):
        """Return True if the SPDIF input is active (or no register is configured)."""
        if self.spdif_active_register is None:
            return True
        data = self.spi.read(self.spdif_active_register, 4)
        [spdif_active] = struct.unpack(">l", data)
        return spdif_active != 0

    def try_read_volume(self):
        """Return the 0-100 volume parsed from the SPDIF status bytes, or None."""
        spdif_status_register = 0xf617
        return self.parse_volume_from_status(self.spi.read(spdif_status_register, 5))

    # Volume ~~~~~
    # 0: 00f048a$ This is what the SPDIF status registers look like with different volume levels set.
    # 1: 01f048a$
    # 2: 02f048a$ We check for f048a (SIGNATURE_VALUE) to see if LG Sound Sync is enabled.
    # 3: 03f048a$
    # 100: 64f048a$ The byte to the left is the volume we want to extract.
    # ~~ The first bit is set to 1 when muted.
    SIGNATURE_MASK = 0xfffff
    SIGNATURE_VALUE = 0xf048a
    SHIFT = 5 * 4  # signature occupies the low 5 hex digits; volume byte sits above it
    MUTE_MASK = 0b10000000
    VOLUME_MASK = 0b01111111

    @staticmethod
    def parse_volume_from_status(data):
        """Parse 5 status bytes; None if signature absent, 0 when muted."""
        bits = int.from_bytes(data, byteorder="big")
        if bits & SoundSync.SIGNATURE_MASK != SoundSync.SIGNATURE_VALUE:
            return None
        if bits >> SoundSync.SHIFT & SoundSync.MUTE_MASK:
            return 0
        return bits >> SoundSync.SHIFT & SoundSync.VOLUME_MASK

    def write_volume(self, volume):
        """Convert a percent volume to DSP amplification and write it via SPI."""
        assert 0 <= volume <= 100
        dspdata = datatools.int_data(self.dsp.decimal_repr(percent2amplification(volume)),
                                     self.dsp.WORD_LENGTH)
        self.spi.write(self.volume_register, dspdata)

    POLL_INTERVAL = 0.3

    def run(self):
        """Polling loop: every 0.3 s while detected, every 3 s otherwise."""
        try:
            while not self.finished:
                previously_detected = self.detected
                self.detected = self.update_volume()
                # Log only on detection state transitions.
                if not previously_detected and self.detected:
                    logging.info("LG Sound Sync started")
                elif previously_detected and not self.detected:
                    logging.info("LG Sound Sync stopped")
                if self.detected:
                    time.sleep(self.POLL_INTERVAL)
                else:
                    # Back off while no Sound Sync signal is present.
                    time.sleep(self.POLL_INTERVAL * 10)
        except Exception:
            logging.exception("LG Sound Sync crashed")

    def finish(self):
        """Ask the polling thread to exit after the current iteration."""
        self.finished = True
|
nilq/baby-python
|
python
|
# Module fwpd_histogram
import ctypes as ct
import time
from modules.example_helpers import *
def enable_characterization(ADQAPI, adq_cu, adq_num, channel, enable, only_metadata):
    """Reset the characterization logic for *channel* and program its mode.

    Writes the fixed register sequence: reset negedge, enable/disable bit,
    metadata-only bit, then a load-bit strobe.
    """
    assert (channel < 5 and channel > 0), "Channel must be between 1-4."
    # Base address of the histogram setup registers for this channel.
    base_addr = (channel - 1) * (2 ** (21 - 2 - 2)) + 1 * (2 ** (21 - 2 - 4))
    write = ADQAPI.ADQ_WriteUserRegister
    # Pull reset (create a negedge).
    write(adq_cu, adq_num, 2, base_addr, 0xfffffffd, 0x00000002, 0)
    write(adq_cu, adq_num, 2, base_addr, 0xfffffffd, 0x00000000, 0)
    # Enable or disable the characterization logic.
    enable_bits = 0x00000002 if enable else 0x00000006
    write(adq_cu, adq_num, 2, base_addr, 0xfffffff9, enable_bits, 0)
    # Metadata-only mode bit.
    metadata_bits = 0x00000008 if only_metadata else 0x00000000
    write(adq_cu, adq_num, 2, base_addr, 0xfffffff7, metadata_bits, 0)
    # Strobe the register load bit (for enable).
    write(adq_cu, adq_num, 2, base_addr, 0xfffffffe, 0x00000001, 0)
    write(adq_cu, adq_num, 2, base_addr, 0xfffffffe, 0x00000000, 0)
def _setup(ADQAPI, adq_cu, adq_num, base_addr, scale, offset):
    """Program histogram bin scale (base_addr) and bin offset (base_addr + 1)."""
    for register, value in ((base_addr, scale), (base_addr + 1, offset)):
        ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, register, 0x00000000, value, 0)
def _reset(ADQAPI, adq_cu, adq_num, base_addr, hist_size):
    """Zero every bin of a histogram memory region starting at *base_addr*."""
    if ADQAPI.ADQ_IsUSB3Device(adq_cu, adq_num):
        # Fast path: one block write over USB3.
        # NOTE(review): memset length is hist_size bytes (not hist_size*4);
        # harmless since ctypes arrays are zero-initialized at creation.
        zeros = (ct.c_uint32*hist_size)()
        ct.memset(ct.byref(zeros), 0, hist_size)
        status = ADQAPI.ADQ_WriteBlockUserRegister(adq_cu, adq_num, 2, base_addr, ct.byref(zeros), hist_size*4, 1)
        print('ADQAPI.ADQ_WriteBlockUserRegister returned {}'.format(adq_status(status)))
    else:
        # Slow path: clear one register at a time.
        for offset in range(hist_size):
            ADQAPI.ADQ_WriteUserRegister(adq_cu, adq_num, 2, base_addr + offset, 0x0, 0x0, 0)
def _fetch(ADQAPI, adq_cu, adq_num, base_addr, hist_size):
    """Read *hist_size* 32-bit bins starting at *base_addr*.

    Returns a ctypes c_uint32 array.
    """
    bins = (ct.c_uint32*hist_size)()
    if ADQAPI.ADQ_IsUSB3Device(adq_cu, adq_num):
        # One block read over USB3.
        ADQAPI.ADQ_ReadBlockUserRegister(adq_cu, adq_num, 2, base_addr, ct.byref(bins), hist_size*4, 1)
    else:
        # Register-by-register read path.
        scratch = ct.c_uint32()
        for offset in range(hist_size):
            ADQAPI.ADQ_ReadUserRegister(adq_cu, adq_num, 2, base_addr + offset, ct.byref(scratch))
            bins[offset] = scratch.value
    return bins
def _get_mem_base(channel, hist_type):
    """Return the base address of the histogram memory for *channel*/*hist_type*."""
    assert (channel < 5 and channel > 0), "Channel must be between 1-4."
    channel_base = (channel - 1) * (2 ** (21 - 2 - 2))
    if hist_type == 'tot':
        return channel_base + 2 * (2 ** (21 - 2 - 4))
    if hist_type == 'extr':
        return channel_base + 3 * (2 ** (21 - 2 - 4))
    assert False, "Unknown hist_type {}.".format(hist_type)
def _get_setup_base(channel, hist_type):
    """Return the base address of the setup registers for *channel*/*hist_type*."""
    assert (channel < 5 and channel > 0), "Channel must be between 1-4."
    channel_base = (channel - 1) * (2 ** (21 - 2 - 2)) + 1 * (2 ** (21 - 2 - 4))
    if hist_type == 'tot':
        return channel_base + 1
    if hist_type == 'extr':
        return channel_base + 4
    assert False, "Unknown hist_type {}.".format(hist_type)
def _get_hist_size(hist_type):
    # Lookup histogram size (number of 32-bit bins)
    if (hist_type == 'tot'):
        # TOT histogram is 16k+3 bins (the original comment said 4k — the code
        # returns 1024*16+3)
        return 1024*16+3
    if (hist_type == 'extr'):
        # Extreme-value histogram is 4k+3 bins
        return 1024*4+3
    else:
        assert False, "Unknown hist_type {}.".format(hist_type)
def setup_tot(ADQAPI, adq_cu, adq_num, channel, scale, offset):
    """Program bin scale/offset for the TOT histogram of *channel*."""
    base = _get_setup_base(channel, 'tot')
    return _setup(ADQAPI, adq_cu, adq_num, base, scale, offset)


def setup_extr(ADQAPI, adq_cu, adq_num, channel, scale, offset):
    """Program bin scale/offset for the extreme-value histogram of *channel*."""
    base = _get_setup_base(channel, 'extr')
    return _setup(ADQAPI, adq_cu, adq_num, base, scale, offset)


def reset_tot(ADQAPI, adq_cu, adq_num, channel):
    """Zero all bins of the TOT histogram of *channel*."""
    return _reset(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'tot'), _get_hist_size('tot'))


def reset_extr(ADQAPI, adq_cu, adq_num, channel):
    """Zero all bins of the extreme-value histogram of *channel*."""
    return _reset(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'extr'), _get_hist_size('extr'))


def fetch_tot(ADQAPI, adq_cu, adq_num, channel):
    """Read all bins of the TOT histogram of *channel*."""
    return _fetch(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'tot'), _get_hist_size('tot'))


def fetch_extr(ADQAPI, adq_cu, adq_num, channel):
    """Read all bins of the extreme-value histogram of *channel*."""
    return _fetch(ADQAPI, adq_cu, adq_num, _get_mem_base(channel, 'extr'), _get_hist_size('extr'))
|
nilq/baby-python
|
python
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import pytest
import mlos.global_values
from mlos.OptimizerEvaluationTools.ObjectiveFunctionFactory import ObjectiveFunctionFactory, objective_function_config_store
from mlos.Optimizers.RegressionModels.GoodnessOfFitMetrics import DataSetType
from mlos.Optimizers.RegressionModels.LassoCrossValidatedConfigStore import lasso_cross_validated_config_store
from mlos.Optimizers.RegressionModels.LassoCrossValidatedRegressionModel import LassoCrossValidatedRegressionModel
from mlos.Optimizers.RegressionModels.MultiObjectiveLassoCrossValidated import MultiObjectiveLassoCrossValidated
from mlos.Logger import create_logger
class TestMultiObjectiveLassoCrossValidated:
    """Smoke-tests MultiObjectiveLassoCrossValidated against synthetic objective functions."""

    @classmethod
    def setup_class(cls) -> None:
        # Session-wide mlos singletons and a shared logger for all tests.
        mlos.global_values.declare_singletons()
        cls.logger = create_logger("TestMultiObjectiveLassoCrossValidated")

    @pytest.mark.parametrize('objective_function_config_name', ["2d_hypersphere_minimize_some", "10d_hypersphere_minimize_some", "5_mutually_exclusive_polynomials"])
    def test_default_config(self, objective_function_config_name):
        """Fit a default-config model on random samples and print train/test goodness of fit."""
        objective_function_config = objective_function_config_store.get_config_by_name(objective_function_config_name)
        objective_function = ObjectiveFunctionFactory.create_objective_function(objective_function_config)
        lasso_model_config = lasso_cross_validated_config_store.default
        multi_objective_rf = MultiObjectiveLassoCrossValidated(
            model_config=lasso_model_config,
            input_space=objective_function.parameter_space,
            output_space=objective_function.output_space,
            logger=self.logger
        )
        # Sample counts scale with the problem's dimensionality/complexity.
        if objective_function_config_name == '2d_hypersphere_minimize_some':
            num_training_samples = 25
            num_testing_samples = 10
        elif objective_function_config_name == '10d_hypersphere_minimize_some':
            num_training_samples = 50
            num_testing_samples = 10
        elif objective_function_config_name == '5_mutually_exclusive_polynomials':
            num_training_samples = 100
            num_testing_samples = 50
        else:
            assert False
        # Random train/test parameter sets, evaluated through the objective function.
        train_params_df = objective_function.parameter_space.random_dataframe(num_samples=num_training_samples)
        train_objectives_df = objective_function.evaluate_dataframe(train_params_df)
        test_params_df = objective_function.parameter_space.random_dataframe(num_samples=num_testing_samples)
        test_objectives_df = objective_function.evaluate_dataframe(test_params_df)
        multi_objective_rf.fit(features_df=train_params_df, targets_df=train_objectives_df, iteration_number=num_training_samples)
        multi_objective_predictions = multi_objective_rf.predict(features_df=train_params_df, include_only_valid_rows=True)
        # TRAINING DATA
        #
        print("------------------------------------------------------------------------------------")
        print("--------------------------------------- TRAIN --------------------------------------")
        print("------------------------------------------------------------------------------------")
        training_gof = multi_objective_rf.compute_goodness_of_fit(features_df=train_params_df, targets_df=train_objectives_df, data_set_type=DataSetType.TRAIN)
        for objective_name in objective_function.output_space.dimension_names:
            print("------------------------------------------------------------------------------------")
            print(objective_name)
            print(training_gof[objective_name].to_json(indent=2))
        # TESTING DATA
        print("------------------------------------------------------------------------------------")
        print("--------------------------------------- TEST ---------------------------------------")
        print("------------------------------------------------------------------------------------")
        testing_gof = multi_objective_rf.compute_goodness_of_fit(features_df=test_params_df, targets_df=test_objectives_df, data_set_type=DataSetType.TEST_KNOWN_RANDOM)
        for objective_name in objective_function.output_space.dimension_names:
            print("------------------------------------------------------------------------------------")
            print(objective_name)
            print(testing_gof[objective_name].to_json(indent=2))
|
nilq/baby-python
|
python
|
from allauth.account import signals
from allauth.account.views import SignupView
from allauth.account.utils import send_email_confirmation
from allauth.exceptions import ImmediateHttpResponse
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.http import HttpRequest, HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import (
CreateView,
DeleteView,
DetailView,
ListView,
RedirectView,
UpdateView,
)
from arike.facilities.models import Facility
from arike.users.forms import UserForm, UserSignupForm
from arike.users.models import UserRoles
User = get_user_model()
class AdminAuthMixin(LoginRequiredMixin, UserPassesTestMixin):
    """Restrict a view to logged-in users with the district-admin role."""

    def test_func(self):
        role = self.request.user.role
        return role == UserRoles.DISTRICT_ADMIN
class UserDetailView(LoginRequiredMixin, DetailView):
    """Show a single user's profile, looked up by username in the URL."""

    model = User
    slug_field = "username"
    slug_url_kwarg = "username"


user_detail_view = UserDetailView.as_view()
class UserFormView(AdminAuthMixin):
    """Shared configuration for the admin-facing nurse CRUD views."""

    form_class = UserForm
    template_name = "users/user_form.html"
    slug_field = "username"
    slug_url_kwarg = "username"

    def get_queryset(self):
        # Admins may only manage non-admin, non-deleted users in their district.
        admin_district = self.request.user.facility.ward.lsg_body.district
        return (
            User.objects
            .filter(deleted=False, facility__ward__lsg_body__district=admin_district)
            .exclude(role=UserRoles.DISTRICT_ADMIN)
        )

    def get_success_url(self):
        return "/users/list/"
class NurseSignUpView(AdminAuthMixin, SignupView):
    """District-admin-only signup flow for creating nurse accounts."""

    form_class = UserSignupForm
    template_name = "users/user_form.html"
    slug_field = "username"
    slug_url_kwarg = "username"

    def get_success_url(self):
        return "/users/list/"

    def form_valid(self, form):
        # Save the user, fire allauth's signed-up signal, then send the
        # confirmation e-mail before redirecting.
        self.user = form.save(self.request)
        try:
            signals.user_signed_up.send(
                sender=self.user.__class__, request=self.request, user=self.user, **{}
            )
            # Third positional arg is True — presumably allauth's `signup`
            # flag; confirm against send_email_confirmation's signature.
            send_email_confirmation(self.request, self.user, True)
            return HttpResponseRedirect(self.get_success_url())
        except ImmediateHttpResponse as e:
            # A signal handler may short-circuit with a ready-made response.
            return e.response
class NurseDeleteView(UserFormView, DeleteView):
    """Soft-delete a nurse: set the `deleted` flag instead of removing the row."""

    def delete(self, request: HttpRequest, *args: str, **kwargs):
        target = self.get_object()
        self.object = target
        redirect_to = self.get_success_url()
        target.deleted = True
        target.save()
        return HttpResponseRedirect(redirect_to)
class NurseUpdateView(UserFormView, UpdateView):
    """Edit an existing nurse; all behavior comes from UserFormView/UpdateView."""

    pass
class UserListVeiw(AdminAuthMixin, ListView):
    """List non-admin users in the admin's district with optional GET filters."""

    model = User
    template_name = "users/list.html"
    context_object_name = "users"

    def get_queryset(self):
        district = self.request.user.facility.ward.lsg_body.district
        queryset = User.objects.filter(
            deleted=False, facility__ward__lsg_body__district=district
        ).exclude(role=UserRoles.DISTRICT_ADMIN)
        # Apply a filter only when its query parameter is present
        # (empty strings still count as present).
        filter_specs = (
            ("search", lambda qs, v: qs.filter(name__icontains=v)),
            ("role", lambda qs, v: qs.filter(role=v)),
            ("facility", lambda qs, v: qs.filter(facility=v)),
        )
        for param, apply_filter in filter_specs:
            value = self.request.GET.get(param)
            if value is not None:
                queryset = apply_filter(queryset, value)
        return queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Facilities in the admin's district, used to populate the filter UI.
        district = self.request.user.facility.ward.lsg_body.district
        context["facilities"] = Facility.objects.filter(ward__lsg_body__district=district)
        return context
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Let the logged-in user edit their own profile fields."""

    model = User
    fields = ["name", "email", "phone", "facility"]
    success_message = _("Information successfully updated")

    def get_success_url(self):
        assert (
            self.request.user.is_authenticated
        )  # for mypy to know that the user is authenticated
        return self.request.user.get_absolute_url()

    def get_object(self):
        # Always edit the requesting user, regardless of URL kwargs.
        return self.request.user


user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Send a logged-in user to their own profile page."""

    permanent = False

    def get_redirect_url(self):
        username = self.request.user.username
        return reverse("users:detail", kwargs={"username": username})


user_redirect_view = UserRedirectView.as_view()
|
nilq/baby-python
|
python
|
# encoding: utf-8
import sys
from PyQt5 import QtQuick
from PyQt5.QtCore import QObject, pyqtSlot, QTimer
from PyQt5.QtGui import QIcon
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtWidgets import QWidget, QApplication, QLabel
from resources import resources
print(QtQuick)
print(resources)
class MyAppMainWindow(QWidget):
    """Demo window: grabs a full-desktop screenshot and shows it in a label."""

    def __init__(self):
        super(MyAppMainWindow, self).__init__()
        self.initUI()

    def initUI(self):
        # Fixed initial window size.
        self.resize(800, 600)
        # A single label is used both for the text and the screenshot.
        self.label = QLabel(self)
        self.label.setText("哈哈哈哈")
        # Grab the entire desktop (window id 0 = whole screen).
        screen = QApplication.primaryScreen()
        qpixmap = screen.grabWindow(0)
        print(qpixmap)
        self.label.setPixmap(qpixmap)
        # Window chrome: title + icon, then show immediately.
        self.setWindowTitle("My First PyQt5 App")
        self.setWindowIcon(QIcon('icon.ico'))

        self.show()
class ScreenCaptureWindow(QWidget):
    """Window that repeatedly screenshots the desktop while its timer runs."""

    def __init__(self):
        super(ScreenCaptureWindow, self).__init__()
        self.index = 0  # number of captures taken so far
        self.resize(800, 600)
        self.setWindowTitle("录屏实况")
        self.label = QLabel(self)
        self.timer = QTimer()
        self.timer.timeout.connect(self.capture_screen)
        # Take one initial frame immediately.
        self.capture_screen()

    def capture_screen(self):
        """Grab the whole desktop and display it in the label."""
        self.index += 1
        screen = QApplication.primaryScreen()
        screen_img = screen.grabWindow(0)
        self.label.setPixmap(screen_img)
        print(screen_img)
        print("截图:" + str(self.index))

    def capture_screen_start(self):
        # Show the window and capture every 100 ms until stopped.
        self.show()
        self.timer.start(100)

    def capture_screen_stop(self):
        self.hide()
        self.timer.stop()
class MyClass(QObject):
    """QML-facing controller exposing start/stop of the screen-capture window."""

    def __init__(self):
        super(MyClass, self).__init__()
        self.scw = ScreenCaptureWindow()

    # NOTE(review): the slots are declared with a str parameter but the
    # methods take no extra argument — confirm the QML side actually passes
    # a string; otherwise these should probably be @pyqtSlot().
    @pyqtSlot(str)
    def screen_capture_start(self):
        self.scw.capture_screen_start()

    @pyqtSlot(str)
    def screen_capture_stop(self):
        self.scw.capture_screen_stop()
if __name__ == "__main__":
    app = QApplication(sys.argv)
    engine = QQmlApplicationEngine()
    # engine.load("resources/qmls/app_main_window.qml")
    # engine.load(QUrl("qrc:/resources/qmls/app_main_window.qml"))
    # Load the main QML window from the compiled Qt resource file.
    engine.load(":/resources/qmls/app_main_window.qml")
    # Expose the controller to QML as the "con" context property.
    # NOTE(review): it is set after load() — QML bindings referencing `con`
    # during initial load may not see it; confirm this works as intended.
    con = MyClass()
    context = engine.rootContext()
    context.setContextProperty("con", con)
    # myApp = MyAppMainWindow()
    sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
import logging
import os
import posixpath
from dvc.config import Config
from dvc.config import ConfigError
from dvc.utils import relpath
from dvc.utils.compat import urlparse
logger = logging.getLogger(__name__)
class RemoteConfig(object):
    """Manages the `remote "<name>"` sections of dvc's layered config."""

    def __init__(self, config):
        # config: dvc.config.Config instance whose sections are read/written.
        self.config = config

    def get_settings(self, name):
        """
        Args:
            name (str): The name of the remote that we want to retrieve

        Returns:
            dict: The content beneath the given remote name.

        Example:
            >>> config = {'remote "server"': {'url': 'ssh://localhost/'}}
            >>> get_settings("server")
            {'url': 'ssh://localhost/'}
        """
        settings = self.config.config.get(
            Config.SECTION_REMOTE_FMT.format(name.lower())
        )
        if settings is None:
            raise ConfigError(
                "unable to find remote section '{}'".format(name)
            )
        parsed = urlparse(settings["url"])
        # Support for cross referenced remotes.
        # This will merge the settings, giving priority to the outer reference.
        # For example, having:
        #
        #       dvc remote add server ssh://localhost
        #       dvc remote modify server user root
        #       dvc remote modify server ask_password true
        #
        #       dvc remote add images remote://server/tmp/pictures
        #       dvc remote modify images user alice
        #       dvc remote modify images ask_password false
        #       dvc remote modify images password asdf1234
        #
        # Results on a config dictionary like:
        #
        #       {
        #           "url": "ssh://localhost/tmp/pictures",
        #           "user": "alice",
        #           "password": "asdf1234",
        #           "ask_password": False,
        #       }
        #
        if parsed.scheme == "remote":
            # Recursively resolve the referenced remote, then overlay this
            # section's settings on top of it.
            reference = self.get_settings(parsed.netloc)
            url = posixpath.join(reference["url"], parsed.path.lstrip("/"))
            merged = reference.copy()
            merged.update(settings)
            merged["url"] = url
            return merged
        return settings

    @staticmethod
    def resolve_path(path, config_file):
        """Resolve path relative to config file location.

        Args:
            path: Path to be resolved.
            config_file: Path to config file, which `path` is specified
                relative to.

        Returns:
            Path relative to the `config_file` location. If `path` is an
            absolute path then it will be returned without change.
        """
        if os.path.isabs(path):
            return path
        return relpath(path, os.path.dirname(config_file))

    def add(self, name, url, default=False, force=False, level=None):
        """Add a remote section; optionally mark it as the default remote."""
        from dvc.remote import _get, RemoteLOCAL

        configobj = self.config.get_configobj(level)
        remote = _get({Config.SECTION_REMOTE_URL: url})
        # Local remotes are stored relative to the config file they live in.
        if remote == RemoteLOCAL and not url.startswith("remote://"):
            url = self.resolve_path(url, configobj.filename)
        self.config.set(
            Config.SECTION_REMOTE_FMT.format(name),
            Config.SECTION_REMOTE_URL,
            url,
            force=force,
            level=level,
        )
        if default:
            logger.info("Setting '{}' as a default remote.".format(name))
            self.config.set(
                Config.SECTION_CORE,
                Config.SECTION_CORE_REMOTE,
                name,
                level=level,
            )

    def remove(self, name, level=None):
        """Remove the remote section and drop `core.remote` references to it.

        Walks the config levels from local up to (and including) `level`,
        clearing the default-remote setting at each of them.
        """
        self.config.unset(Config.SECTION_REMOTE_FMT.format(name), level=level)
        if level is None:
            level = Config.LEVEL_REPO
        for lev in [
            Config.LEVEL_LOCAL,
            Config.LEVEL_REPO,
            Config.LEVEL_GLOBAL,
            Config.LEVEL_SYSTEM,
        ]:
            self.config.unset(
                Config.SECTION_CORE,
                Config.SECTION_CORE_REMOTE,
                level=lev,
                force=True,
            )
            if lev == level:
                break

    def modify(self, name, option, value, level=None):
        """Set a single option of an existing remote section."""
        self.config.set(
            Config.SECTION_REMOTE_FMT.format(name), option, value, level=level
        )

    def list(self, level=None):
        """Return the configured remotes (name/url pairs) at `level`."""
        return self.config.list_options(
            Config.SECTION_REMOTE_REGEX, Config.SECTION_REMOTE_URL, level=level
        )

    def set_default(self, name, unset=False, level=None):
        """Set (or, with unset=True, clear) the default remote."""
        if unset:
            self.config.unset(Config.SECTION_CORE, Config.SECTION_CORE_REMOTE)
            return
        self.config.set(
            Config.SECTION_CORE, Config.SECTION_CORE_REMOTE, name, level=level
        )
|
nilq/baby-python
|
python
|
# DAACS ~= NASA Earthdata data centers.
# Each entry maps a data center to its CMR provider ids (cloud and on-prem)
# and, when available, the endpoint that hands out temporary S3 credentials.
# An empty "s3-credentials" string means no credential endpoint is listed here.
DAACS = [
    {
        "short-name": "NSIDC",
        "name": "National Snow and Ice Data Center",
        "homepage": "https://nsidc.org",
        "cloud-providers": ["NSIDC_CPRD"],
        "on-prem-providers": ["NSIDC_ECS"],
        "s3-credentials": "https://data.nsidc.earthdatacloud.nasa.gov/s3credentials",
    },
    {
        "short-name": "GHRC DAAC",
        "name": "Global Hydrometeorology Resource Center",
        "homepage": "https://ghrc.nsstc.nasa.gov/home/",
        "cloud-providers": ["GHRC_DAAC"],
        "on-prem-providers": ["GHRC_DAAC"],
        "s3-credentials": "https://data.ghrc.earthdata.nasa.gov/s3credentials",
    },
    {
        "short-name": "PO DAAC",
        "name": "Physical Oceanography Distributed Active Archive Center",
        "homepage": "https://podaac.jpl.nasa.gov",
        "cloud-providers": ["POCLOUD"],
        "on-prem-providers": ["PODAAC"],
        "s3-credentials": "https://archive.podaac.earthdata.nasa.gov/s3credentials",
    },
    {
        "short-name": "ASF",
        "name": "Alaska Satellite Facility",
        "homepage": "https://asf.alaska.edu",
        "cloud-providers": ["ASF"],
        "on-prem-providers": ["ASF"],
        "s3-credentials": "",
    },
    {
        "short-name": "ORNL DAAC",
        "name": "Oak Ridge National Laboratory",
        "homepage": "https://daac.ornl.gov",
        "cloud-providers": ["ORNL_CLOUD"],
        "on-prem-providers": ["ORNL_DAAC"],
        "s3-credentials": "https://data.ornldaac.earthdata.nasa.gov/s3credentials",
    },
    {
        "short-name": "LP DAAC",
        "name": " Land Processes Distributed Active Archive Center",
        "homepage": "https://lpdaac.usgs.gov",
        "cloud-providers": ["LPCLOUD"],
        "on-prem-providers": ["LPDAAC_ECS"],
        "s3-credentials": "https://data.lpdaac.prod.earthdatacloud.nasa.gov/s3credentials",
    },
    {
        "short-name": "GES DISC",
        "name": "NASA Goddard Earth Sciences (GES) Data and Information Services Center (DISC)",
        "homepage": "https://daac.gsfc.nasa.gov",
        "cloud-providers": ["GES_DISC"],
        "on-prem-providers": ["GES_DISC"],
        "s3-credentials": "",
    },
    {
        "short-name": "OB DAAC",
        "name": "NASA's Ocean Biology Distributed Active Archive Center",
        "homepage": "https://earthdata.nasa.gov/eosdis/daacs/obdaac",
        "cloud-providers": [],
        "on-prem-providers": ["OB_DAAC"],
        "s3-credentials": "",
    },
    {
        "short-name": "SEDAC",
        "name": "NASA's Socioeconomic Data and Applications Center",
        "homepage": "https://earthdata.nasa.gov/eosdis/daacs/sedac",
        "cloud-providers": [],
        "on-prem-providers": ["SEDAC"],
        "s3-credentials": "",
    },
]

# CMR provider ids whose holdings are served from the Earthdata cloud.
CLOUD_PROVIDERS = [
    "GES_DISC",
    "LPCLOUD",
    "NSIDC_CPRD",
    "POCLOUD",
    "ASF",
    "GHRC_DAAC",
    "ORNL_CLOUD",
]
nilq/baby-python
|
python
|
from tkinter import *
from SMExp import*
from DMExp import*
import pygame
import time
import random
class SMPage(Frame):
MUTE = False
INFO = False
DIM = 0
def __init__(self, parent, controller):
Frame.__init__(self, parent)
pygame.mixer.init()
SGMWall = PhotoImage(file="TwoDimension.png")
SGLabel = Label(self, image=SGMWall)
SGLabel.image = SGMWall
SGLabel.place(x=-2, y=-2)
Info = PhotoImage(file="InfoPopOne.png")
InfoPop = Label(self, image=Info)
InfoPop.image = Info
InfoPop.place(x=-2, y=-2)
InfoPop.lower()
Back = PhotoImage(file="DisneyBackbutton.png")
BackBtn = Button(self, image=Back, bd=0, bg='#182b3a', command=lambda: BackAct())
BackBtn.image = Back
BackBtn.place(x=-2, y=-2)
Info = PhotoImage(file="DisneyInfoButton.png")
InfoBtn = Button(self, image=Info, bd=0, bg='black', command=lambda: InfoAct())
InfoBtn.image = Info
InfoBtn.place(x=-2, y=698)
Music = PhotoImage(file="DisneyMusicOn.png")
MusicBtn = Button(self, image=Music, bd=0, bg='black', command=lambda: MuteAct())
MusicBtn.image = Music
MusicBtn.place(x=48, y=698)
MusicOff = PhotoImage(file="DisneyMusicOff.png")
MuteOff = Button(self, image=MusicOff, bd=0, bg='black', command=lambda: MuteAct())
MuteOff.image = MusicOff
MuteOff.place(x=48, y=698)
MuteOff.lower()
Random = PhotoImage(file="DisneyRandomButton.png")
RandBtn = Button(self, image=Random, bd=0, bg="black", command=lambda: RandAct())
RandBtn.image = Random
RandBtn.place(x=98, y=698)
Reset = PhotoImage(file="DisneyClearButton.png")
ResetBtn = Button(self, image=Reset, bd=0, bg="black", command=lambda: ResetAct())
ResetBtn.image = Reset
ResetBtn.place(x=148, y=698)
Dtm = PhotoImage(file="Button1.png")
DtmBtn = Button(self, image=Dtm, bd=0, command=lambda: DtmAct())
DtmBtn.image = Dtm
DtmBtn.place(x=48, y=174)
Inverse = PhotoImage(file="Button2.png")
InverseBtn = Button(self, image=Inverse, bd=0, command=lambda: InvAct())
InverseBtn.image = Inverse
InverseBtn.place(x=48, y=282)
Trans = PhotoImage(file="Button3.png")
TransBtn = Button(self, image=Trans, bd=0, command=lambda: TrpAct())
TransBtn.image = Trans
TransBtn.place(x=48, y=390)
Scal = PhotoImage(file="Button4.png")
ScalBtn = Button(self, image=Scal, bd=0, command=lambda: ScaAct())
ScalBtn.image = Scal
ScalBtn.place(x=48, y=498)
Multi = PhotoImage(file="Button5.png")
MultiBtn = Button(self, image=Multi, bd=0, command=lambda: MulAct())
MultiBtn.image = Multi
MultiBtn.place(x=48, y=606)
Triangle = PhotoImage(file="Button6.png")
TriangleBtn = Button(self, image=Triangle, bd=0, command=lambda: TriAct())
TriangleBtn.image = Triangle
TriangleBtn.place(x=281, y=174)
Trac = PhotoImage(file="Button7.png")
TraceBtn = Button(self, image=Trac, bd=0, command=lambda: TrcAct())
TraceBtn.image = Trac
TraceBtn.place(x=281, y=282)
LUdec = PhotoImage(file="Button8.png")
LUdecBtn = Button(self, image=LUdec, bd=0, command=lambda: LUDAct())
LUdecBtn.image = LUdec
LUdecBtn.place(x=281, y=390)
Rank = PhotoImage(file="Button9.png")
RankBtn = Button(self, image=Rank, bd=0, command=lambda: RanAct())
RankBtn.image = Rank
RankBtn.place(x=281, y=498)
Pwr = PhotoImage(file="Button10.png")
PwrBtn = Button(self, image=Pwr, bd=0, command=lambda: PowAct())
PwrBtn.image = Pwr
PwrBtn.place(x=281, y=606)
TwoMatrix = PhotoImage(file="twoD.png")
TwoMatrixBtn = Button(self, image=TwoMatrix, bd=0, command=lambda: EntLIFT2())
TwoMatrixBtn.image = TwoMatrix
TwoMatrixBtn.place(x=514, y=403)
ThreeMatrix = PhotoImage(file="threeD.png")
ThreeMatrixBtn = Button(self, image=ThreeMatrix, bd=0, command=lambda: EntLIFT3())
ThreeMatrixBtn.image = ThreeMatrix
ThreeMatrixBtn.place(x=634, y=403)
FourMatrix = PhotoImage(file="fourD.png")
FourMatrixBtn = Button(self, image=FourMatrix, bd=0, command=lambda: EntLIFT4())
FourMatrixBtn.image = FourMatrix
FourMatrixBtn.place(x=754, y=403)
def validate(string):
    """Keystroke validator: accept partial signed decimal numbers.

    Allows the empty string and any prefix of an optionally signed decimal
    (at most one '+', one '-', one '.'), so intermediate states like "-"
    or "." can be typed.
    """
    if string == "":
        return True
    match = re.compile(r"(\+|\-)?[0-9.]*$").match(string)
    return (
        string.count('+') <= 1
        and string.count('-') <= 1
        and string.count('.') <= 1
        and match is not None
        and match.group(0) != ""
    )
def on_validate(P):
    # Tk validatecommand hook: '%P' is the proposed new text of the entry.
    return validate(P)
# --- 4x4 grid of matrix cell entries ---------------------------------------
# Row r / column c entry is MrXEnt (X in a..d). Every cell registers the
# keystroke validator above so only partial signed decimals can be typed.
# The 2x2 and 3x3 modes reuse subsets of this grid (see EntLIFT2/EntLIFT3).
M1aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1aEnt.config(validatecommand=(M1aEnt.register(on_validate), '%P'))
M1aEnt.place(x=529, y=61, width=50, height=50)
M1bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1bEnt.config(validatecommand=(M1bEnt.register(on_validate), '%P'))
M1bEnt.place(x=609, y=61, width=50, height=50)
M1cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1cEnt.config(validatecommand=(M1cEnt.register(on_validate), '%P'))
M1cEnt.place(x=689, y=61, width=50, height=50)
M1dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M1dEnt.config(validatecommand=(M1dEnt.register(on_validate), '%P'))
M1dEnt.place(x=769, y=61, width=50, height=50)
M2aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2aEnt.config(validatecommand=(M2aEnt.register(on_validate), '%P'))
M2aEnt.place(x=529, y=146, width=50, height=50)
M2bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2bEnt.config(validatecommand=(M2bEnt.register(on_validate), '%P'))
M2bEnt.place(x=609, y=146, width=50, height=50)
M2cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2cEnt.config(validatecommand=(M2cEnt.register(on_validate), '%P'))
M2cEnt.place(x=689, y=146, width=50, height=50)
M2dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M2dEnt.config(validatecommand=(M2dEnt.register(on_validate), '%P'))
M2dEnt.place(x=769, y=146, width=50, height=50)
M3aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3aEnt.config(validatecommand=(M3aEnt.register(on_validate), '%P'))
M3aEnt.place(x=529, y=231, width=50, height=50)
M3bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3bEnt.config(validatecommand=(M3bEnt.register(on_validate), '%P'))
M3bEnt.place(x=609, y=231, width=50, height=50)
M3cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3cEnt.config(validatecommand=(M3cEnt.register(on_validate), '%P'))
M3cEnt.place(x=689, y=231, width=50, height=50)
M3dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M3dEnt.config(validatecommand=(M3dEnt.register(on_validate), '%P'))
M3dEnt.place(x=769, y=231, width=50, height=50)
M4aEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4aEnt.config(validatecommand=(M4aEnt.register(on_validate), '%P'))
M4aEnt.place(x=529, y=316, width=50, height=50)
M4bEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4bEnt.config(validatecommand=(M4bEnt.register(on_validate), '%P'))
M4bEnt.place(x=609, y=316, width=50, height=50)
M4cEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4cEnt.config(validatecommand=(M4cEnt.register(on_validate), '%P'))
M4cEnt.place(x=689, y=316, width=50, height=50)
M4dEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center", validate="key")
M4dEnt.config(validatecommand=(M4dEnt.register(on_validate), '%P'))
M4dEnt.place(x=769, y=316, width=50, height=50)
# Scalar operand for the "multiply by" operation.
MultiEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center",validate="key")
MultiEnt.config(validatecommand=(MultiEnt.register(on_validate), '%P'))
MultiEnt.place(x=176, y=611, width=50, height=50)
# Exponent operand for the "power by" operation.
PowEnt = Entry(self, background="white", font="-family {Segoe Print} -size 16", justify="center",validate="key")
PowEnt.config(validatecommand=(PowEnt.register(on_validate), '%P'))
PowEnt.place(x=411, y=611, width=50, height=50)
# Full-width result label, plus a left/right pair used by the inverse view.
Result = Label(self, bg='#016738', fg='white', anchor='w', justify="left")
Result.place(x=898, y=50, width=344, height=685)
Result1 = Label(self, bg='#016738', fg='white', anchor='w', justify="right")
Result1.place(x=898, y=50, width=172, height=685)
Result2 = Label(self, bg='#016738', fg='white', anchor='w', justify="left")
Result2.place(x=1070, y=50, width=172, height=685)
def EntLOWER():
    """Hide every matrix cell entry (stack them below the background)."""
    for ent in (M1aEnt, M1bEnt, M1cEnt, M1dEnt,
                M2aEnt, M2bEnt, M2cEnt, M2dEnt,
                M3aEnt, M3bEnt, M3cEnt, M3dEnt,
                M4aEnt, M4bEnt, M4cEnt, M4dEnt):
        ent.lower()
# Start with all cells hidden until a dimension button is pressed.
EntLOWER()
def EntLIFT4():
    """Show the full 4x4 entry grid and record the active dimension."""
    SMPage.DIM = 4
    EntLOWER()
    for ent in (M1aEnt, M1bEnt, M1cEnt, M1dEnt,
                M2aEnt, M2bEnt, M2cEnt, M2dEnt,
                M3aEnt, M3bEnt, M3cEnt, M3dEnt,
                M4aEnt, M4bEnt, M4cEnt, M4dEnt):
        ent.lift()
def EntLIFT3():
    """Show the 3x3 subset of the entry grid and record the active dimension."""
    SMPage.DIM = 3
    EntLOWER()
    for ent in (M1aEnt, M1bEnt, M1cEnt,
                M2aEnt, M2bEnt, M2cEnt,
                M3aEnt, M3bEnt, M3cEnt):
        ent.lift()
def EntLIFT2():
    """Show the centered 2x2 subset of the entry grid and record the dimension."""
    SMPage.DIM = 2
    EntLOWER()
    for ent in (M2bEnt, M2cEnt, M3bEnt, M3cEnt):
        ent.lift()
def SM():
    """Build a SingleMatrix from the entries of the active dimension.

    Returns the sentinel SingleMatrix(0, 0) when no dimension is selected
    or any visible cell does not hold a valid number (per FTest).
    """
    if SMPage.DIM == 2:
        cells = (M2bEnt, M2cEnt, M3bEnt, M3cEnt)
    elif SMPage.DIM == 3:
        cells = (M1aEnt, M1bEnt, M1cEnt,
                 M2aEnt, M2bEnt, M2cEnt,
                 M3aEnt, M3bEnt, M3cEnt)
    elif SMPage.DIM == 4:
        cells = (M1aEnt, M1bEnt, M1cEnt, M1dEnt,
                 M2aEnt, M2bEnt, M2cEnt, M2dEnt,
                 M3aEnt, M3bEnt, M3cEnt, M3dEnt,
                 M4aEnt, M4bEnt, M4cEnt, M4dEnt)
    else:
        return SingleMatrix(0, 0)
    raw = [cell.get() for cell in cells]
    if not all(FTest(text) for text in raw):
        return SingleMatrix(0, 0)
    return SingleMatrix(tuple(float(text) for text in raw), SMPage.DIM)
def FTest(x):
    """Return True if *x* parses as a plain signed decimal (e.g. "-3", "1.5").

    Accepts at most one leading '+' or '-' and at most one '.', with at
    least one digit; rejects exponents, spaces and the empty string.
    """
    # Strip at most ONE leading sign. The original used lstrip('-'), which
    # removes ALL leading '-' characters, so e.g. "--5" passed the check
    # even though float("--5") raises ValueError at the call sites.
    if x[:1] in ('+', '-'):
        x = x[1:]
    return x.replace('.', '', 1).isdigit()
def Avalue():
    """Scalar multiplier taken from MultiEnt; defaults to 1 on invalid input."""
    raw = MultiEnt.get()
    return float(raw) if FTest(raw) else 1
def Bvalue():
    """Exponent taken from PowEnt; defaults to 1 on invalid input."""
    raw = PowEnt.get()
    return float(raw) if FTest(raw) else 1
def SME():
    """Build a DoubleMatrix carrying the grid plus the scalar/exponent operands.

    Returns the sentinel DoubleMatrix(0, 0, 0, 0, 0) when no dimension is
    selected or any visible cell does not hold a valid number (per FTest).
    """
    if SMPage.DIM == 2:
        cells = (M2bEnt, M2cEnt, M3bEnt, M3cEnt)
    elif SMPage.DIM == 3:
        cells = (M1aEnt, M1bEnt, M1cEnt,
                 M2aEnt, M2bEnt, M2cEnt,
                 M3aEnt, M3bEnt, M3cEnt)
    elif SMPage.DIM == 4:
        cells = (M1aEnt, M1bEnt, M1cEnt, M1dEnt,
                 M2aEnt, M2bEnt, M2cEnt, M2dEnt,
                 M3aEnt, M3bEnt, M3cEnt, M3dEnt,
                 M4aEnt, M4bEnt, M4cEnt, M4dEnt)
    else:
        return DoubleMatrix(0, 0, 0, 0, 0)
    raw = [cell.get() for cell in cells]
    if not all(FTest(text) for text in raw):
        return DoubleMatrix(0, 0, 0, 0, 0)
    values = tuple(float(text) for text in raw)
    return DoubleMatrix(values, 0, SMPage.DIM, Avalue(), Bvalue())
def RESULTlift():
    # Bring the shared result label to the front and force a redraw.
    Result.lift()
    Result.update()
def DtmAct():
    # Determinant of the current matrix.
    Result.configure(font=("PragmataPro", 14), anchor='n', text=SM().Determinant())
    RESULTlift()
def InvAct():
    # Inverse: shown across two side-by-side labels (left/right halves).
    Result1.lift()
    Result2.lift()
    Result1.configure(font=("Lucida Console", 8), anchor='ne', text=SM().Inverse())
    Result1.update()
    Result2.configure(font=("Lucida Console", 8), anchor='nw', text=SM().InverseRight())
    Result2.update()
def TrpAct():
    # Transpose.
    Result.configure(font=("Lucida Console", 20), anchor='n', text=SM().Transpose())
    RESULTlift()
def ScaAct():
    # Scalar form.
    Result.configure(font=("Menlo", 17), anchor='n', text=SM().Scalar())
    RESULTlift()
def MulAct():
    # Multiply by the MultiEnt value (SME carries the operand).
    Result.configure(font=("PragmataPro", 18), anchor='n', text=SME().MultiplyBy())
    RESULTlift()
def TriAct():
    # Triangular form.
    Result.configure(font=("Menlo", 15), anchor='n', text=SM().Triangular())
    RESULTlift()
def TrcAct():
    # Trace.
    Result.configure(font=("Lucida Console", 16), anchor='n', text=SM().Trace())
    RESULTlift()
def LUDAct():
    # LU decomposition.
    Result.configure(font=("Menlo", 15), anchor='n', text=SM().LUDec())
    RESULTlift()
def RanAct():
    # Rank.
    Result.configure(font=("PragmataPro", 15), anchor='n', text=SM().Rank())
    RESULTlift()
def PowAct():
    # Raise to the PowEnt exponent (SME carries the operand).
    Result.configure(font=("PragmataPro", 11), anchor='n', text=SME().PowerBy())
    RESULTlift()
def BackAct():
    # Play the "back" jingle, restore the menu background music, and return
    # to the matrix operations menu frame.
    # NOTE(review): unlike the other actions this does not check SMPage.MUTE
    # before playing -- confirm whether that is intentional.
    BackS = pygame.mixer.Sound("DisneyBack.wav")
    BackS.play()
    pygame.mixer.music.load("MenuBG.ogg")
    pygame.mixer.music.play(-1)
    controller.show_frame("MatrixPage")
def InfoAct():
    # Toggle the info popup; SMPage.INFO tracks whether it is shown.
    if SMPage.INFO == False:
        SMPage.INFO = True
        if SMPage.MUTE == False:
            InfoS = pygame.mixer.Sound("DisneyInfoButton.wav")
            InfoS.play()
        # Popup (and the info button above it) are raised regardless of mute.
        InfoPop.lift()
        InfoBtn.lift()
    else:
        SMPage.INFO = False
        InfoPop.lower()
def ResetAct():
    # Clear every cell and hide the grid.
    # Sound cue and the 1 s pause only happen when not muted.
    if SMPage.MUTE == False:
        ClearS = pygame.mixer.Sound("DisneyReset.wav")
        ClearS.play()
        # NOTE(review): time.sleep blocks the Tk event loop -- confirm the
        # freeze while the sound plays is acceptable/intended.
        time.sleep(1)
    Ent = (M1aEnt, M1bEnt, M1cEnt, M1dEnt, M2aEnt, M2bEnt, M2cEnt, M2dEnt, M3aEnt, M3bEnt, M3cEnt, M3dEnt, M4aEnt, M4bEnt, M4cEnt, M4dEnt)
    for i in range(16):
        Ent[i].delete(0, END)
        Ent[i].lower()
def MuteAct():
    # Toggle background music; the MuteOff overlay marks the muted state.
    if SMPage.MUTE == True:
        SMPage.MUTE = False
        pygame.mixer.music.load("MickeyMouse.ogg")
        pygame.mixer.music.play(-1)
        MuteOff.lower()
    else:
        SMPage.MUTE = True
        pygame.mixer.music.stop()
        MuteOff.lift()
def RandAct():
    # Randomize the visible matrix cells, with a sound cue when not muted.
    if SMPage.MUTE == False:
        RandomS = pygame.mixer.Sound("DisneyRandom.wav")
        RandomS.play()
    # NOTE(review): assumed the randomization runs whether or not sound is
    # muted -- confirm against the original indentation.
    runRandAct()
def runRandAct():
    """Fill the cells of the active matrix size with random ints in [-9, 9]."""
    # Fixed 2 s pause (original behavior) before the grid changes.
    time.sleep(2)
    cells = (M1aEnt, M1bEnt, M1cEnt, M1dEnt,
             M2aEnt, M2bEnt, M2cEnt, M2dEnt,
             M3aEnt, M3bEnt, M3cEnt, M3dEnt,
             M4aEnt, M4bEnt, M4cEnt, M4dEnt)
    if SMPage.DIM == 2:
        active = (5, 6, 9, 10)                      # M2b,M2c / M3b,M3c
    elif SMPage.DIM == 3:
        active = (0, 1, 2, 4, 5, 6, 8, 9, 10)       # rows 1-3, cols a-c
    elif SMPage.DIM == 4:
        active = tuple(range(16))
    else:
        SMPage.DIM = 0
        active = ()
    for idx in active:
        cells[idx].delete(0, END)
        cells[idx].insert(1, random.randrange(-9, 10))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import multiprocessing
import sys
import unittest
from io import StringIO
import click
import tempfile
from fissix import pygram, pytree
from fissix.pgen2.driver import Driver
from bowler import Query
from bowler.types import LN, SYMBOL, TOKEN
class BowlerTestCase(unittest.TestCase):
    """Subclass of TestCase that captures stdout and makes it easier to run Bowler."""

    def setUp(self):
        # Capture everything written to stdout for the duration of the test.
        self.buffer = StringIO()
        # Replace the write method instead of stdout so that already-existing
        # loggers end up writing here.
        sys.stdout._saved_write = sys.stdout.write
        sys.stdout.write = self.buffer.write
        # Pretend we are not a terminal so click/bowler emit no color codes.
        sys.stdout._saved_isatty = sys.stdout.isatty
        sys.stdout.isatty = lambda: False

    def tearDown(self):
        # Restore the patched stdout methods (guarded in case setUp failed).
        if hasattr(sys.stdout, "_saved_write"):
            sys.stdout.write = sys.stdout._saved_write
            del sys.stdout._saved_write
        if hasattr(sys.stdout, "_saved_isatty"):
            sys.stdout.isatty = sys.stdout._saved_isatty
            del sys.stdout._saved_isatty

    def _formatMessage(self, msg1, msg2):
        # unittest hook: append the captured stdout to every assertion
        # failure message so failing tests show what the tool printed.
        stdout_text = self.buffer.getvalue()
        msg = msg1 or msg2
        if stdout_text:
            msg += "\n"
            msg += "-" * 20 + "< captured stdout >" + "-" * 20 + "\n"
            msg += stdout_text + "\n"
            msg += "-" * 20 + "< end stdout >" + "-" * 20 + "\n"
        return msg

    def run_bowler_modifier(
        self,
        input_text,
        selector=None,
        modifier=None,
        selector_func=None,
        modifier_func=None,
        in_process=True,
        query_func=None,
    ):
        """Returns the modified text."""
        if not (selector or selector_func or query_func):
            raise ValueError("Pass selector")
        if not (modifier or modifier_func or query_func):
            raise ValueError("Pass modifier")

        # Queue lets modifier exceptions escape even from a child process.
        exception_queue = multiprocessing.Queue()

        def store_exceptions_on(func):
            @functools.wraps(func)
            def inner(node, capture, filename):
                # When in_process=False, this runs in another process. See notes below.
                try:
                    return func(node, capture, filename)
                except Exception as e:
                    exception_queue.put(e)

            return inner

        def default_query_func(files):
            # Assemble a Query from the selector/modifier pieces the caller gave.
            if selector_func:
                q = selector_func(files)
            else:
                q = Query(files).select(selector)
            if modifier_func:
                q = modifier_func(q)
            else:
                q = q.modify(modifier)
            return q

        if query_func is None:
            query_func = default_query_func

        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False) as f:
            # TODO: I'm almost certain this will not work on Windows, since
            # NamedTemporaryFile has it already open for writing. Consider
            # using mktemp directly?
            f.write(input_text + "\n")
            f.close()
            query = query_func([f.name])
            assert query is not None, "Remember to return the Query"
            assert query.retcode is None, "Return before calling .execute"
            assert len(query.transforms) == 1, "TODO: Support multiple"
            # Wrap every callback so its exceptions land on exception_queue.
            for i in range(len(query.current.callbacks)):
                query.current.callbacks[i] = store_exceptions_on(
                    query.current.callbacks[i]
                )
            # We require the in_process parameter in order to record coverage properly,
            # but it also helps in bubbling exceptions and letting tests read state set
            # by modifiers.
            query.execute(
                interactive=False, write=True, silent=False, in_process=in_process
            )
            # In the case of in_process=False (mirroring normal use of the tool) we use
            # the queue to ship back exceptions from local_process, which can actually
            # fail the test. Normally exceptions in modifiers are not printed
            # at all unless you pass --debug, and even then you don't get the
            # traceback.
            # See https://github.com/facebookincubator/Bowler/issues/63
            if not exception_queue.empty():
                raise AssertionError from exception_queue.get()
            with open(f.name, "r") as fr:
                return fr.read().rstrip()

    def run_bowler_modifiers(
        self, cases, selector=None, modifier=None, query_func=None
    ):
        # Table-driven variant: cases is a list of (input, expected) pairs.
        for input, expected in cases:
            with self.subTest(input):
                output = self.run_bowler_modifier(
                    input, selector, modifier, query_func=query_func
                )
                self.assertMultiLineEqual(expected, output)

    def parse_line(self, source: str) -> LN:
        """Parse a single statement and return its fissix node/leaf."""
        grammar = pygram.python_grammar_no_print_statement
        driver = Driver(grammar, convert=pytree.convert)
        # Skip file_input, simple_stmt
        return driver.parse_string(source + "\n").children[0].children[0]
class BowlerTestCaseTest(BowlerTestCase):
    """Self-tests for the BowlerTestCase helpers above."""

    def test_stdout_capture(self):
        print("hi")
        print("there")
        self.assertIn("hi\n", self.buffer.getvalue())

    def test_stdout_click_no_colors(self):
        # This tests that we patched isatty correctly.
        click.echo(click.style("hi", fg="red", bold=True))
        self.assertEqual("hi\n", self.buffer.getvalue())

    def test_run_bowler_modifier(self):
        input = "x=a*b"
        selector = "term< any op='*' any >"

        def modifier(node, capture, filename):
            capture["op"].value = "/"
            capture["op"].changed()

        output = self.run_bowler_modifier(input, selector, modifier)
        self.assertEqual("x=a/b", output)

    def test_run_bowler_modifier_parse_error(self):
        # Un-parseable input must not leave "None" in the output file.
        input = " if x:\n bad"
        selector = "any"
        output = self.run_bowler_modifier(input, selector, lambda *args: None)
        self.assertFalse("None" in output)

    def test_run_bowler_modifier_query_func(self):
        input = "x=a*b"
        selector = "term< any op='*' any >"

        def modifier(node, capture, filename):
            capture["op"].value = "/"
            capture["op"].changed()

        def query_func(arg):
            return Query(arg).select(selector).modify(modifier)

        output = self.run_bowler_modifier(input, query_func=query_func)
        self.assertEqual("x=a/b", output)

    def test_run_bowler_modifier_modifier_func(self):
        input = "x=a*b"
        selector = "term< any op='*' any >"

        def selector_func(arg):
            return Query(arg).select(selector)

        def modifier(node, capture, filename):
            capture["op"].value = "/"
            capture["op"].changed()

        def modifier_func(q):
            return q.modify(modifier)

        output = self.run_bowler_modifier(
            input, selector_func=selector_func, modifier_func=modifier_func
        )
        self.assertEqual("x=a/b", output)

    def test_run_bowler_modifier_ferries_exception(self):
        # A modifier with the wrong arity must fail the test in both modes.
        input = "x=a*b"
        selector = "term< any op='*' any >"

        def modifier(not_enough_args):
            pass

        # Should work in both modes
        self.assertRaises(
            AssertionError,
            lambda: self.run_bowler_modifier(
                input, selector, modifier, in_process=False
            ),
        )
        self.assertRaises(
            AssertionError,
            lambda: self.run_bowler_modifier(
                input, selector, modifier, in_process=True
            ),
        )

    def test_parse_line_leaf(self):
        input = "2.5"
        tree = self.parse_line(input)
        self.assertEqual(TOKEN.NUMBER, tree.type)
        self.assertEqual("2.5", tree.value)

    def test_parse_line_node(self):
        input = "x = (y+1)"
        tree = self.parse_line(input)
        self.assertEqual(SYMBOL.expr_stmt, tree.type)
        self.assertEqual(TOKEN.NAME, tree.children[0].type)
        self.assertEqual(TOKEN.EQUAL, tree.children[1].type)
        self.assertEqual(SYMBOL.atom, tree.children[2].type)
        self.assertEqual("x", tree.children[0].value)
|
nilq/baby-python
|
python
|
import random
import json
import requests
from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
# Flask application object; module name is used to locate templates/static.
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
    # 'rcomb' query parameter overrides the default combiner endpoint; the
    # default points at the appcombiner service hostname.
    # NOTE(review): POST is accepted but only query args are read -- confirm
    # whether form data was intended. The value is passed into the template;
    # confirm index.html relies on Jinja2 autoescaping (no |safe) since this
    # is user-controlled input.
    rcomb = request.args.get('rcomb','http://appcombiner:5003/rcomb')
    return render_template('index.html', rcomb=str(rcomb))
if __name__=='__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger (arbitrary code
    # execution) and must not be used in production, especially on 0.0.0.0.
    app.run(host='0.0.0.0', port=5000, debug=True)
|
nilq/baby-python
|
python
|
from django.urls import reverse
from django.views import generic
from . import forms
class KindeditorFormView(generic.FormView):
    """Render and process the KindEditor demo form."""

    form_class = forms.KindeditorForm
    template_name = "form.html"

    def get_success_url(self):
        # Redirect back to the same form after a successful POST.
        return reverse("kindeditor-form")


# Module-level callable for use in a URLconf.
kindeditor_form_view = KindeditorFormView.as_view()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import absolute_import
from builtins import str
from .Property import Property
class String(Property):
    """A Property whose values are coerced to ``str``."""

    def __init__(
        self, name, default="", public=None, validator=None, tip="", doc=""
    ):
        # Delegate storage/validation plumbing to the Property base class.
        super(String, self).__init__(
            name, default, public, validator, tip, doc
        )
        # Type tag advertised by this property; presumably read by the
        # surrounding framework -- confirm against Property's users.
        self.type = "str"
        return

    def _cast(self, value):
        # Coerce any assigned value to str (builtins.str for py2/3 compat).
        return str(value)


# version
__id__ = "$Id$"

# End of file
|
nilq/baby-python
|
python
|
from pathlib import Path
from typing import List
def get_asset(name: str) -> Path:
    """Return the first path matching *name* anywhere under this module's directory.

    Raises FileNotFoundError when nothing matches.
    """
    matches = Path(__file__).parent.rglob(name)
    try:
        found = next(matches)
    except StopIteration:
        raise FileNotFoundError(name)
    return found
def find_asset(name: str) -> List[Path]:
    """Return every path matching *name* anywhere under this module's directory.

    Raises FileNotFoundError when nothing matches.
    """
    found = [p for p in Path(__file__).parent.rglob(name)]
    if not found:
        raise FileNotFoundError(name)
    return found
|
nilq/baby-python
|
python
|
import pandas as pd
import numpy as np
from scipy.stats import multivariate_normal
from sklearn.metrics import accuracy_score
def bayesiano(classes, x_train, y_train, x_test, y_test):
    """Gaussian (quadratic) Bayes classifier.

    Fits one multivariate normal per class on the training data and labels
    every test row by maximum posterior (class likelihood * class prior).

    Parameters
    ----------
    classes : sequence of class labels (order defines the posterior columns)
    x_train : ndarray (n_train, n_features)
    y_train : ndarray (n_train,)
    x_test  : ndarray (n_test, n_features)
    y_test  : ndarray (n_test,) -- returned unchanged alongside predictions

    Returns
    -------
    (y_test, y_pred) : the given true labels and the list of predicted labels
    """
    # One posterior column per class, one row per TEST observation.
    # (The original sized this by x_train.shape[0], which breaks whenever
    # the test set is larger than the training set.)
    P = pd.DataFrame(data=np.zeros((x_test.shape[0], len(classes))), columns=classes)
    Pc = np.zeros(len(classes))  # prior probability of each class
    for i in np.arange(0, len(classes)):
        # Indices of training samples of class i. The original kept the full
        # np.where tuple, so len(...) was the number of axes (always 1)
        # instead of the number of samples, making every prior 1/n_train.
        elements = np.where(y_train == classes[i])[0]
        Pc[i] = len(elements) / len(y_train)
        Z = x_train[elements, :]         # training samples of class i
        m = np.mean(Z, axis=0)           # class mean vector
        cv = np.cov(np.transpose(Z))     # class covariance matrix
        for j in np.arange(0, x_test.shape[0]):
            x = x_test[j, :]
            # Class-conditional likelihood times the prior.
            pj = multivariate_normal.pdf(x, mean=m, cov=cv, allow_singular=True)
            P.loc[j, classes[i]] = pj * Pc[i]
    # Predict the class with the largest (unnormalized) posterior per row.
    y_pred = []
    for i in np.arange(0, x_test.shape[0]):
        c = np.argmax(np.array(P.iloc[[i]]))
        y_pred.append(classes[c])
    return y_test, y_pred
|
nilq/baby-python
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v3.proto.resources import bidding_strategy_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2
from google.ads.google_ads.v3.proto.services import bidding_strategy_service_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2
class BiddingStrategyServiceStub(object):
    """Proto file describing the Bidding Strategy service.

    Service to manage bidding strategies.
    """

    # NOTE: protoc-generated gRPC stub -- regenerate from the .proto file
    # rather than editing by hand.

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: fetch one BiddingStrategy resource.
        self.GetBiddingStrategy = channel.unary_unary(
            '/google.ads.googleads.v3.services.BiddingStrategyService/GetBiddingStrategy',
            request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.GetBiddingStrategyRequest.SerializeToString,
            response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2.BiddingStrategy.FromString,
        )
        # Unary-unary RPC: batch create/update/remove of bidding strategies.
        self.MutateBiddingStrategies = channel.unary_unary(
            '/google.ads.googleads.v3.services.BiddingStrategyService/MutateBiddingStrategies',
            request_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesRequest.SerializeToString,
            response_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesResponse.FromString,
        )
class BiddingStrategyServiceServicer(object):
    """Proto file describing the Bidding Strategy service.

    Service to manage bidding strategies.
    """

    # NOTE: protoc-generated base class -- subclass and override the methods
    # below to implement the server side of the service.

    def GetBiddingStrategy(self, request, context):
        """Returns the requested bidding strategy in full detail.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def MutateBiddingStrategies(self, request, context):
        """Creates, updates, or removes bidding strategies. Operation statuses are
        returned.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_BiddingStrategyServiceServicer_to_server(servicer, server):
    # protoc-generated registration helper: maps each RPC name to the
    # servicer method plus its request/response (de)serializers, then
    # registers the whole service on the given grpc server.
    rpc_method_handlers = {
        'GetBiddingStrategy': grpc.unary_unary_rpc_method_handler(
            servicer.GetBiddingStrategy,
            request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.GetBiddingStrategyRequest.FromString,
            response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_bidding__strategy__pb2.BiddingStrategy.SerializeToString,
        ),
        'MutateBiddingStrategies': grpc.unary_unary_rpc_method_handler(
            servicer.MutateBiddingStrategies,
            request_deserializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesRequest.FromString,
            response_serializer=google_dot_ads_dot_googleads__v3_dot_proto_dot_services_dot_bidding__strategy__service__pb2.MutateBiddingStrategiesResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.ads.googleads.v3.services.BiddingStrategyService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
|
nilq/baby-python
|
python
|
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=
r"""
This file contains QANet model and all used layers.
"""
import math
import mxnet as mx
from mxnet import gluon, nd
from mxnet.initializer import MSRAPrelu, Normal, Uniform, Xavier
from gluonnlp.initializer import HighwayBias
from gluonnlp.model import (ConvolutionalEncoder, DotProductAttentionCell,
Highway, MultiHeadAttentionCell)
from config import opt
from util import mask_logits
class MySoftmaxCrossEntropy(gluon.loss.Loss):
    r"""Caluate the sum of softmax cross entropy.

    Reference:
    http://mxnet.incubator.apache.org/api/python/gluon/loss.html#mxnet.gluon.loss.SoftmaxCrossEntropyLoss

    Parameters
    ----------
    axis : int, default -1
        The axis to sum over when computing softmax and entropy.
    sparse_label : bool, default True
        Whether label is an integer array instead of probalbility distribution.
    from_logits : bool, default False
        Whether input is a log probability (usually from log_softmax) instead of
        unnormalized numbers.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """

    def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None, batch_axis=0,
                 **kwargs):
        super(MySoftmaxCrossEntropy, self).__init__(
            weight, batch_axis, **kwargs)
        # Single underlying loss reused for both the begin and end positions.
        self.loss = gluon.loss.SoftmaxCrossEntropyLoss(axis=axis,
                                                       sparse_label=sparse_label,
                                                       from_logits=from_logits,
                                                       weight=weight,
                                                       batch_axis=batch_axis)

    def forward(self, predict_begin, predict_end, label_begin, label_end):
        r"""Implement forward computation.

        Parameters
        -----------
        predict_begin : NDArray
            Predicted probability distribution of answer begin position,
            input tensor with shape `(batch_size, sequence_length)`
        predict_end : NDArray
            Predicted probability distribution of answer end position,
            input tensor with shape `(batch_size, sequence_length)`
        label_begin : NDArray
            True label of the answer begin position,
            input tensor with shape `(batch_size, )`
        label_end : NDArray
            True label of the answer end position,
            input tensor with shape `(batch_size, )`

        Returns
        --------
        out: NDArray
            output tensor with shape `(batch_size, )`
        """
        # Total loss is the sum of the two positional cross entropies.
        return self.loss(predict_begin, label_begin) + self.loss(predict_end, label_end)
class QANet(gluon.HybridBlock):
r"""QANet model.
We implemented the QANet proposed in the following work::
@article{DBLP:journals/corr/abs-1804-09541,
author = {Adams Wei Yu and
David Dohan and
Minh{-}Thang Luong and
Rui Zhao and
Kai Chen and
Mohammad Norouzi and
Quoc V. Le},
title = {QANet: Combining Local Convolution with Global Self-Attention for
Reading Comprehension},
year = {2018},
url = {http://arxiv.org/abs/1804.09541}
}
"""
def __init__(self, **kwargs):
super(QANet, self).__init__(**kwargs)
with self.name_scope():
self.flatten = gluon.nn.Flatten()
self.dropout = gluon.nn.Dropout(opt.layers_dropout)
self.char_conv = ConvolutionalEncoder(
embed_size=opt.char_emb_dim,
num_filters=opt.char_conv_filters,
ngram_filter_sizes=opt.char_conv_ngrams,
conv_layer_activation=None,
num_highway=0
)
self.highway = gluon.nn.HybridSequential()
with self.highway.name_scope():
self.highway.add(
gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
flatten=False,
use_bias=False,
weight_initializer=Xavier()
)
)
self.highway.add(
Highway(
input_size=opt.emb_encoder_conv_channels,
num_layers=opt.highway_layers,
activation='relu',
highway_bias=HighwayBias(
nonlinear_transform_bias=0.0,
transform_gate_bias=0.0
)
)
)
self.word_emb = gluon.nn.HybridSequential()
with self.word_emb.name_scope():
self.word_emb.add(
gluon.nn.Embedding(
input_dim=opt.word_corpus,
output_dim=opt.word_emb_dim
)
)
self.word_emb.add(
gluon.nn.Dropout(rate=opt.word_emb_dropout)
)
self.char_emb = gluon.nn.HybridSequential()
with self.char_emb.name_scope():
self.char_emb.add(
gluon.nn.Embedding(
input_dim=opt.character_corpus,
output_dim=opt.char_emb_dim,
weight_initializer=Normal(sigma=0.1)
)
)
self.char_emb.add(
gluon.nn.Dropout(rate=opt.char_emb_dropout)
)
with self.name_scope():
self.emb_encoder = Encoder(
kernel_size=opt.emb_encoder_conv_kernerl_size,
num_filters=opt.emb_encoder_conv_channels,
conv_layers=opt.emb_encoder_num_conv_layers,
num_heads=opt.emb_encoder_num_head,
num_blocks=opt.emb_encoder_num_block
)
self.project = gluon.nn.Dense(
units=opt.emb_encoder_conv_channels,
flatten=False,
use_bias=False,
weight_initializer=Xavier()
)
with self.name_scope():
self.co_attention = CoAttention()
with self.name_scope():
self.model_encoder = Encoder(
kernel_size=opt.model_encoder_conv_kernel_size,
num_filters=opt.model_encoder_conv_channels,
conv_layers=opt.model_encoder_conv_layers,
num_heads=opt.model_encoder_num_head,
num_blocks=opt.model_encoder_num_block
)
with self.name_scope():
self.predict_begin = gluon.nn.Dense(
units=1,
use_bias=True,
flatten=False,
weight_initializer=Xavier(
rnd_type='uniform', factor_type='in', magnitude=1),
bias_initializer=Uniform(1.0/opt.model_encoder_conv_channels)
)
self.predict_end = gluon.nn.Dense(
units=1,
use_bias=True,
flatten=False,
weight_initializer=Xavier(
rnd_type='uniform', factor_type='in', magnitude=1),
bias_initializer=Uniform(1.0/opt.model_encoder_conv_channels)
)
def hybrid_forward(self, F, context, query, context_char, query_char,
                   y_begin, y_end):
    r"""Implement forward computation.

    Parameters
    -----------
    context : NDArray
        input tensor with shape `(batch_size, context_sequence_length)`
    query : NDArray
        input tensor with shape `(batch_size, query_sequence_length)`
    context_char : NDArray
        input tensor with shape `(batch_size, context_sequence_length, num_char_per_word)`
    query_char : NDArray
        input tensor with shape `(batch_size, query_sequence_length, num_char_per_word)`
    y_begin : NDArray
        input tensor with shape `(batch_size, )`
    y_end : NDArray
        input tensor with shape `(batch_size, )`

    Returns
    --------
    predicted_begin : NDArray
        output tensor with shape `(batch_size, context_sequence_length)`
    predicted_end : NDArray
        output tensor with shape `(batch_size, context_sequence_length)`
    """
    # NOTE(review): `.shape`, `.asscalar()` and slicing by Python ints below
    # require NDArray (imperative) inputs, so this block cannot run in
    # symbolic/hybridized mode despite the F namespace — confirm intended.
    (batch, _) = context.shape
    # Token id 0 is treated as padding: the masks are 1 where id > 0.
    context_mask = context > 0
    query_mask = query > 0
    # Trim the batch to the longest non-padded sequence to save compute.
    context_max_len = int(context_mask.sum(axis=1).max().asscalar())
    query_max_len = int(query_mask.sum(axis=1).max().asscalar())
    context = F.slice(context, begin=(0, 0), end=(batch, context_max_len))
    query = F.slice(query, begin=(0, 0), end=(batch, query_max_len))
    context_mask = F.slice(
        context_mask,
        begin=(0, 0),
        end=(batch, context_max_len)
    )
    query_mask = F.slice(
        query_mask,
        begin=(0, 0),
        end=(batch, query_max_len)
    )
    context_char = F.slice(
        context_char,
        begin=(0, 0, 0),
        end=(batch, context_max_len, opt.max_character_per_word)
    )
    query_char = F.slice(
        query_char,
        begin=(0, 0, 0),
        end=(batch, query_max_len, opt.max_character_per_word)
    )
    # word embedding
    context_word_emb = self.word_emb(context)
    query_word_emb = self.word_emb(query)
    # char embedding: flatten words so each word's character sequence can be
    # embedded and convolved independently.
    context_char_flat = self.flatten(context_char)
    query_char_flat = self.flatten(query_char)
    context_char_emb = self.char_emb(context_char_flat)
    query_char_emb = self.char_emb(query_char_flat)
    context_char_emb = F.reshape(
        context_char_emb,
        shape=(
            batch*context_max_len,
            opt.max_character_per_word,
            opt.char_emb_dim
        )
    )
    query_char_emb = F.reshape(
        query_char_emb,
        shape=(
            batch*query_max_len,
            opt.max_character_per_word,
            opt.char_emb_dim
        )
    )
    # Put the character axis first for the conv block; char_conv is defined
    # in __init__ (outside this view) — presumably it reduces each word's
    # character sequence to a single vector. TODO confirm.
    context_char_emb = F.transpose(context_char_emb, axes=(1, 0, 2))
    query_char_emb = F.transpose(query_char_emb, axes=(1, 0, 2))
    context_char_emb = self.char_conv(context_char_emb)
    query_char_emb = self.char_conv(query_char_emb)
    context_char_emb = F.reshape(
        context_char_emb,
        shape=(
            batch,
            context_max_len,
            context_char_emb.shape[-1]
        )
    )
    query_char_emb = F.reshape(
        query_char_emb,
        shape=(
            batch,
            query_max_len,
            query_char_emb.shape[-1]
        )
    )
    # concat word and char embedding
    context_concat = F.concat(context_word_emb, context_char_emb, dim=-1)
    query_concat = F.concat(query_word_emb, query_char_emb, dim=-1)
    # highway net
    context_final_emb = self.highway(context_concat)
    query_final_emb = self.highway(query_concat)
    # embedding encoder
    # share the weights between passage and question
    context_emb_encoded = self.emb_encoder(context_final_emb, context_mask)
    query_emb_encoded = self.emb_encoder(query_final_emb, query_mask)
    # context-query attention layer
    M = self.co_attention(context_emb_encoded, query_emb_encoded, context_mask,
                          query_mask, context_max_len, query_max_len)
    M = self.project(M)
    M = self.dropout(M)
    # model encoder layer: three passes through the shared model encoder,
    # producing the representations combined by the two span predictors.
    M_0 = self.model_encoder(M, context_mask)
    M_1 = self.model_encoder(M_0, context_mask)
    M_2 = self.model_encoder(M_1, context_mask)
    # predict layer: begin uses (M_0, M_1), end uses (M_0, M_2); padded
    # positions are pushed to -inf-like values by mask_logits.
    begin_hat = self.flatten(
        self.predict_begin(F.concat(M_0, M_1, dim=-1)))
    end_hat = self.flatten(self.predict_end(F.concat(M_0, M_2, dim=-1)))
    predicted_begin = mask_logits(begin_hat, context_mask)
    predicted_end = mask_logits(end_hat, context_mask)
    return predicted_begin, predicted_end, y_begin, y_end
class Encoder(gluon.HybridBlock):
    r"""
    Stacked block of Embedding encoder or Model encoder.

    Parameters
    ----------
    kernel_size : int
        Kernel size of the depthwise convolutions in every block.
    num_filters : int
        Number of filters for all convolution layers.
    conv_layers : int
        Convolution layers per encoder block.
    num_heads : int
        Heads in each block's multi-head self-attention.
    num_blocks : int
        Number of stacked OneEncoderBlock instances.
    """
    def __init__(self, kernel_size, num_filters, conv_layers=2, num_heads=8,
                 num_blocks=1, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        # NOTE(review): self.dropout is never used — hybrid_forward calls
        # F.Dropout directly. Confirm whether this child block is intentional.
        self.dropout = gluon.nn.Dropout(opt.layers_dropout)
        # Each block contributes conv_layers + 2 sub-layers (convs +
        # self-attention + FFN); used to scale stochastic-depth drop rates.
        total_layers = float((conv_layers + 2) * num_blocks)
        sub_layer_idx = 1
        self.num_blocks = num_blocks
        self.stack_encoders = gluon.nn.HybridSequential()
        with self.stack_encoders.name_scope():
            for _ in range(num_blocks):
                self.stack_encoders.add(
                    OneEncoderBlock(
                        kernel_size=kernel_size,
                        num_filters=num_filters,
                        conv_layers=conv_layers,
                        num_heads=num_heads,
                        total_layers=total_layers,
                        sub_layer_idx=sub_layer_idx
                    )
                )
                # Advance the global sub-layer index for the next block's
                # stochastic-depth schedule.
                sub_layer_idx += (conv_layers + 2)

    def hybrid_forward(self, F, x, mask):
        r"""Implement forward computation.

        Parameters
        -----------
        x : NDArray
            input tensor with shape `(batch_size, sequence_length, features)`
        mask : NDArray
            input tensor with shape `(batch_size, sequence_length)`
        Returns, NDArray
        --------
            output tensor with shape `(batch_size, sequence_length, features)`
        """
        for encoder in self.stack_encoders:
            x = encoder(x, mask)
            # NOTE(review): functional F.Dropout applies in inference mode
            # too (unlike the unused gluon.nn.Dropout child) — confirm.
            x = F.Dropout(x, p=opt.layers_dropout)
        return x
class OneEncoderBlock(gluon.HybridBlock):
    r"""The basic encoder block.

    Layout: position encoding -> conv_layers x (LayerNorm, Dropout,
    DepthwiseConv, stochastic depth) with residuals -> self-attention with
    residual -> position-wise feed-forward with residual.

    Parameters
    ----------
    kernel_size : int
        The kernel size for all depthwise convolution layers.
    num_filters : int
        The number of filters for all convolution layers.
    conv_layers : int
        The number of convolution layers in one encoder block.
    num_heads : int
        The number of heads in multi-head attention layer.
    total_layers : int
    sub_layer_idx : int
        The sub_layer_idx / total_layers is the dropout probability for layer.
    """
    def __init__(self, kernel_size, num_filters, conv_layers, num_heads, total_layers,
                 sub_layer_idx, **kwargs):
        super(OneEncoderBlock, self).__init__(**kwargs)
        self.position_encoder = PositionEncoder()
        self.convs = gluon.nn.HybridSequential()
        with self.convs.name_scope():
            for _ in range(conv_layers):
                one_conv_module = gluon.nn.HybridSequential()
                with one_conv_module.name_scope():
                    one_conv_module.add(
                        gluon.nn.LayerNorm(epsilon=1e-06)
                    )
                    one_conv_module.add(
                        gluon.nn.Dropout(opt.layers_dropout)
                    )
                    one_conv_module.add(
                        DepthwiseConv(
                            kernel_size=kernel_size,
                            num_filters=num_filters,
                            input_channels=num_filters
                        )
                    )
                    # Stochastic depth: drop probability grows linearly with
                    # the sub-layer's depth in the whole stack.
                    one_conv_module.add(
                        StochasticDropoutLayer(
                            dropout=(sub_layer_idx / total_layers) *
                            (1 - opt.p_l)
                        )
                    )
                sub_layer_idx += 1
                self.convs.add(one_conv_module)
        with self.name_scope():
            self.dropout = gluon.nn.Dropout(opt.layers_dropout)
            self.attention = SelfAttention(num_heads=num_heads)
            self.attention_dropout = StochasticDropoutLayer(
                (sub_layer_idx / total_layers) * (1 - opt.p_l))
            sub_layer_idx += 1
            self.attention_layer_norm = gluon.nn.LayerNorm(epsilon=1e-06)
        self.positionwise_ffn = gluon.nn.HybridSequential()
        with self.positionwise_ffn.name_scope():
            self.positionwise_ffn.add(
                gluon.nn.LayerNorm(epsilon=1e-06)
            )
            self.positionwise_ffn.add(
                gluon.nn.Dropout(rate=opt.layers_dropout)
            )
            self.positionwise_ffn.add(
                gluon.nn.Dense(
                    units=opt.emb_encoder_conv_channels,
                    activation='relu',
                    use_bias=True,
                    weight_initializer=MSRAPrelu(),
                    flatten=False
                )
            )
            self.positionwise_ffn.add(
                gluon.nn.Dense(
                    units=opt.emb_encoder_conv_channels,
                    use_bias=True,
                    weight_initializer=Xavier(),
                    flatten=False
                )
            )
            self.positionwise_ffn.add(
                StochasticDropoutLayer(
                    dropout=(sub_layer_idx / total_layers) * (1 - opt.p_l)
                )
            )

    def hybrid_forward(self, F, x, mask):
        r"""Implement forward computation.

        Parameters
        -----------
        x : NDArray
            input tensor with shape `(batch_size, sequence_length, hidden_size)`
        mask : NDArray
            input tensor with shape `(batch_size, sequence_length)`
        Returns
        --------
        x : NDArray
            output tensor with shape `(batch_size, sequence_length, hidden_size)`
        mask : NDArray
            output tensor with shape `(batch_size, sequence_length)`
        """
        x = self.position_encoder(x)
        # Residual connection around every conv sub-module; when the
        # stochastic dropout inside `conv` fires, only the residual survives.
        for conv in self.convs:
            residual = x
            x = conv(x) + residual
        # Pre-norm residual self-attention.
        residual = x
        x = self.attention_layer_norm(x)
        # NOTE(review): functional dropout — active in inference too; confirm.
        x = F.Dropout(x, p=opt.layers_dropout)
        x = self.attention(x, mask)
        x = self.attention_dropout(x) + residual
        # Residual position-wise feed-forward network.
        return x + self.positionwise_ffn(x)
class StochasticDropoutLayer(gluon.HybridBlock):
    r"""
    Stochastic dropout a layer.

    With probability ``dropout`` the wrapped computation is skipped entirely
    and a zero tensor is returned; otherwise regular dropout is applied.
    """
    def __init__(self, dropout, **kwargs):
        super(StochasticDropoutLayer, self).__init__(**kwargs)
        # Probability of dropping the whole layer.
        self.dropout = dropout
        with self.name_scope():
            # Regular element-wise dropout used when the layer survives.
            self.dropout_fn = gluon.nn.Dropout(dropout)

    def hybrid_forward(self, F, inputs):
        # NOTE(review): the layer-drop decision is sampled here at every call,
        # so it also fires at inference time — confirm intended.
        if F.random.uniform().asscalar() < self.dropout:
            # Return zeros; presumably broadcast against the caller's
            # residual add, leaving only the residual — TODO confirm the
            # (1,) shape broadcasts correctly for all call sites.
            return F.zeros(shape=(1,))
        else:
            return self.dropout_fn(inputs)
class SelfAttention(gluon.HybridBlock):
    r"""
    Implementation of self-attention with gluonnlp.model.MultiHeadAttentionCell
    """
    def __init__(self, num_heads, **kwargs):
        super(SelfAttention, self).__init__(**kwargs)
        with self.name_scope():
            # Scaled dot-product attention split across `num_heads` heads;
            # query/key/value projections all use the encoder channel size.
            self.attention = MultiHeadAttentionCell(
                num_heads=num_heads,
                base_cell=DotProductAttentionCell(
                    scaled=True,
                    dropout=opt.layers_dropout,
                    use_bias=False
                ),
                query_units=opt.emb_encoder_conv_channels,
                key_units=opt.emb_encoder_conv_channels,
                value_units=opt.emb_encoder_conv_channels,
                use_bias=False,
                weight_initializer=Xavier()
            )

    def hybrid_forward(self, F, x, mask):
        r"""Implement forward computation.

        Parameters
        -----------
        x : NDArray
            input tensor with shape `(batch_size, sequence_length, hidden_size)`
        mask : NDArray
            input tensor with shape `(batch_size, sequence_length)`
        Returns
        --------
        x : NDArray
            output tensor with shape `(batch_size, sequence_length, hidden_size)`
        """
        # Outer product of the 1-D mask with itself gives the pairwise
        # (query position, key position) attention mask.
        mask = F.batch_dot(mask.expand_dims(axis=2), mask.expand_dims(axis=1))
        # The cell returns (context, attention_weights); keep the context.
        return self.attention(x, x, mask=mask)[0]
class PositionEncoder(gluon.HybridBlock):
    r"""
    An implementation of position encoder.

    Adds fixed sinusoidal position signals (Transformer-style: sin/cos at
    geometrically spaced frequencies) to the input.
    """
    def __init__(self, **kwargs):
        super(PositionEncoder, self).__init__(**kwargs)
        with self.name_scope():
            pass

    def hybrid_forward(self, F, x, min_timescale=1.0, max_timescale=1e4):
        r"""Implement forward computation.

        Parameters
        -----------
        x : NDArray
            input tensor with shape `(batch_size, sequence_length, hidden_size)`
        Returns
        --------
         : NDArray
            output tensor with shape `(batch_size, sequence_length, hidden_size)`
        """
        # NOTE(review): `.shape` and direct `nd.*` calls tie this block to
        # imperative (NDArray) mode; it cannot be hybridized — confirm.
        length = x.shape[1]
        channels = x.shape[2]
        position = nd.array(range(length))
        # Half the channels carry sin components, half carry cos.
        # Assumes `channels` is even — TODO confirm (reshape below fails otherwise).
        num_timescales = channels // 2
        log_timescale_increment = (
            math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1))
        inv_timescales = min_timescale * \
            nd.exp(nd.array(range(num_timescales)) * -log_timescale_increment)
        # Outer product: (length, 1) * (1, num_timescales) -> (length, num_timescales).
        scaled_time = F.expand_dims(
            position, 1) * F.expand_dims(inv_timescales, 0)
        signal = F.concat(F.sin(scaled_time), F.cos(scaled_time), dim=1)
        signal = F.reshape(signal, shape=(1, length, channels))
        # Broadcast the (1, length, channels) signal over the batch.
        return x + signal.as_in_context(x.context)
class DepthwiseConv(gluon.HybridBlock):
    r"""
    Depthwise-separable 1-D convolution.

    A depthwise convolution (one filter per input channel) followed by a
    1x1 pointwise convolution with ReLU that mixes the channels.
    """
    def __init__(self, kernel_size, num_filters, input_channels, **kwargs):
        super(DepthwiseConv, self).__init__(**kwargs)
        with self.name_scope():
            # groups == channels makes this a true depthwise convolution;
            # "same" padding keeps the sequence length unchanged.
            self.depthwise_conv = gluon.nn.Conv1D(
                channels=input_channels,
                kernel_size=kernel_size,
                padding=kernel_size // 2,
                groups=input_channels,
                use_bias=False,
                weight_initializer=MSRAPrelu()
            )
            # 1x1 convolution mixing channels, with ReLU activation.
            self.pointwise_conv = gluon.nn.Conv1D(
                channels=num_filters,
                kernel_size=1,
                activation='relu',
                use_bias=True,
                weight_initializer=MSRAPrelu(),
                bias_initializer='zeros'
            )

    def hybrid_forward(self, F, inputs):
        r"""Implement forward computation.

        Parameters
        -----------
        inputs : NDArray
            input tensor with shape `(batch_size, sequence_length, hidden_size)`
        Returns
        --------
        x : NDArray
            output tensor with shape `(batch_size, sequence_length, new_hidden_size)`
        """
        # Conv1D expects (batch, channels, length); inputs arrive with the
        # channel axis last, so transpose in and back out.
        channel_first = F.transpose(inputs, axes=(0, 2, 1))
        mixed = self.pointwise_conv(self.depthwise_conv(channel_first))
        return F.transpose(mixed, axes=(0, 2, 1))
class CoAttention(gluon.HybridBlock):
    r"""
    An implementation of co-attention block.

    Computes context-to-query (c2q) and query-to-context (q2c) attention from
    a trilinear similarity matrix and concatenates the attended features.
    """
    def __init__(self, **kwargs):
        super(CoAttention, self).__init__(**kwargs)
        with self.name_scope():
            # Scalar projection of each context position (W part applied to w).
            self.w4c = gluon.nn.Dense(
                units=1,
                flatten=False,
                weight_initializer=Xavier(),
                use_bias=False
            )
            # Scalar projection of each query position (W part applied to q).
            self.w4q = gluon.nn.Dense(
                units=1,
                flatten=False,
                weight_initializer=Xavier(),
                use_bias=False
            )
            # Weights for the hadamard-product term w * q.
            self.w4mlu = self.params.get(
                'linear_kernel', shape=(1, 1, opt.emb_encoder_conv_channels), init=mx.init.Xavier())
            self.bias = self.params.get(
                'coattention_bias', shape=(1,), init=mx.init.Zero())

    def hybrid_forward(self, F, context, query, context_mask, query_mask,
                       context_max_len, query_max_len, w4mlu, bias):
        """Implement forward computation.

        Parameters
        -----------
        context : NDArray
            input tensor with shape `(batch_size, context_sequence_length, hidden_size)`
        query : NDArray
            input tensor with shape `(batch_size, query_sequence_length, hidden_size)`
        context_mask : NDArray
            input tensor with shape `(batch_size, context_sequence_length)`
        query_mask : NDArray
            input tensor with shape `(batch_size, query_sequence_length)`
        context_max_len : int
        query_max_len : int

        Returns
        --------
        return : NDArray
            output tensor with shape `(batch_size, context_sequence_length, 4*hidden_size)`
        """
        context_mask = F.expand_dims(context_mask, axis=-1)
        query_mask = F.expand_dims(query_mask, axis=1)
        similarity = self._calculate_trilinear_similarity(
            context, query, context_max_len, query_max_len, w4mlu, bias)
        # Softmax over the query axis (masked where the query is padding).
        similarity_dash = F.softmax(mask_logits(similarity, query_mask))
        # Softmax over the context axis, then transposed for the q2c product.
        similarity_dash_trans = F.transpose(F.softmax(
            mask_logits(similarity, context_mask), axis=1), axes=(0, 2, 1))
        c2q = F.batch_dot(similarity_dash, query)
        q2c = F.batch_dot(F.batch_dot(
            similarity_dash, similarity_dash_trans), context)
        return F.concat(context, c2q, context * c2q, context * q2c, dim=-1)

    def _calculate_trilinear_similarity(self, context, query, context_max_len, query_max_len,
                                        w4mlu, bias):
        """Implement the computation of trilinear similarity function.

        refer https://github.com/NLPLearn/QANet/blob/master/layers.py#L505

        The similarity function is:
                f(w, q) = W[w, q, w * q]
        where w and q represent the word in context and query respectively,
        and * operator means hadamard product.

        Parameters
        -----------
        context : NDArray
            input tensor with shape `(batch_size, context_sequence_length, hidden_size)`
        query : NDArray
            input tensor with shape `(batch_size, query_sequence_length, hidden_size)`
        context_max_len : int
        context_max_len : int

        Returns
        --------
        similarity_mat : NDArray
            output tensor with shape `(batch_size, context_sequence_length, query_sequence_length)`
        """
        # NOTE(review): nd.* is used directly here (not F), so this helper
        # only works in imperative mode — confirm hybridization is not needed.
        subres0 = nd.tile(self.w4c(context), [1, 1, query_max_len])
        subres1 = nd.tile(nd.transpose(
            self.w4q(query), axes=(0, 2, 1)), [1, context_max_len, 1])
        subres2 = nd.batch_dot(w4mlu * context,
                               nd.transpose(query, axes=(0, 2, 1)))
        similarity_mat = subres0 + subres1 + subres2 + bias
        return similarity_mat
|
nilq/baby-python
|
python
|
import re, sys, glob
from os.path import join
from tabulate import tabulate
# Aggregate per-fold evaluation scores from fold{0..4}/eval.txt files.
indir = sys.argv[1]
outfile = join(indir, "xfold_eval.txt")
# Per-fold scores: fold index -> dict of the six metric values.
name2fold = {}
for fold in range(5):
    # (The original pre-initialized name2fold[fold] = {} here; that was dead
    # code — the entry is unconditionally assigned below.)
    file = join(indir, f"fold{fold}/eval.txt")
    with open(file, 'r') as f:
        # Everything after the "Tuple Level" marker holds two metric lines
        # separated by a blank-ish middle line.
        tuple_performance, _, instance_performance = f.read().strip().split("Tuple Level")[1].strip().split("\n")
    # Each line looks like "p: 0.1, r: 0.2, f: 0.3" — keep the numbers only.
    score = tuple_performance.split(",") + instance_performance.split(",")
    for i, s in enumerate(score):
        s = float(s.split(":")[1].strip())
        score[i] = s
    tupp, tupr, tupf, insp, insr, insf = score
    name2fold[fold] = {
        "tupp": tupp,
        "tupr": tupr,
        "tupf": tupf,
        "insp": insp,
        "insr": insr,
        "insf": insf
    }
def avg(l):
    """Return the arithmetic mean of the numbers in *l*."""
    total = sum(l)
    count = len(l)
    return total / count
# Collect each metric across the five folds for averaging and per-fold rows.
tupp = [name2fold[fold]['tupp'] for fold in range(5)]
tupr = [name2fold[fold]['tupr'] for fold in range(5)]
tupf = [name2fold[fold]['tupf'] for fold in range(5)]
insp = [name2fold[fold]['insp'] for fold in range(5)]
insr = [name2fold[fold]['insr'] for fold in range(5)]
insf = [name2fold[fold]['insf'] for fold in range(5)]
to_print = []
with open(outfile,'w') as f:
    print(f"Saving to {outfile}")
    # Header row, the cross-fold averages, a separator, then one row per fold.
    to_print.append(["SCORE", "TUPLE P", "TUPLE R", "TUPLE F", "INS P", "INS R", "INS F"])
    to_print.append(["AVG", avg(tupp), avg(tupr), avg(tupf), avg(insp), avg(insr), avg(insf)])
    to_print.append(["---"] * 7)
    to_print.extend([[f"FOLD{idx}", tp, tr, tf, ip, ir, iff] for idx, (tp, tr, tf, ip, ir, iff) in enumerate(zip(tupp, tupr, tupf, insp, insr, insf))])
    # tabulate renders the rows as an aligned plain-text table.
    f.write(tabulate(to_print))
|
nilq/baby-python
|
python
|
# coding: utf-8
__doc__ = """包含一些继承自默认Qt控件的自定义行为控件。"""
import os
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QLineEdit, QTextEdit
class QLineEditMod(QLineEdit):
    """Single-line edit accepting one file or directory via drag & drop.

    Parameters:
        accept: "file" to accept a single file drop, "dir" for a directory.
        file_filter: set of allowed file extensions (e.g. {".txt"}); an empty
            or None filter accepts any file.
    """

    def __init__(self, accept="dir", file_filter=None):
        super().__init__()
        self.setContextMenuPolicy(Qt.NoContextMenu)
        self._accept = accept
        # Fix: the original used a mutable default argument (file_filter=set());
        # use None as the sentinel and create a fresh set per instance.
        self._filter = set() if file_filter is None else file_filter
        self._drag_temp = ""

    @property
    def local_path(self):
        """The current text with surrounding whitespace stripped."""
        return self.text().strip()

    def dragEnterEvent(self, event):
        # Fix: always reset the stashed path first. The original never cleared
        # it, so for accept=="dir" a stale path from an earlier drop was
        # silently reused by dropEvent.
        self._drag_temp = ""
        if event.mimeData().hasUrls():
            if self._accept == "file":
                self._drag_temp = os.path.realpath(
                    event.mimeData().urls()[0].toLocalFile()
                )
                # Accept only when no filter is set or the extension matches.
                if (
                    not self._filter
                    or os.path.splitext(self._drag_temp)[1] in self._filter
                ):
                    event.accept()
                else:
                    event.ignore()
            elif self._accept == "dir":
                event.accept()
            else:
                event.ignore()
        else:
            event.ignore()

    def dropEvent(self, event):
        # The "dir" path is resolved lazily here (dragEnterEvent only stashes
        # for "file" drops).
        if not self._drag_temp:
            self._drag_temp = os.path.realpath(event.mimeData().urls()[0].toLocalFile())
        if self._accept == "file" and os.path.isfile(self._drag_temp):
            self.setText(self._drag_temp)
        elif self._accept == "dir" and os.path.isdir(self._drag_temp):
            self.setText(self._drag_temp)
class QTextEditMod(QTextEdit):
    """Multi-line edit accepting multiple files/directories via drag & drop.

    Parameters:
        accept: "file" to collect files (directories are walked recursively),
            "dir" to collect directories.
        file_filter: set of allowed file extensions; an empty or None filter
            accepts any file.
    """

    def __init__(self, accept="file", file_filter=None):
        super().__init__()
        self.setLineWrapMode(QTextEdit.NoWrap)
        self.setContextMenuPolicy(Qt.NoContextMenu)
        self._accept = accept
        # Fix: the original used a mutable default argument (file_filter=set());
        # use None as the sentinel and create a fresh set per instance.
        self._filter = set() if file_filter is None else file_filter
        self._drag_temp = list()

    @property
    def local_paths(self):
        """Paths currently in the widget that exist and match the accept mode."""
        file_dir_paths = self.toPlainText().split("\n")
        if self._accept == "dir":
            return [path for path in file_dir_paths if os.path.isdir(path)]
        if self._accept == "file":
            return [path for path in file_dir_paths if os.path.isfile(path)]
        return []

    def _stash_from_urls(self, urls):
        # Record every dropped path; directories are also walked recursively
        # so their contained files are included.
        self._drag_temp.clear()
        for file_or_dir in (path.toLocalFile() for path in urls):
            file_or_dir = os.path.realpath(file_or_dir)
            if os.path.isfile(file_or_dir):
                self._drag_temp.append(file_or_dir)
                continue
            self._drag_temp.append(file_or_dir)
            for root, _, files in os.walk(file_or_dir):
                self._drag_temp.extend(
                    os.path.join(root, filename) for filename in files
                )

    def dragEnterEvent(self, event):
        self._drag_temp.clear()
        if event.mimeData().hasUrls():
            if self._accept == "file":
                self._stash_from_urls(event.mimeData().urls())
                # Accept only when every dropped file's extension passes the filter.
                if not self._filter or set(
                    os.path.splitext(fp)[1]
                    for fp in self._drag_temp
                    if os.path.isfile(fp)
                ).issubset(self._filter):
                    event.accept()
                else:
                    event.ignore()
            elif self._accept == "dir":
                event.accept()
            else:
                event.ignore()
            # Ensure appended paths start on a fresh line.
            if not self.toPlainText().endswith("\n"):
                self.append("")
        else:
            event.ignore()

    def dropEvent(self, event):
        cur_text = self.toPlainText()
        super().dropEvent(event)
        if not self._drag_temp:
            self._stash_from_urls(event.mimeData().urls())
        if self._accept == "file":
            self.setText(
                cur_text
                + "\n".join(path for path in self._drag_temp if os.path.isfile(path))
            )
        elif self._accept == "dir":
            self.setText(
                cur_text
                + "\n".join(path for path in self._drag_temp if os.path.isdir(path))
            )
        else:
            self.setText("")
        # Fix: scroll to the end of the document. The original passed
        # maximumHeight() (a widget size constraint) instead of the scroll
        # bar's maximum() slider value.
        self.verticalScrollBar().setValue(self.verticalScrollBar().maximum())
|
nilq/baby-python
|
python
|
"""
A collection of constants including error message
"""
ERROR_CLONE_TIMEOUT_EXPIRED = 'Timeout expired error'
ERROR_CLONE_FAILED = 'Cloning failed error'
|
nilq/baby-python
|
python
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferRenderMan
def __qualitySummary( plug ) :
    """Summarise the enabled Quality options for the section header."""
    if not plug["pixelSamples"]["enabled"].getValue() :
        return ""
    ps = plug["pixelSamples"]["value"].getValue()
    return "Pixel Samples %dx%d" % ( ps[0], ps[1] )
def __hiderSummary( plug ) :
    """Summarise the enabled Hider options for the section header."""
    info = []
    if plug["hider"]["enabled"].getValue() :
        info.append( plug["hider"]["value"].getValue().capitalize() )
    if plug["hiderDepthFilter"]["enabled"].getValue() :
        info.append( "Depth Filter : " + plug["hiderDepthFilter"]["value"].getValue().capitalize() )
    # The remaining options are all booleans summarised as "<label> On/Off".
    for plugName, label in (
        ( "hiderJitter", "Jitter" ),
        ( "hiderSampleMotion", "Sample Motion" ),
        ( "hiderExtremeMotionDOF", "Extreme MDOF" ),
        ( "hiderProgressive", "Progressive" ),
    ) :
        if plug[plugName]["enabled"].getValue() :
            info.append( label + " " + ( "On" if plug[plugName]["value"].getValue() else "Off" ) )
    return ", ".join( info )
def __statisticsSummary( plug ) :
    """Summarise the enabled Statistics options for the section header."""
    parts = []
    level = plug["statisticsLevel"]
    if level["enabled"].getValue() :
        parts.append( "Level %d" % level["value"].getValue() )
    if plug["statisticsFileName"]["enabled"].getValue() :
        parts.append( "File name" )
    progress = plug["statisticsProgress"]
    if progress["enabled"].getValue() :
        state = "On" if progress["value"].getValue() else "Off"
        parts.append( "Progress " + state )
    return ", ".join( parts )
def __searchPathsSummary( plug ) :
    """Summarise which search-path options are enabled."""
    labels = (
        ( "shaderSearchPath", "Shaders" ),
        ( "textureSearchPath", "Textures" ),
        ( "displaySearchPath", "Displays" ),
        ( "archiveSearchPath", "Archives" ),
        ( "proceduralSearchPath", "Procedurals" ),
    )
    return ", ".join(
        label
        for childName, label in labels
        if plug[childName]["enabled"].getValue()
    )
# Node and per-plug UI metadata for RenderManOptions.
Gaffer.Metadata.registerNode(

    GafferRenderMan.RenderManOptions,

    "description",
    """
    Sets global scene options applicable to RenderMan
    renderers. Use the StandardOptions node to set
    global options applicable to all renderers.
    """,

    plugs = {

        # Summaries

        "options" : [
            "layout:section:Quality:summary", __qualitySummary,
            "layout:section:Hider:summary", __hiderSummary,
            "layout:section:Statistics:summary", __statisticsSummary,
            "layout:section:Search Paths:summary", __searchPathsSummary,
        ],

        # Quality

        "options.pixelSamples" : [
            "description",
            """
            The number of primary samples to divide each pixel into
            in the X and Y directions. For example, 3x3 gives a total of
            9 samples per pixel. This is the primary quality control for
            geometric antialiasing and motion blur.
            """,
            "layout:section", "Quality",
        ],

        # Hider

        "options.hider" : [
            "description",
            """
            The "Hidden" hider means the classic REYES algorithm
            is used, and the "Raytrace" hider means a more modern
            raytraced algorithm is used.
            """,
            "layout:section", "Hider",
        ],

        "options.hider.value" : [
            "preset:Hidden", "hidden",
            "preset:Raytrace", "raytrace",
            "plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
        ],

        "options.hiderDepthFilter" : [
            "description",
            """
            The filter used to compute a single depth
            value per pixel from the depths in each
            pixel sample.
            """,
            "layout:section", "Hider",
            "label", "Depth Filter",
        ],

        "options.hiderDepthFilter.value" : [
            "preset:Min", "min",
            "preset:Max", "max",
            "preset:Average", "average",
            "preset:Midpoint", "midpoint",
            "plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
        ],

        "options.hiderJitter" : [
            "description",
            """
            Whether or not each pixel sample is
            jittered about the centre of its subpixel
            position, or if they're aligned in a
            regular grid. If in doubt, leave this on.
            """,
            "layout:section", "Hider",
            "label", "Jitter",
        ],

        "options.hiderSampleMotion" : [
            "description",
            """
            May be turned off to disable the sampling of
            motion blur, but keep motion vectors available
            for use in shaders. This is useful for
            rendering a motion vector pass to allow
            2D motion blur to be applied as a post process.
            If you simply wish to turn off motion blur
            entirely, then use the motion blur settings
            in the StandardOptions node.
            """,
            "layout:section", "Hider",
            "label", "Sample Motion",
        ],

        "options.hiderExtremeMotionDOF" : [
            "description",
            """
            An alternative sampling algorithm which
            is more expensive, but gives higher quality
            results when objects are both moving quickly
            and are out of focus.
            """,
            "layout:section", "Hider",
            "label", "Extreme Motion DOF",
        ],

        "options.hiderProgressive" : [
            "description",
            """
            Renders at progressively increasing levels
            of quality, to give quick low quality feedback
            at the start of an interactive render. Only
            applies when the raytrace hider is used.
            """,
            "layout:section", "Hider",
            "label", "Progressive",
        ],

        # Statistics

        "options.statisticsLevel" : [
            "description",
            """
            Determines the verbosity of statistics
            output.
            """,
            "layout:section", "Statistics",
            "label", "Level",
        ],

        "options.statisticsLevel.value" : [
            "preset:0 (Off)", 0,
            "preset:1", 1,
            "preset:2", 2,
            "preset:3 (Most Verbose)", 3,
            "plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
        ],

        "options.statisticsFileName" : [
            "description",
            """
            The name of a file where the statistics
            will be written.
            """,
            "layout:section", "Statistics",
            "label", "File Name",
        ],

        "options.statisticsFileName.value" : [
            "plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
            "pathPlugValueWidget:leaf", True,
            "pathPlugValueWidget:bookmarks", "statistics",
            "fileSystemPathPlugValueWidget:extensions", IECore.StringVectorData( [ "htm", "html", "txt", "stats" ] ),
        ],

        "options.statisticsProgress" : [
            "description",
            """
            Turning this on causes a render progress
            percentage to be printed out continuously
            during rendering.
            """,
            "layout:section", "Statistics",
            "label", "Progress",
        ],

        # Search Paths

        "options.shaderSearchPath" : [
            "description",
            """
            The filesystem paths where shaders are
            searched for. Paths should be separated
            by ':'.
            """,
            "layout:section", "Search Paths",
            "label", "Shaders",
        ],

        # Fix: this description previously said "shaders" (copy/paste error).
        "options.textureSearchPath" : [
            "description",
            """
            The filesystem paths where textures are
            located. Paths should be separated
            by ':'.
            """,
            "layout:section", "Search Paths",
            "label", "Textures",
        ],

        "options.displaySearchPath" : [
            "description",
            """
            The filesystem paths where display driver
            plugins are located. These will be used when searching
            for drivers specified using the Outputs
            node. Paths should be separated by ':'.
            """,
            "layout:section", "Search Paths",
            "label", "Displays",
        ],

        "options.archiveSearchPath" : [
            "description",
            """
            The filesystem paths where RIB archives
            are located. These will be used when searching
            for archives specified using the ExternalProcedural
            node. Paths should be separated by ':'.
            """,
            "layout:section", "Search Paths",
            "label", "Archives",
        ],

        "options.proceduralSearchPath" : [
            "description",
            """
            The filesystem paths where DSO procedurals
            are located. These will be used when searching
            for procedurals specified using the ExternalProcedural
            node. Paths should be separated by ':'.
            """,
            "layout:section", "Search Paths",
            "label", "Procedurals",
        ],

    }

)
|
nilq/baby-python
|
python
|
from fastapi import Request
from geobook.db.backends.mongodb import exceptions
from starlette.responses import JSONResponse
async def validation_exception_handler(
    request: Request,
    exc: exceptions.ValidationError,
) -> JSONResponse:
    """Convert a mongodb backend ValidationError into a 400 JSON response.

    The exception message is returned under the ``detail`` key; any
    ``headers`` attribute on the exception is forwarded to the response.
    """
    # Both branches of the original if/else built the identical response, so
    # a single return preserves behavior (headers is simply None when the
    # attribute is absent).
    headers = getattr(exc, 'headers', None)
    return JSONResponse(
        {'detail': f'{exc}'}, status_code=400, headers=headers
    )
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.5 on 2018-06-12 17:31
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration altering ImportedRecord.metadata.

    dependencies = [
        ('webarchives', '0004_auto_20180609_1839'),
    ]

    operations = [
        migrations.AlterField(
            model_name='importedrecord',
            name='metadata',
            # NOTE(review): default={} is a shared mutable default; the model
            # field should use default=dict (with a fresh migration) — but a
            # generated migration's recorded state is left untouched here.
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
        ),
    ]
|
nilq/baby-python
|
python
|
from collections import deque
import numpy as np
from gym import spaces
from gym.envs.atari.atari_env import AtariEnv
from . import utils
class MultiFrameAtariEnv(AtariEnv):
    """Atari environment returning a stack of the last `buf_size` preprocessed frames."""

    metadata = {'render.modes': ['human', 'rgb_array']}
    # Upper bound on random no-op actions performed at reset (exploration trick).
    no_op_steps = 30

    def __init__(self, game='pong', obs_type='image', buf_size=4, gray=True,
                 frameskip=4, repeat_action_probability=0.):
        super(MultiFrameAtariEnv, self).__init__(game=game, obs_type=obs_type,
                                                 frameskip=frameskip,
                                                 repeat_action_probability=repeat_action_probability)
        # Previous/next raw frames, kept to take the pixel-wise max over
        # consecutive frames (removes Atari sprite flicker) in step().
        self._cur_st = None
        self._nx_st = None
        # Rolling buffer of the last `buf_size` preprocessed frames.
        self._img_buf = deque(maxlen=buf_size)
        self._gray = gray
        # Target (height, width) of the preprocessed frames.
        self._shape = (84, 84)
        # NOTE(review): step()/reset() return np.array(list(buf)) which is
        # channel-first (buf_size, 84, 84), while these spaces declare
        # channel-last shapes — confirm which layout consumers expect.
        if self._gray:
            self.observation_space = spaces.Box(low=0, high=255,
                                                shape=(self._shape[0], self._shape[1], buf_size),
                                                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(low=0, high=255,
                                                shape=(self._shape[0], self._shape[1], 3, buf_size),
                                                dtype=np.uint8)
        self._initialize()

    def _initialize(self):
        # Reset the underlying env, fill the frame buffer with the first
        # frame, then take a random number of no-op steps.
        self._nx_st = super(MultiFrameAtariEnv, self).reset()
        for _ in range(self._img_buf.maxlen):
            self._img_buf.append(utils.preprocess(self._nx_st, self._shape, self._gray))
        for _ in range(np.random.randint(1, self.no_op_steps) // self.frameskip):
            self.step(0)

    def step(self, a):
        self._cur_st = self._nx_st.copy()
        self._nx_st, reward, done, info = super(MultiFrameAtariEnv, self).step(a)
        # Pixel-wise max over two consecutive frames to suppress flicker
        # (only in grayscale mode).
        nx_st = np.maximum(self._nx_st, self._cur_st) if self._gray else self._nx_st
        self._img_buf.append(utils.preprocess(nx_st, self._shape, self._gray))
        return np.array(list(self._img_buf)), reward, done, info

    def reset(self):
        self._img_buf.clear()
        self._initialize()
        return np.array(list(self._img_buf))
from gym.envs.registration import register

# Register a MultiFrame (stacked grayscale) and a SingleFrame (single RGB)
# variant of every supported Atari game.
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
             'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
             'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
             'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
             'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
             'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
             'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
             'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
             'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
    # CamelCase env-id suffix, e.g. 'air_raid' -> 'AirRaid'.
    name = ''.join([g.capitalize() for g in game.split('_')])
    register(
        id='MultiFrame{}-v0'.format(name),
        entry_point='distributed_rl.libs.wrapped_env:MultiFrameAtariEnv',
        kwargs={'game': game, 'obs_type': 'image'},
        max_episode_steps=10000,
        nondeterministic=False,
    )
    register(
        id='SingleFrame{}-v0'.format(name),
        entry_point='distributed_rl.libs.wrapped_env:MultiFrameAtariEnv',
        # Fix: the original passed the CamelCase `name` here, but AtariEnv
        # expects the snake_case rom id (e.g. 'air_raid'), so every
        # SingleFrame env failed to load its ROM.
        kwargs={'game': game, 'obs_type': 'image', 'buf_size': 1, 'gray': False},
        max_episode_steps=10000,
        nondeterministic=False,
    )
|
nilq/baby-python
|
python
|
# Simple number-guessing game (messages in Portuguese): the computer picks a
# number from 0 to 5 and the player gets one guess.
from random import randint
from time import sleep
computador = randint(0, 5)  # computer's secret number, 0..5 inclusive
print('-=' * 20)
print('Vou pensar em um número entre 0 e 5.Tente adivinhar...')
print('-=' * 20)
jogador = int(input('Em que número eu pensei? '))  # player's guess
print('Processando...')
# Dramatic pause before revealing the result.
sleep(3)
if jogador == computador:
    print('Parabéns! Você acertou!')
else:
    print(f'Eu ganhei! Eu pensei no número {computador} e não no {jogador}.')
|
nilq/baby-python
|
python
|
# Probe for optional pycaret support; the flag records whether the
# time-series experiment API is importable in this environment.
try:
    from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
except ImportError:
    using_pycaret = False
else:
    using_pycaret = True
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import json
import argparse
from dashboard.common import elastic_access
from dashboard.common import logger_utils
from dashboard.conf import config
from dashboard.conf import testcases
from dashboard_assembler import DashboardAssembler
from visualization_assembler import VisualizationAssembler
# Module-level setup: logger, CLI parsing and config loading happen at
# import time, so importing this module has side effects (parses sys.argv).
logger = logger_utils.DashboardLogger('elastic2kibana').get
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config-file",
                    dest='config_file',
                    help="Config file location")
args = parser.parse_args()
# CONF exposes es_url, es_creds, index_url, kibana_url, js_path (per usage below).
CONF = config.APIConfig().parse(args.config_file)
# Installers for which dashboards are generated.
_installers = {'fuel', 'apex', 'compass', 'joid'}
class KibanaConstructor(object):
    """Builds Kibana dashboards for every configured test case.

    For each (project, case, installer, pod) combination found in
    Elasticsearch, assembles visualizations and a dashboard, and records the
    resulting dashboard URL in ``self.js_dict`` so :meth:`config_js` can
    export it as a JavaScript variable.
    """

    def __init__(self):
        super(KibanaConstructor, self).__init__()
        # Nested mapping: {family: {test_label: {installer: {pod: url}}}}
        self.js_dict = {}

    def construct(self):
        """Build dashboards for every project/case pair; return self."""
        for project, case_dicts in testcases.testcases_yaml.items():
            for case in case_dicts:
                self._construct_by_case(project, case)
        return self

    def _construct_by_case(self, project, case):
        """Build one dashboard set per declared visualization of a case."""
        case_name = case.get('name')
        vis_ps = case.get('visualizations')
        family = case.get('test_family')
        for vis_p in vis_ps:
            self._construct_by_vis(project, case_name, family, vis_p)

    def _construct_by_vis(self, project, case, family, vis_p):
        """Assemble visualizations and a dashboard for every installer/pod."""
        for installer in _installers:
            pods_and_scenarios = self._get_pods_and_scenarios(project,
                                                              case,
                                                              installer)
            # BUGFIX: was dict.iteritems(), which only exists on Python 2;
            # items() behaves the same here and works on Python 2 and 3.
            for pod, scenarios in pods_and_scenarios.items():
                visualizations = self._construct_visualizations(project,
                                                                case,
                                                                installer,
                                                                pod,
                                                                scenarios,
                                                                vis_p,
                                                                CONF.es_url,
                                                                CONF.es_creds)
                dashboard = DashboardAssembler(project,
                                               case,
                                               family,
                                               installer,
                                               pod,
                                               visualizations,
                                               CONF.es_url,
                                               CONF.es_creds)
                self._set_js_dict(case,
                                  pod,
                                  installer,
                                  family,
                                  vis_p.get('name'),
                                  dashboard.id)

    @staticmethod
    def _construct_visualizations(project,
                                  case,
                                  installer,
                                  pod,
                                  scenarios,
                                  vis_p,
                                  es_url,
                                  es_creds):
        """Return one VisualizationAssembler per scenario."""
        visualizations = []
        for scenario in scenarios:
            visualizations.append(VisualizationAssembler(project,
                                                         case,
                                                         installer,
                                                         pod,
                                                         scenario,
                                                         vis_p,
                                                         es_url,
                                                         es_creds))
        return visualizations

    def _set_js_dict(self, case, pod, installer, family, metric, id):
        """Record a dashboard URL under family/test-label/installer/pod."""
        test_label = '{} {}'.format(case, metric)
        if family not in self.js_dict:
            self.js_dict[family] = {}
        js_test_family = self.js_dict[family]
        if test_label not in js_test_family:
            js_test_family[test_label] = {}
        js_test_label = js_test_family[test_label]
        if installer not in js_test_label:
            js_test_label[installer] = {}
        js_installer = js_test_label[installer]
        js_installer[pod] = CONF.kibana_url + '#/dashboard/' + id

    def config_js(self):
        """Dump ``js_dict`` as a JavaScript variable assignment to CONF.js_path."""
        with open(CONF.js_path, 'w+') as conf_js_fdesc:
            conf_js_fdesc.write('var kibana_dashboard_links = ')
            # Strip Python-2 unicode literal prefixes from the repr output.
            conf_js_fdesc.write(str(self.js_dict).replace("u'", "'"))

    def _get_pods_and_scenarios(self, project, case, installer):
        """Query Elasticsearch and group scenarios by pod.

        Returns ``{pod_name: {scenario, ...}}`` including a synthetic 'all'
        pod that aggregates every scenario seen.
        """
        query = json.JSONEncoder().encode({
            "query": {
                "bool": {
                    "must": [
                        {"match_all": {}}
                    ],
                    "filter": [
                        {"match": {"installer": installer}},
                        {"match": {"project_name": project}},
                        {"match": {"case_name": case}}
                    ]
                }
            }
        })
        elastic_data = elastic_access.get_docs(CONF.index_url,
                                               CONF.es_creds,
                                               query)
        pods_and_scenarios = {}
        for data in elastic_data:
            pod = data['pod_name']
            if pod in pods_and_scenarios:
                pods_and_scenarios[pod].add(data['scenario'])
            else:
                pods_and_scenarios[pod] = {data['scenario']}
            if 'all' in pods_and_scenarios:
                pods_and_scenarios['all'].add(data['scenario'])
            else:
                pods_and_scenarios['all'] = {data['scenario']}
        return pods_and_scenarios
def main():
    """Build all dashboards, then write the kibana links JS file."""
    KibanaConstructor().construct().config_js()
|
nilq/baby-python
|
python
|
#!/home/ubuntu/DEF/PG/project/venv3/bin/python3
# Minimal Django management entry point (equivalent to manage.py).
# NOTE(review): presumably relies on DJANGO_SETTINGS_MODULE being set in the
# environment before invocation -- TODO confirm the deployment sets it.
from django.core import management
if __name__ == "__main__":
    # Dispatch the command given on the command line (runserver, migrate, ...).
    management.execute_from_command_line()
|
nilq/baby-python
|
python
|
from .layers import *
|
nilq/baby-python
|
python
|
import os
import json
class Config:
    """Typed accessor over a JSON configuration file.

    Reads the file at ``configpath`` (environment variables in the path are
    expanded) and exposes the required keys as attributes. Raises KeyError
    if any expected key is missing, and whatever ``open``/``json.load``
    raise for a bad path or malformed JSON.
    """

    def __init__(self, configpath):
        self._data = self._get_config_data(configpath)
        self.client_id = self._data['client_id']
        self.username = self._data['username']
        self.account_id = self._data['account_id']
        self.redirect = self._data['redirect']
        self.allocations = self._data['allocations']

    def _get_config_data(self, configpath):
        # BUGFIX: json.load(open(...)) leaked the file handle; close it
        # deterministically with a context manager.
        with open(os.path.expandvars(configpath)) as config_file:
            return json.load(config_file)
|
nilq/baby-python
|
python
|
class OpenL3Error(Exception):
    """The root OpenL3 exception class.

    All package-specific exceptions should subclass this so callers can
    catch any OpenL3 failure with a single ``except OpenL3Error``.
    """
    pass
|
nilq/baby-python
|
python
|
from aiogram.dispatcher.filters.state import State, StatesGroup
class AddStoreForm(StatesGroup):
    """FSM states for the add-store dialog: pick a city, then a store."""
    city_id = State()
    store_id = State()
class SearchSku(StatesGroup):
    """FSM state for the SKU search dialog."""
    select_sku = State()
|
nilq/baby-python
|
python
|
import os
import json
from flask import request, abort, jsonify, make_response, Response
from jsonschema import validate, ErrorTree, Draft4Validator as Validator
from app.properties import properties_bp
from .models import Property
from .repositories import PropertyRepository as Repository
from .utils import load
@properties_bp.route('/properties', methods=['POST'])
def create_properties():
    """POST /properties: validate the JSON payload and persist a Property.

    Returns 415 for a non-JSON request, 422 with a JSON error envelope on
    schema violations, 201 with the created property otherwise.
    """
    if not request.is_json:
        abort(make_response(jsonify(message='Mime type is not valid'), 415))
    payload = request.get_json()
    schema_errors = Validator(load('schemas/property.schema')).iter_errors(payload)
    error_body = _errors(schema_errors)
    if error_body:
        return Response(error_body,
                        status=422,
                        mimetype="application/json")
    prop = Property(**payload)
    Repository.create(prop)
    return jsonify(prop.as_dict()), 201
@properties_bp.route('/properties/<id>')
def find_property(id):
    """GET /properties/<id>: return one property or 404 if unknown."""
    prop = Repository.find_by_id(id)
    if prop:
        return jsonify(prop.as_dict()), 200
    abort(make_response(
        jsonify(message='Property id {} not found'.format(id)), 404))
@properties_bp.route('/properties')
def search_properties():
    """GET /properties?ax=&ay=&bx=&by=: find properties inside a bounding box.

    Validates the four corner coordinates against the filter schema
    (422 on violation), returns 404 when nothing matches, otherwise a JSON
    body with the match count and the properties themselves.
    NOTE(review): missing query args raise KeyError here -- presumably
    surfaced by Flask as a 400; confirm desired behavior.
    """
    upper_x = request.args['ax']
    upper_y = request.args['ay']
    bottom_x = request.args['bx']
    bottom_y = request.args['by']
    # NOTE(review): JSON is hand-built by string interpolation; non-numeric
    # input makes json.loads below raise instead of returning 422 -- verify.
    params_json = '{"ax":%s, "ay":%s, "bx":%s, "by":%s }' % (upper_x, upper_y, bottom_x, bottom_y)
    errors = Validator(load('schemas/filter.schema')).iter_errors(json.loads(params_json))
    response_error = _errors(errors)
    if (response_error):
        resp = Response(response_error,
                        status=422,
                        mimetype="application/json")
        return resp
    result = Repository.find_properties(upper_x, bottom_x, bottom_y, upper_y)
    if(not result):
        message = 'No properties found with these coordinates'
        abort(make_response(jsonify(message=message), 404))
    else:
        # Response body is assembled by hand to wrap the repository result
        # with a count without re-serializing through jsonify.
        response = '{ "foundProperties": %s, "properties": %s }' % (len(result), json.dumps(result, ensure_ascii=False)) #, )
        return Response(response,
                        status=200,
                        mimetype="application/json")
@properties_bp.errorhandler(400)
def bad_request(e):
    """Blueprint-wide 400 handler: render the error as JSON."""
    return jsonify(error=400, text=str(e)), 400
def _errors(errors):
lista = []
response_error = ''
for error in errors:
msg = '{"field":"%s","message":"%s"}' % (''.join(error.path), error.message)
lista.append(msg)
if (lista):
response_error = '{"errors": [%s]}' % (','.join(lista))
return response_error
|
nilq/baby-python
|
python
|
from .annotation_decorator import annotate
from .annotation_decorator import annotate_class
from .annotation_decorator import annotate_method
from .application import WinterApplication
from .component import Component
from .component import component
from .component import is_component
from .component_method import ComponentMethod
from .component_method import component_method
from .component_method_argument import ArgumentDoesNotHaveDefault
from .component_method_argument import ComponentMethodArgument
from .utils import cached_property
|
nilq/baby-python
|
python
|
import sys


def printf(fmt, *args):
    """printf-style helper: %-format *args* into *fmt* and write to stdout.

    Returns the number of characters written (sys.stdout.write's return
    value). Replaces the original lambda assignment (PEP 8 E731).
    """
    return sys.stdout.write(fmt % args)


printf("This is a %s of %is of possibilities of %s", "test", 1000, printf)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import pickle
import pandas as pd
import numpy as np
def convert_and_clean_data(data_dict, fill_na = 1.e-5):
    '''
    Takes a dataset as a dictionary, then converts it into a Pandas DataFrame for convenience.
    Replaces all NA values by the value specified in 'fill_na' (or None).
    Cleans up data errors on two observations.
    Returns a Pandas DataFrame.

    NOTE(review): this is Python-2-era code (xrange, DataFrame.ix); it will
    not run unmodified on Python 3 / modern pandas -- confirm target runtime.
    '''
    # Convert to DataFrame
    data_df = pd.DataFrame.from_dict(data_dict, orient = 'index', dtype = float)
    if fill_na:
        data_df = data_df.fillna(fill_na)
    # Sort columns in correct order
    column_names = ['poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
        'deferral_payments', 'loan_advances','other', 'expenses', 'director_fees', 'total_payments',
        'exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred', 'total_stock_value',
        'from_messages', 'to_messages', 'from_poi_to_this_person', 'from_this_person_to_poi',
        'shared_receipt_with_poi']
    data_df = data_df[column_names]
    # Correct two data errors
    # Robert Belfer: Data shifted right by one column
    # (shift each cell one position left over columns 1..13, then reset the
    # last shifted column to the fill sentinel 1.e-5)
    for j in xrange(1, 14):
        data_df.ix['BELFER ROBERT', j] = data_df.ix['BELFER ROBERT', j + 1]
    data_df.ix['BELFER ROBERT', 14] = 1.e-5
    # Sanjay Bhatnagar: Data shifted left by one column
    # (mirror of the fix above: walk right-to-left shifting cells right)
    for j in xrange(14, 2, -1):
        data_df.ix['BHATNAGAR SANJAY', j] = data_df.ix['BHATNAGAR SANJAY', j - 1]
    data_df.ix['BHATNAGAR SANJAY', 1] = 1.e-5
    return data_df
def drop_outliers(data_df, outliers):
    """Return a copy of *data_df* without the observations in *outliers*.

    *outliers* is a list of index labels to remove; the input frame is
    left untouched (DataFrame.drop returns a new frame).
    """
    return data_df.drop(outliers)
def create_new_features(data_df, log_columns):
    """Add aggregate email features and log-transformed financial features.

    Creates 'sent_vs_received', 'total_emails' and 'emails_with_poi' from the
    raw email counts, plus one 'log_<col>' column per entry in *log_columns*.
    Mutates and returns *data_df*.
    """
    from_msgs = data_df.loc[:, 'from_messages']
    to_msgs = data_df.loc[:, 'to_messages']
    # Aggregate email features reduce dimensionality of the raw counts.
    data_df.loc[:, 'sent_vs_received'] = 1. * from_msgs / to_msgs
    data_df.loc[:, 'total_emails'] = from_msgs + to_msgs
    data_df.loc[:, 'emails_with_poi'] = (
        data_df.loc[:, 'from_this_person_to_poi']
        + data_df.loc[:, 'from_poi_to_this_person']
        + data_df.loc[:, 'shared_receipt_with_poi'])
    for column in log_columns:
        # Some financial figures are negative, where log is undefined;
        # take the absolute value first.
        data_df.loc[:, 'log_' + column] = np.log(np.abs(data_df.loc[:, column]))
    return data_df
# Financial features that get a log-transformed companion column
# (see create_new_features above).
log_columns = ['bonus', 'deferred_income', 'long_term_incentive', 'other',
    'restricted_stock_deferred', 'total_stock_value']
# Canonical feature ordering used across the project ('poi' label first).
features_list = ['poi', 'salary', 'bonus', 'long_term_incentive', 'deferred_income',
    'deferral_payments', 'loan_advances','other', 'expenses', 'director_fees', 'total_payments',
    'exercised_stock_options', 'restricted_stock', 'restricted_stock_deferred',
    'total_stock_value', 'from_messages', 'to_messages', 'from_poi_to_this_person',
    'from_this_person_to_poi', 'shared_receipt_with_poi']
|
nilq/baby-python
|
python
|
# UI constants for the Pipboy display.
SIZE = (640,480)          # window size in pixels (width, height)
TITLE = "Pipboy"
TEXT_COLOUR = (0,255,0)   # RGB green, classic Pipboy look
TEXT_SIZE = 12
TEXT_FONT = '../resources/monofonto.ttf'
# NOTE(review): "BOARDER" is presumably a typo for "BORDER"; renaming would
# break importers, so it is kept as-is.
BOARDER_SPACE = 10
TOP_BOARDER = 6
BOX_SPACE = 20
|
nilq/baby-python
|
python
|
import os
import sys
import copy
import argparse
from avalon import io
from avalon.tools import publish
import pyblish.api
import pyblish.util
from pype.api import Logger
import pype
import pype.hosts.celaction
from pype.hosts.celaction import api as celaction
log = Logger().get_logger("Celaction_cli_publisher")
publish_host = "celaction"
# Plugin directories are resolved relative to the installed celaction host
# package, so this works regardless of the install location.
HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.celaction.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
def cli():
    """Parse command-line arguments for the celaction publisher.

    The parsed values are stored as a plain dict on ``celaction.kwargs`` so
    downstream publish plugins can read them.
    """
    parser = argparse.ArgumentParser(prog="celaction_publish")
    parser.add_argument("--currentFile",
                        help="Pass file to Context as `currentFile`")
    parser.add_argument("--chunk",
                        # BUGFIX: help-text typo "chanks" -> "chunks".
                        help=("Render chunks on farm"))
    parser.add_argument("--frameStart",
                        help=("Start of frame range"))
    parser.add_argument("--frameEnd",
                        help=("End of frame range"))
    parser.add_argument("--resolutionWidth",
                        help=("Width of resolution"))
    parser.add_argument("--resolutionHeight",
                        help=("Height of resolution"))
    celaction.kwargs = parser.parse_args(sys.argv[1:]).__dict__
def _prepare_publish_environments():
    """Prepares environments based on request data.

    Reads AVALON_* variables from the current environment, looks up the
    project and asset documents in the Avalon database, derives the asset
    hierarchy, and writes the resulting AVALON_*/PYBLISH_* variables back
    into os.environ for the publish session.
    """
    env = copy.deepcopy(os.environ)
    project_name = os.getenv("AVALON_PROJECT")
    asset_name = os.getenv("AVALON_ASSET")
    io.install()
    # NOTE(review): find_one with only {"type": "project"} presumably relies
    # on io being scoped to AVALON_PROJECT -- confirm.
    project_doc = io.find_one({
        "type": "project"
    })
    av_asset = io.find_one({
        "type": "asset",
        "name": asset_name
    })
    parents = av_asset["data"]["parents"]
    hierarchy = ""
    if parents:
        hierarchy = "/".join(parents)
    env["AVALON_PROJECT"] = project_name
    env["AVALON_ASSET"] = asset_name
    env["AVALON_TASK"] = os.getenv("AVALON_TASK")
    env["AVALON_WORKDIR"] = os.getenv("AVALON_WORKDIR")
    env["AVALON_HIERARCHY"] = hierarchy
    env["AVALON_PROJECTCODE"] = project_doc["data"].get("code", "")
    env["AVALON_APP"] = f"hosts.{publish_host}"
    env["AVALON_APP_NAME"] = "celaction_local"
    env["PYBLISH_HOSTS"] = publish_host
    os.environ.update(env)
def main():
    """Set up environments and plugins, then open the publish UI."""
    # prepare all environments
    _prepare_publish_environments()
    # Registers pype's Global pyblish plugins
    pype.install()
    if os.path.exists(PUBLISH_PATH):
        log.info(f"Registering path: {PUBLISH_PATH}")
        pyblish.api.register_plugin_path(PUBLISH_PATH)
    pyblish.api.register_host(publish_host)
    return publish.show()
if __name__ == "__main__":
    # Parse CLI args first, then run the publish; exit code 0 on success
    # (truthy result), 1 otherwise.
    cli()
    result = main()
    sys.exit(not bool(result))
|
nilq/baby-python
|
python
|
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate octavia testing."""
import logging
import subprocess
import tenacity
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class CharmOperationTest(test_utils.OpenStackBaseTest):
    """Charm operation tests."""
    @classmethod
    def setUpClass(cls):
        """Run class setup for running Octavia charm operation tests."""
        super(CharmOperationTest, cls).setUpClass()
    def test_pause_resume(self):
        """Run pause and resume tests.
        Pause service and check services are stopped, then resume and check
        they are started.
        """
        # NOTE(review): apache2 is the only service checked here -- presumably
        # it fronts the Octavia API on this unit; confirm against the charm.
        self.pause_resume(['apache2'])
class LBAASv2Test(test_utils.OpenStackBaseTest):
    """LBaaSv2 service tests."""
    @classmethod
    def setUpClass(cls):
        """Run class setup for running LBaaSv2 service tests."""
        super(LBAASv2Test, cls).setUpClass()
    def test_create_loadbalancer(self):
        """Create load balancer.

        End-to-end Octavia exercise: create a load balancer, listener, pool,
        health monitor and members for the pre-deployed payload instances,
        then fetch a page through the balancer's floating IP to prove
        traffic flows.
        """
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        neutron_client = openstack_utils.get_neutron_session_client(
            keystone_session)
        nova_client = openstack_utils.get_nova_session_client(
            keystone_session)
        # Get IP of the prepared payload instances
        payload_ips = []
        for server in nova_client.servers.list():
            payload_ips.append(server.networks['private'][0])
        self.assertTrue(len(payload_ips) > 0)
        resp = neutron_client.list_networks(name='private')
        subnet_id = resp['networks'][0]['subnets'][0]
        # With DVR the LB VIP lives on a dedicated FIP network.
        if openstack_utils.dvr_enabled():
            resp = neutron_client.list_networks(name='private_lb_fip_network')
            vip_subnet_id = resp['networks'][0]['subnets'][0]
        else:
            vip_subnet_id = subnet_id
        octavia_client = openstack_utils.get_octavia_session_client(
            keystone_session)
        result = octavia_client.load_balancer_create(
            json={
                'loadbalancer': {
                    'description': 'Created by Zaza',
                    'admin_state_up': True,
                    'vip_subnet_id': vip_subnet_id,
                    'name': 'zaza-lb-0',
                }})
        lb_id = result['loadbalancer']['id']
        lb_vip_port_id = result['loadbalancer']['vip_port_id']
        # Poll a show() call until the resource is ACTIVE (up to 15 min).
        @tenacity.retry(wait=tenacity.wait_fixed(1),
                        reraise=True, stop=tenacity.stop_after_delay(900))
        def wait_for_lb_resource(octavia_show_func, resource_id,
                                 operating_status=None):
            resp = octavia_show_func(resource_id)
            logging.info(resp['provisioning_status'])
            assert resp['provisioning_status'] == 'ACTIVE', (
                'load balancer resource has not reached '
                'expected provisioning status: {}'
                .format(resp))
            if operating_status:
                logging.info(resp['operating_status'])
                assert resp['operating_status'] == operating_status, (
                    'load balancer resource has not reached '
                    'expected operating status: {}'.format(resp))
            return resp
        logging.info('Awaiting loadbalancer to reach provisioning_status '
                     '"ACTIVE"')
        resp = wait_for_lb_resource(octavia_client.load_balancer_show, lb_id)
        logging.info(resp)
        result = octavia_client.listener_create(
            json={
                'listener': {
                    'loadbalancer_id': lb_id,
                    'name': 'listener1',
                    'protocol': 'HTTP',
                    'protocol_port': 80
                },
            })
        listener_id = result['listener']['id']
        logging.info('Awaiting listener to reach provisioning_status '
                     '"ACTIVE"')
        resp = wait_for_lb_resource(octavia_client.listener_show, listener_id)
        logging.info(resp)
        result = octavia_client.pool_create(
            json={
                'pool': {
                    'listener_id': listener_id,
                    'name': 'pool1',
                    'lb_algorithm': 'ROUND_ROBIN',
                    'protocol': 'HTTP',
                },
            })
        pool_id = result['pool']['id']
        logging.info('Awaiting pool to reach provisioning_status '
                     '"ACTIVE"')
        resp = wait_for_lb_resource(octavia_client.pool_show, pool_id)
        logging.info(resp)
        result = octavia_client.health_monitor_create(
            json={
                'healthmonitor': {
                    'pool_id': pool_id,
                    'delay': 5,
                    'max_retries': 4,
                    'timeout': 10,
                    'type': 'HTTP',
                    'url_path': '/',
                },
            })
        healthmonitor_id = result['healthmonitor']['id']
        logging.info('Awaiting healthmonitor to reach provisioning_status '
                     '"ACTIVE"')
        resp = wait_for_lb_resource(octavia_client.health_monitor_show,
                                    healthmonitor_id)
        logging.info(resp)
        # Add each payload instance as a pool member and wait for it to
        # come ONLINE (health monitor must see it respond).
        for ip in payload_ips:
            result = octavia_client.member_create(
                pool_id=pool_id,
                json={
                    'member': {
                        'subnet_id': subnet_id,
                        'address': ip,
                        'protocol_port': 80,
                    },
                })
            member_id = result['member']['id']
            logging.info('Awaiting member to reach provisioning_status '
                         '"ACTIVE"')
            resp = wait_for_lb_resource(
                lambda x: octavia_client.member_show(
                    pool_id=pool_id, member_id=x),
                member_id,
                operating_status='ONLINE')
            logging.info(resp)
        lb_fp = openstack_utils.create_floating_ip(
            neutron_client, 'ext_net', port={'id': lb_vip_port_id})
        # Fetch through the balancer, retrying until backends answer.
        @tenacity.retry(wait=tenacity.wait_fixed(1),
                        reraise=True, stop=tenacity.stop_after_delay(900))
        def get_payload():
            return subprocess.check_output(
                ['wget', '-O', '-',
                 'http://{}/'.format(lb_fp['floating_ip_address'])],
                universal_newlines=True)
        snippet = 'This is the default welcome page'
        assert snippet in get_payload()
        logging.info('Found "{}" in page retrieved through load balancer at '
                     '"http://{}/"'
                     .format(snippet, lb_fp['floating_ip_address']))
|
nilq/baby-python
|
python
|
import numpy as np;
import numpy.matlib as npm;
import DataHelper;
# consume class is 0, 1, ..., discrete feature values is 0, 1, 2, ...
class NaiveBayes:
    """Naive Bayes classifier supporting mixed discrete/continuous features.

    Discrete features use Laplace-style smoothing with the given factor;
    continuous features are modeled with per-class Gaussians. Classes are
    encoded 0, 1, ...; discrete feature values are encoded 0, 1, 2, ...
    All probabilities are kept in log space.
    """
    def __init__(self, smoothingFactor):
        # Additive smoothing constant for discrete frequency estimates.
        self.__smoothingFactor = smoothingFactor;
        self.__discreteFeatureIndices = None;
        self.__discreteFeatureValueNumbers = None;
        self.__continuousFeatureIndices = None;
        self.__classProbability = None;
        self.__discreteFeatureProbability = None;
        self.__continuousFeatureArguments = None;
    def __calcDiscreteProbability(self, dataSet, featureValueNumbers):
        # Log-probabilities of each discrete value per feature for one class.
        # dataSet is None when the class was unseen: fall back to uniform.
        if dataSet is None:
            return np.log(np.mat(np.ones((featureValueNumbers.max(), featureValueNumbers.shape[1]))) / featureValueNumbers);
        frequency = None;
        count = dataSet.shape[0];
        result = np.mat(np.zeros((featureValueNumbers.max(), dataSet.shape[1])));
        for i in range(0, result.shape[1]):
            frequency = DataHelper.statisticFrequency(dataSet[:, i]);
            # Smoothed log frequency; slots beyond this feature's value count
            # are padded with NaN (the matrix is sized by the max value count).
            result[:, i] = np.mat([np.log(((frequency[key] if key in frequency else 0) + self.__smoothingFactor) / (count + featureValueNumbers[0, i] * self.__smoothingFactor)) if key < featureValueNumbers[0, i] else np.nan for key in range(0, result.shape[0])]).T;
        return result;
    def __calcContinuousArguments(self, dataSet, featureCount):
        # Row 0: per-feature mean, row 1: per-feature std, for one class.
        return np.vstack((dataSet.mean(axis = 0), dataSet.std(axis = 0))) if dataSet is not None else np.mat(np.zeros((2, featureCount)));
    def train(self, dataSet, featureValueNumbers):
        """Fit priors and per-class feature models.

        dataSet: matrix with the class label in the last column.
        featureValueNumbers: row vector; >0 entries give the number of values
        of a discrete feature, <=0 marks a continuous feature.
        """
        if dataSet is None or not isinstance(dataSet, np.matrix) or featureValueNumbers is None or not isinstance(featureValueNumbers, np.matrix):
            raise ValueError();
        self.__discreteFeatureIndices = np.where(featureValueNumbers.A.flatten() > 0)[0];
        self.__continuousFeatureIndices = np.where(featureValueNumbers.A.flatten() <= 0)[0];
        if len(self.__discreteFeatureIndices) > 0:
            self.__discreteFeatureValueNumbers = featureValueNumbers[np.where(featureValueNumbers > 0)];
        classSets = DataHelper.groupBy(dataSet, -1);
        classCount = int(max(classSets.keys())) + 1;
        # Smoothed log class priors.
        self.__classProbability = np.mat([np.log(((classSets[key].shape[0] if key in classSets else 0) + self.__smoothingFactor) / (dataSet.shape[0] + classCount * self.__smoothingFactor)) for key in range(0, classCount)]);
        self.__discreteFeatureProbability = list(range(0, classCount));
        self.__continuousFeatureArguments = list(range(0, classCount));
        for key in range(0, classCount):
            if len(self.__discreteFeatureIndices) > 0:
                self.__discreteFeatureProbability[key] = self.__calcDiscreteProbability(classSets[key][:, self.__discreteFeatureIndices] if key in classSets else None, self.__discreteFeatureValueNumbers);
            if len(self.__continuousFeatureIndices) > 0:
                self.__continuousFeatureArguments[key] = self.__calcContinuousArguments(classSets[key][:, self.__continuousFeatureIndices] if key in classSets else None, len(self.__continuousFeatureIndices));
    def predict(self, dataSet):
        """Return the most probable class index (column vector) per row."""
        if dataSet is None or not isinstance(dataSet, np.matrix):
            raise ValueError();
        discreteRange = None;
        discreteSet, continuousSet = None, None;
        allProbability, discreteProbability, continuousProbability = None, None, None;
        result = np.mat(np.zeros((dataSet.shape[0], self.__classProbability.shape[1])));
        if len(self.__discreteFeatureIndices) > 0:
            discreteSet = dataSet[:, self.__discreteFeatureIndices];
            discreteRange = list(range(0, len(self.__discreteFeatureIndices)));
        if len(self.__continuousFeatureIndices) > 0:
            continuousSet = dataSet[:, self.__continuousFeatureIndices];
        for c in range(0, result.shape[1]):
            if discreteSet is not None:
                # Look up the stored log-probability of each observed value.
                discreteProbability = self.__discreteFeatureProbability[c][np.mat(discreteSet, dtype = int), discreteRange];
            if continuousSet is not None:
                normalArguments = self.__continuousFeatureArguments[c];
                mean, var, std = normalArguments[0, :], np.power(normalArguments[1, :], 2), normalArguments[1, :];
                # Features with zero std would divide by zero; neutralize them.
                zeroStdIndices = np.where(std == 0)[1];
                if len(zeroStdIndices) > 0:
                    var[:, zeroStdIndices] = 1;
                    std[:, zeroStdIndices] = 1;
                # Gaussian log-density up to a class-independent constant.
                continuousProbability = np.power(continuousSet - mean, 2) / (-2 * var) - np.log(std);
                if len(zeroStdIndices) > 0:
                    continuousProbability[:, zeroStdIndices] = 0;
            if discreteSet is not None and continuousSet is not None:
                allProbability = np.hstack((discreteProbability, continuousProbability));
            elif discreteSet is not None:
                allProbability = discreteProbability;
            else:
                allProbability = continuousProbability;
            result[:, c] = allProbability.sum(1);
        result = result + self.__classProbability;
        return np.mat(result.argmax(axis = 1));
|
nilq/baby-python
|
python
|
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
# Copyright (c) https://github.com/PyCQA/astroid/blob/main/CONTRIBUTORS.txt
"""This module contains mixin classes for scoped nodes."""
from typing import TYPE_CHECKING, Dict, List, TypeVar
from astroid.filter_statements import _filter_stmts
from astroid.nodes import node_classes, scoped_nodes
from astroid.nodes.scoped_nodes.utils import builtin_lookup
if TYPE_CHECKING:
from astroid import nodes
_T = TypeVar("_T")
class LocalsDictNodeNG(node_classes.LookupMixIn, node_classes.NodeNG):
    """this class provides locals handling common to Module, FunctionDef
    and ClassDef nodes, including a dict like interface for direct access
    to locals information
    """
    # attributes below are set by the builder module or by raw factories
    # NOTE(review): class-level {} is a shared mutable default; per the
    # comment above it is presumably always replaced per-instance by the
    # builder -- confirm before relying on it.
    locals: Dict[str, List["nodes.NodeNG"]] = {}
    """A map of the name of a local variable to the node defining the local."""
    def qname(self):
        """Get the 'qualified' name of the node.
        For example: module.name, module.class.name ...
        :returns: The qualified name.
        :rtype: str
        """
        # pylint: disable=no-member; github.com/pycqa/astroid/issues/278
        if self.parent is None:
            return self.name
        return f"{self.parent.frame(future=True).qname()}.{self.name}"
    def scope(self: _T) -> _T:
        """The first parent node defining a new scope.
        :returns: The first parent scope node.
        :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
        """
        return self
    def _scope_lookup(self, node, name, offset=0):
        """XXX method for interfacing the scope lookup"""
        try:
            stmts = _filter_stmts(node, self.locals[name], self, offset)
        except KeyError:
            stmts = ()
        if stmts:
            return self, stmts
        # Handle nested scopes: since class names do not extend to nested
        # scopes (e.g., methods), we find the next enclosing non-class scope
        pscope = self.parent and self.parent.scope()
        while pscope is not None:
            if not isinstance(pscope, scoped_nodes.ClassDef):
                return pscope.scope_lookup(node, name)
            pscope = pscope.parent and pscope.parent.scope()
        # self is at the top level of a module, or is enclosed only by ClassDefs
        return builtin_lookup(name)
    def set_local(self, name, stmt):
        """Define that the given name is declared in the given statement node.
        .. seealso:: :meth:`scope`
        :param name: The name that is being defined.
        :type name: str
        :param stmt: The statement that defines the given name.
        :type stmt: NodeNG
        """
        # assert not stmt in self.locals.get(name, ()), (self, stmt)
        self.locals.setdefault(name, []).append(stmt)
    # dict-style assignment delegates to set_local: node[name] = stmt
    __setitem__ = set_local
    def _append_node(self, child):
        """append a child, linking it in the tree"""
        # pylint: disable=no-member; depending by the class
        # which uses the current class as a mixin or base class.
        # It's rewritten in 2.0, so it makes no sense for now
        # to spend development time on it.
        self.body.append(child)
        child.parent = self
    def add_local_node(self, child_node, name=None):
        """Append a child that should alter the locals of this scope node.
        :param child_node: The child node that will alter locals.
        :type child_node: NodeNG
        :param name: The name of the local that will be altered by
            the given child node.
        :type name: str or None
        """
        if name != "__class__":
            # add __class__ node as a child will cause infinite recursion later!
            self._append_node(child_node)
        self.set_local(name or child_node.name, child_node)
    def __getitem__(self, item):
        """The first node the defines the given local.
        :param item: The name of the locally defined object.
        :type item: str
        :raises KeyError: If the name is not defined.
        """
        return self.locals[item][0]
    def __iter__(self):
        """Iterate over the names of locals defined in this scoped node.
        :returns: The names of the defined locals.
        :rtype: iterable(str)
        """
        return iter(self.keys())
    def keys(self):
        """The names of locals defined in this scoped node.
        :returns: The names of the defined locals.
        :rtype: list(str)
        """
        return list(self.locals.keys())
    def values(self):
        """The nodes that define the locals in this scoped node.
        :returns: The nodes that define locals.
        :rtype: list(NodeNG)
        """
        # pylint: disable=consider-using-dict-items
        # It look like this class override items/keys/values,
        # probably not worth the headache
        return [self[key] for key in self.keys()]
    def items(self):
        """Get the names of the locals and the node that defines the local.
        :returns: The names of locals and their associated node.
        :rtype: list(tuple(str, NodeNG))
        """
        return list(zip(self.keys(), self.values()))
    def __contains__(self, name):
        """Check if a local is defined in this scope.
        :param name: The name of the local to check for.
        :type name: str
        :returns: True if this node has a local of the given name,
            False otherwise.
        :rtype: bool
        """
        return name in self.locals
class ComprehensionScope(LocalsDictNodeNG):
    """Scoping for different types of comprehensions."""
    # Comprehensions reuse the generic locals-based lookup directly.
    scope_lookup = LocalsDictNodeNG._scope_lookup
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-07-14 12:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated schema for the reactor_master app.

    NOTE(review): generated by Django 1.10.2 (see header); edit with care,
    migration history depends on this file staying consistent.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Capability',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('actuator', models.CharField(max_length=200)),
                ('action', models.CharField(max_length=50)),
                ('remote_id', models.IntegerField()),
                ('remote_name', models.CharField(max_length=200)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='CybOXType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('identifier', models.CharField(max_length=50)),
                ('template', models.TextField(default='{}', max_length=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('raw_message', models.TextField(max_length=5000)),
                ('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('sent_at', models.DateTimeField(blank=True, null=True)),
                ('upstream_respond_to', models.CharField(max_length=5000, null=True)),
                ('upstream_command_ref', models.CharField(max_length=100, null=True)),
                ('capability', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Capability')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='JobStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='Relay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('url', models.CharField(max_length=400)),
                ('username', models.CharField(blank=True, max_length=200, null=True)),
                ('password', models.CharField(blank=True, max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Response',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('raw_message', models.CharField(max_length=5000)),
                ('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('job', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Job')),
            ],
        ),
        migrations.CreateModel(
            name='Target',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=140)),
                ('raw_message', models.TextField(max_length=500)),
                ('cybox_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.CybOXType')),
            ],
        ),
        migrations.AddField(
            model_name='job',
            name='status',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.JobStatus'),
        ),
        migrations.AddField(
            model_name='job',
            name='target',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Target'),
        ),
        migrations.AddField(
            model_name='capability',
            name='requires',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.CybOXType'),
        ),
        migrations.AddField(
            model_name='capability',
            name='via',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reactor_master.Relay'),
        ),
    ]
|
nilq/baby-python
|
python
|
>>> 3
3
>>> _*_, _**0.5
(9, 1.7320508075688772)
>>>
|
nilq/baby-python
|
python
|
import bson
import os, base64, zlib, random
from bson import BSONCoding, import_class
class Member(BSONCoding):
    """A BSON-serializable member record (username, code, password)."""

    def __init__(self, username="", code="00", password=""):
        self.username = username
        self.code = code
        self.password = password

    def __str__(self):
        # Only render real values when every field is a string (py2 basestring).
        display = "Username : \nCode : \nPassword : "
        if isinstance(self.username, basestring) and isinstance(self.code, basestring) and isinstance(self.password, basestring):
            display = "Username : " + str(self.username) + "\nCode : " + str(self.code) + "\nPassword : " + str(self.password)
        return display

    def bson_encode(self):
        """Return the dict representation consumed by the bson codec."""
        return {"username": self.username, "code": self.code, "password": self.password}

    def bson_init(self, raw_values):
        """Restore fields from a decoded BSON dict."""
        self.username = raw_values["username"]
        self.code = raw_values["code"]
        self.password = raw_values["password"]

    def __eq__(self, other):
        if not isinstance(other, Member):
            return NotImplemented
        # BUG FIX: previously compared self.username against other.code,
        # so equality never matched on username.
        if self.username != other.username:
            return False
        if self.code != other.code:
            return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
class FileReader(BSONCoding):
    """BSON-serializable handle on a whitelisted file under utils/."""

    def __init__(self, file_name):
        self.filename = file_name
        self._id = random.randint(0, 2000)

    def __str__(self):
        header = "File Id: " + str(self._id) + \
            "\n==========================================\n"
        # Only two exact filenames are allowed to be read back.
        allowed = isinstance(self.filename, basestring) and \
            (self.filename.__eq__('oold_flag.py') or self.filename.__eq__('new_flag.py'))
        if not allowed:
            return header
        fh = open('utils/' + self.filename, 'r')
        contents = fh.read()
        fh.close()
        return header + contents

    def bson_encode(self):
        """Return the dict representation consumed by the bson codec."""
        return {"filename": self.filename, "_id": self._id}

    def bson_init(self, raw_values):
        """Restore fields from a decoded BSON dict."""
        self.filename = raw_values["filename"]
        self._id = raw_values["_id"]

    def __eq__(self, other):
        if not isinstance(other, FileReader):
            return NotImplemented
        return self.filename == other.filename and self._id == other._id

    def __ne__(self, other):
        return not self.__eq__(other)
# import serpy, json, os
# import random, pickle, base64
# class Member(object):
# username = ""
# code = 0
# password = ""
# def __init__(self, username = "", code = 0, password = ""):
# self.username = username
# self.code = code
# self.password = password
# def __str__(self):
# display = "Username : " + self.username + "\nCode : " + str(self.code) + "\nPassword : " + self.password
# return display
# class FileReader(object):
# _id = 0
# filename = ""
# def __init__(self, file_name):
# self.filename = file_name
# self._id = random.randint(0, 2000)
# def __str__(self):
# if self.filename in ["utils/oold_flag.py", "utils/new_flag.py"]:
# return "File Id: " + str(self._id) + "\n==========================================\n" + open(self.filename).read()
# else:
# return "File Id: " + str(self._id) + "\n==========================================\n"
# class FileReaderSerializer(serpy.Serializer):
# Id = serpy.Field(attr="_id")
# filename = serpy.Field(attr="filename")
# file = serpy.MethodField()
# def get_file(self, obj):
# return obj.__str__()
# class MemberSerializer(serpy.Serializer):
# username = serpy.Field(attr='username')
# code = serpy.Field(attr='code')
# password = serpy.Field(attr='password')
# member = serpy.MethodField()
# def get_member(self, obj):
# return obj.__str__()
|
nilq/baby-python
|
python
|
# library/package semantic version
__api_version__ = '1.4'  # public API version string exposed by the package
__generation__ = 1  # presumably an internal generation counter — TODO confirm meaning
|
nilq/baby-python
|
python
|
from math import (
sin,
cos,
tan,
acos,
radians,
degrees,
)
from datetime import (
timedelta,
)
def earth_declination(n):
    """Solar declination angle in degrees for day-of-year ``n``."""
    day_angle = radians(360/365 * (284+n))
    return 23.45 * sin(day_angle)
def td(lat, n=119):
    """Day length (hours) at latitude ``lat`` (degrees) for day-of-year ``n``.

    ``n`` defaults to 119 to preserve the previously hard-coded behaviour
    (resolves the old "Change this literal" TODO by making it a parameter).
    """
    dec = earth_declination(n)
    # Sunrise hour angle: cos(H) = -tan(latitude) * tan(declination).
    cofactor = -(tan(radians(lat)) * tan(radians(dec)))
    return 2/15 * degrees(acos(cofactor))
def longitude_correction(lng, fuse):
    """Clock offset between local longitude and the timezone meridian.

    The Sun sweeps 15 degrees of longitude per hour, so each degree of
    difference is worth 4 minutes.
    """
    degrees_off = lng - fuse
    return timedelta(minutes=(degrees_off * 60) / 15)
def day_range(td, lng, fuse):
    """Return (sunrise, sunset) as timedeltas from midnight.

    ``td`` is the day length in hours; the day is centred on solar noon
    (12:00) and shifted by the longitude correction.
    """
    half_day = td / 2
    shift = longitude_correction(lng, fuse)
    sunrise = timedelta(hours=12 - half_day) + shift
    sunset = timedelta(hours=12 + half_day) + shift
    return (sunrise, sunset)
# Example inputs: presumably Sao Paulo-area coordinates (note lng sign) — verify.
lat = -23.543333
lng = 46.633056
fuse = 45  # timezone reference meridian in degrees
td = td(lat)  # NOTE(review): rebinds the function name `td` to its result
sunrise, sunset = day_range(td, lng, fuse)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test app."""
from __future__ import absolute_import
import pytest
from flask import Flask
from invenio_oaiserver import InvenioOAIServer
def test_version():
    """Test version import."""
    from invenio_oaiserver import __version__
    # The package must expose a truthy version string.
    assert __version__
def test_init():
    """Test extension initialization."""
    app = Flask('testapp')
    # NOTE(review): pytest.warns(None) is deprecated and raises on pytest>=7;
    # if the intent is "no warnings raised", warnings.catch_warnings() with
    # simplefilter("error") is the modern equivalent — confirm pytest version.
    with pytest.warns(None):
        InvenioOAIServer(app)
|
nilq/baby-python
|
python
|
'''
Tests for the lookup module.
'''
import sys
import unittest
from io import StringIO
from unittest.mock import patch

# Make the versioned package directory importable BEFORE importing from it;
# previously the path was inserted after the import, too late to affect it.
# NOTE(review): harmless either way if `modules` already resolves from cwd.
sys.path.insert(0, "0_1_0/")

from modules import rec_lookup
class TestLookup(unittest.TestCase):
    '''Lookup tests for MAC-address counting.'''
    def setUp(self):
        # One fixture with a known MAC, one with a different MAC, one empty file.
        self.nodes_json = StringIO(
            '''[
            {
                "id": 1,
                "name": "Test Node",
                "mac": "0011223344556677",
                "tool": false,
                "door": false,
                "qr_toggle": false,
                "hub": 1,
                "facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
                "qr_code": null
            }
            ]'''
        )
        self.nodes_alternative_json = StringIO(
            '''[
            {
                "id": 1,
                "name": "Test Node",
                "mac": "0000000000000000",
                "tool": false,
                "door": false,
                "qr_toggle": false,
                "hub": 1,
                "facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
                "qr_code": null
            }
            ]'''
        )
        self.nodes_empty_json = StringIO('')
    def test_count_matching_mac(self):
        '''
        Confirms matching MAC addresses are counted correctly.
        '''
        # Unpatched calls read the real nodes file — presumably no match there.
        count_result = rec_lookup.count_matching_mac("12345678")
        self.assertEqual(count_result, 0)
        self.assertEqual(
            rec_lookup.count_matching_mac("0011223344556677"),
            0
        )
        # Fixture contains the MAC once.
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.nodes_json
            count_result = rec_lookup.count_matching_mac("0011223344556677")
            mock_open.assert_called()
            self.assertEqual(count_result, 1)
        # Fixture contains a different MAC.
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.nodes_alternative_json
            count_result = rec_lookup.count_matching_mac("0011223344556677")
            mock_open.assert_called()
            self.assertEqual(count_result, 0)
        # Empty file must count zero without raising.
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.nodes_empty_json
            self.assertEqual(
                rec_lookup.count_matching_mac("0011223344556677"),
                0
            )
            mock_open.assert_called()
class TestLookUpAccessRequest(unittest.TestCase): # pylint: disable=R0904
    '''Access Request Tests'''
    def setUp(self):
        '''
        Collection of JSON used for testing.

        Each fixture is a StringIO standing in for a JSON file that
        rec_lookup opens; the "_alt" variants are fresh streams for the
        negative-path tests (a StringIO can only be read once).
        '''
        self.system_json = StringIO(
            '''{
            "serial": "536780dfe639468e8e23fc568006950d",
            "timezone": "America/New_York",
            "CurrentVersion": "0_0_0",
            "HUBid": 40,
            "Token": "5a12ff36eed2f0647a48af62e635eb8cfd4c5979",
            "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032"
            }'''
        )
        self.nodes_json = StringIO(
            '''[
            {
                "id": 1,
                "name": "Test Node",
                "mac": "0011223344556677",
                "tool": false,
                "door": false,
                "qr_toggle": false,
                "hub": 1,
                "facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
                "qr_code": null
            }
            ]'''
        )
        self.members_json = StringIO(
            '''[
            {
                "cardNumber": "313233343536373839",
                "access_group": 123,
                "phone_number": "1234567890",
                "address": "1331 12th ave",
                "city": "Altoona",
                "state": "PA",
                "zip_code": "16601",
                "username": "BestName",
                "first_name": "John",
                "last_name": "Doe",
                "email": "email@email.com",
                "restricted_nodes": [0,9,8]
            }
            ]'''
        )
        self.owners_json = StringIO(
            '''[
            {
                "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
                "cardNumber": "30393837363534333231",
                "phone_number": null,
                "address": null,
                "city": null,
                "state": null,
                "zip_code": null,
                "username": "OwnerUserName",
                "first_name": "Jim",
                "last_name": "John",
                "email": "email@email.com"
            }
            ]'''
        )
        self.permissions_json = StringIO(
            '''[
            {
                "id": 1,
                "name": "General Access",
                "startTime": "20:20:20",
                "endTime": "23:23:23",
                "monday": true,
                "tuesday": true,
                "wednesday": true,
                "thursday": true,
                "friday": true,
                "saturday": true,
                "sunday": true,
                "twenty_four_seven": false,
                "default_fallback": true,
                "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
                "allowedNodes": [1, 4, 6]
            }
            ]'''
        )
        # ----------------------------------- _alt ----------------------------------- #
        # BUG FIX: this assignment previously rebound self.system_json (shadowing
        # the fixture above); it was clearly meant to be the "_alt" variant.
        self.system_json_alt = StringIO(
            '''{
            "serial": "536780dfe639468e8e23fc568006950d",
            "timezone": "America/New_York",
            "CurrentVersion": "0_0_0",
            "HUBid": 40,
            "Token": "5a12ff36eed2f0647a48af62e635eb8cfd4c5979",
            "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032"
            }'''
        )
        self.nodes_json_alt = StringIO(
            '''[
            {
                "id": 1,
                "name": "Test Node",
                "mac": "0011223344556677",
                "tool": false,
                "door": false,
                "qr_toggle": false,
                "hub": 1,
                "facility": "7659e76b-470c-4d5f-bff4-fcc120f08848",
                "qr_code": null
            }
            ]'''
        )
        self.members_json_alt = StringIO(
            '''[
            {
                "cardNumber": "313233343536373839",
                "access_group": 123,
                "phone_number": "1234567890",
                "address": "1331 12th ave",
                "city": "Altoona",
                "state": "PA",
                "zip_code": "16601",
                "username": "BestName",
                "first_name": "John",
                "last_name": "Doe",
                "email": "email@email.com",
                "restricted_nodes": [0,9,8]
            }
            ]'''
        )
        self.owners_json_alt = StringIO(
            '''[
            {
                "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
                "cardNumber": "30393837363534333231",
                "phone_number": null,
                "address": null,
                "city": null,
                "state": null,
                "zip_code": null,
                "username": "OwnerUserName",
                "first_name": "Jim",
                "last_name": "John",
                "email": "email@email.com"
            }
            ]'''
        )
        self.permissions_json_alt = StringIO(
            '''[
            {
                "id": 1,
                "name": "General Access",
                "startTime": "20:20:20",
                "endTime": "23:23:23",
                "monday": true,
                "tuesday": true,
                "wednesday": true,
                "thursday": true,
                "friday": true,
                "saturday": true,
                "sunday": true,
                "twenty_four_seven": false,
                "default_fallback": true,
                "facility": "3b9fdc97-9649-4c80-8b48-10df647bd032",
                "allowedNodes": [1, 4, 6]
            }
            ]'''
        )
    def test_files_opened(self):
        '''
        Confirms that all the files are correctly opened and read.
        '''
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.side_effect = [
                self.system_json,
                self.nodes_json,  # Opened from conversion function.
                self.owners_json,  # Opened from owner check function.
                self.members_json,  # Opened from get_details function.
                self.permissions_json,  # Opened from get_group_details function.
            ]
            # Fixed: assertEqual — the result is an exact int, not a float.
            self.assertEqual(rec_lookup.access_request(313131, '0011223344556677'), 2)
            mock_open.assert_called()
    def test_mac_to_id(self):
        '''
        Confirms that the mac address is converted to the node id.
        '''
        # Known MAC resolves to its node id.
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.nodes_json
            node_id = rec_lookup.mac_to_id('0011223344556677')
            mock_open.assert_called()
            self.assertEqual(node_id, 1)
        # Unknown MAC is returned unchanged.
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.nodes_json_alt
            node_id = rec_lookup.mac_to_id('9911223344556677')
            mock_open.assert_called()
            self.assertEqual(node_id, '9911223344556677')
    def test_is_owner(self):
        '''
        Confirms that the owner check function returns the correct value.
        '''
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.owners_json
            owner = rec_lookup.is_owner('30393837363534333231')
            mock_open.assert_called()
            self.assertTrue(owner)
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.owners_json_alt
            owner = rec_lookup.is_owner('99393837363534333231')
            mock_open.assert_called()
            self.assertFalse(owner)
    def test_get_details(self):
        '''
        Verifies that the correct details are returned.
        '''
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.members_json
            user = rec_lookup.get_details('313233343536373839')
            mock_open.assert_called()
            self.assertTrue(user['found'])
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.members_json_alt
            user = rec_lookup.get_details('993233343536373839')
            mock_open.assert_called()
            self.assertFalse(user['found'])
    def test_get_group_details(self):
        '''
        Verifies that the correct details are returned.
        '''
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.permissions_json
            group = rec_lookup.get_group_details(1)
            mock_open.assert_called()
            self.assertTrue(group['found'])
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.return_value = self.permissions_json_alt
            group = rec_lookup.get_group_details(69)
            mock_open.assert_called()
            self.assertFalse(group['found'])
    def test_access_request_combinations(self):
        '''
        Checks that the access request function returns the correct values.
        '''
        with patch('modules.rec_lookup.open') as mock_open:
            mock_open.side_effect = [
                self.system_json,
                self.nodes_json,  # Opened from conversion function.
                self.owners_json,  # Opened from owner check function.
                self.members_json,  # Opened from get_details function.
                self.permissions_json,  # Opened from get_group_details function.
            ]
            self.assertEqual(
                rec_lookup.access_request(313131, '0011223344556677'),
                2
            )
            mock_open.assert_called()
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# author: Artan Zandian
# date: 2022-02-18
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
Conv2D,
MaxPooling2D,
Dropout,
Conv2DTranspose,
concatenate,
)
def encoder_block(inputs=None, n_filters=32, dropout=0, max_pooling=True):
    """
    Convolutional encoder block: two 3x3 ReLU convolutions, optional
    dropout, optional 2x2 max pooling.

    Parameters
    ----------
    inputs: tensor
        Input tensor
    n_filters: int
        Number of convolutional layer channels
    dropout: float
        Dropout probability between 0 and 1
    max_pooling: bool
        Whether to MaxPooling2D for spatial dimensions reduction

    Returns
    -------
    next_layer, skip_connection
        Next layer for the downsampling section and skip connection outputs
    """
    def conv3x3(tensor):
        # Shared 3x3 same-padded ReLU convolution.
        return Conv2D(
            filters=n_filters,
            kernel_size=3,
            activation="relu",
            padding="same",
            kernel_initializer="he_normal",
        )(tensor)

    conv = conv3x3(conv3x3(inputs))
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    # The skip connection bypasses the pooling step.
    next_layer = MaxPooling2D(pool_size=(2, 2))(conv) if max_pooling else conv
    return next_layer, conv
def decoder_block(expansive_input, contractive_input, n_filters=32):
    """
    Convolutional decoder block: transposed-conv upsampling, concatenation
    with the matching encoder skip tensor, then two 3x3 ReLU convolutions.

    Parameters
    ----------
    expansive_input: tensor
        Input tensor
    contractive_input: tensor
        Input tensor from matching encoder skip layer
    n_filters: int
        Number of convolutional layers' channels

    Returns
    -------
    conv
        Tensor of output layer
    """
    def conv3x3(tensor):
        # Shared 3x3 same-padded ReLU convolution.
        return Conv2D(
            filters=n_filters,
            kernel_size=3,
            activation="relu",
            padding="same",
            kernel_initializer="he_normal",
        )(tensor)

    upsampled = Conv2DTranspose(
        filters=n_filters, kernel_size=(3, 3), strides=2, padding="same"
    )(expansive_input)
    # Channel order of the concatenation does not matter.
    merged = concatenate([upsampled, contractive_input], axis=3)
    return conv3x3(conv3x3(merged))
def U_Net(input_size=(320, 320, 3), n_filters=32, n_classes=1):
    """
    U_Net model

    Parameters
    ----------
    input_size: tuple of integers
        Input image dimension
    n_filters: int
        Number of convolutional layer channels
    n_classes: int
        Number of output classes

    Returns
    -------
    model
        tensorflow model
    """
    inputs = Input(input_size)

    # Encoder: filter count doubles at every downsampling step; each
    # block's skip output is kept for the matching decoder block.
    x = inputs
    skips = []
    x, skip = encoder_block(x, n_filters)
    skips.append(skip)
    x, skip = encoder_block(x, n_filters * 2)
    skips.append(skip)
    x, skip = encoder_block(x, n_filters * 4)
    skips.append(skip)
    x, skip = encoder_block(x, n_filters * 8, dropout=0.3)
    skips.append(skip)
    # Bottleneck: no pooling, so its conv output feeds the decoder directly.
    _, x = encoder_block(x, n_filters * 16, dropout=0.3, max_pooling=False)

    # Decoder: halve the filter count each step, consuming the skip
    # connections deepest-first.
    for skip, mult in zip(reversed(skips), (8, 4, 2, 1)):
        x = decoder_block(x, skip, n_filters=n_filters * mult)

    x = Conv2D(
        n_filters, 3, activation="relu", padding="same", kernel_initializer="he_normal"
    )(x)
    # 1x1 projection to the requested number of output channels.
    outputs = Conv2D(filters=n_classes, kernel_size=1, padding="same")(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
if __name__ == "__main__":
model = U_Net((320, 320, 3), n_filters=32, n_classes=1)
|
nilq/baby-python
|
python
|
##parameters=add='', edit='', preview=''
##
# CMF "discussion reply" form controller (Zope Python Script).
# Three submit buttons are handled: add (post the reply), preview, edit.
from Products.PythonScripts.standard import structured_text
from Products.CMFCore.utils import getUtilityByInterfaceName
from Products.CMFDefault.utils import decode
from Products.CMFDefault.utils import html_marshal
from Products.CMFDefault.utils import Message as _
atool = getUtilityByInterfaceName('Products.CMFCore.interfaces.IActionsTool')
form = context.REQUEST.form
is_preview = False
# Posting: validate and create the reply; on success the helper handles
# the redirect, so this script just returns.
if add and \
        context.validateHTML(**form) and \
        context.discussion_reply(**form):
    return
# Previewing: only validation; fall through to render the preview.
elif preview and \
        context.validateHTML(**form):
    is_preview = True
options = {}
title = form.get('title', context.Title())
text = form.get('text', '')
options['is_preview'] = is_preview
options['title'] = title
options['text'] = text
options['cooked_text'] = structured_text(text)
# In preview mode the entered values ride along as hidden form fields.
if is_preview:
    hidden_vars = [ {'name': n, 'value': v}
                    for n, v in html_marshal(title=title, text=text) ]
else:
    hidden_vars = []
buttons = []
target = atool.getActionInfo('object/reply', context)['url']
buttons.append( {'name': 'add', 'value': _(u'Add')} )
# Preview shows Edit; the initial form shows Preview.
if is_preview:
    buttons.append( {'name': 'edit', 'value': _(u'Edit')} )
else:
    buttons.append( {'name': 'preview', 'value': _(u'Preview')} )
options['form'] = { 'action': target,
                    'listHiddenVarInfos': tuple(hidden_vars),
                    'listButtonInfos': tuple(buttons) }
# `script` is the Zope Script binding for this object — provided by the runtime.
return context.discussion_reply_template(**decode(options, script))
|
nilq/baby-python
|
python
|
import pytest
from django.urls import reverse
from google.auth.exceptions import GoogleAuthError
from crm.factories import UserSocialAuthFactory, ProjectMessageFactory
from crm.models import ProjectMessage
@pytest.mark.django_db
def test_project_message_index(admin_app,
                               project_message,
                               project_message_factory):
    """Index view renders with existing project messages (smoke test)."""
    project_message_factory.create()
    url = reverse('crm_projectmessage_modeladmin_index')
    admin_app.get(url)
@pytest.mark.django_db
def test_project_message_index_google_auth_error(admin_app,
                                                 mocker):
    """Index view surfaces one warning message when Gmail sync fails."""
    mocker.patch('crm.gmail_utils.sync', side_effect=GoogleAuthError)
    url = reverse('crm_projectmessage_modeladmin_index')
    r = admin_app.get(url)
    assert len(r.context['messages']) == 1
    # BUG FIX: the apostrophe previously terminated the single-quoted
    # literal ('Can't ...'), which is a SyntaxError.
    assert "Can't update messages" in r.text
@pytest.mark.django_db
def test_project_message_index_creates_message(default_site, gmail_service, admin_app, admin_user):
    """Visiting the index triggers a sync that creates a ProjectMessage."""
    UserSocialAuthFactory(user=admin_user)
    assert ProjectMessage.objects.count() == 0
    url = reverse('crm_projectmessage_modeladmin_index')
    admin_app.get(url)
    assert ProjectMessage.objects.count() == 1
@pytest.mark.django_db
def test_project_message_inspect(admin_app,
                                 project_message):
    """Inspect view renders for a message with a project (smoke test)."""
    url = reverse('crm_projectmessage_modeladmin_inspect', kwargs={'instance_pk': project_message.pk})
    admin_app.get(url)
@pytest.mark.django_db
def test_project_message_inspect_no_project(admin_app):
    """Inspect view must not crash when the message has no project."""
    # https://sentry.io/share/issue/5ca8418a573d4ab59df0e1e5c34a1953/
    project_message = ProjectMessageFactory(project=None)
    url = reverse('crm_projectmessage_modeladmin_inspect', kwargs={'instance_pk': project_message.pk})
    admin_app.get(url)
|
nilq/baby-python
|
python
|
"""
Copyright 2015 INTEL RESEARCH AND INNOVATION IRELAND LIMITED
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import random
import heatclient.client as heatc
import keystoneclient.v2_0.client as keyc
import novaclient.client as novac
import adaptationengine_framework.configuration as cfg
LOGGER = logging.getLogger('syslog')
class OpenStackClients:
    """
    Interfaces to the Openstack keystone, nova, and heat APIs
    """
    @staticmethod
    def get_keystone_client(
            auth_url=None,
            username=None,
            password=None,
            tenant_name=None
    ):
        """Generate a keystone client.

        Any argument left as None falls back to the framework configuration.
        """
        LOGGER.debug("Generating keystone client")
        os_url = auth_url or cfg.openstack__auth_url
        os_user = username or cfg.openstack__username
        os_pass = password or cfg.openstack__password
        os_tenant = tenant_name or cfg.openstack__tenant
        keystone_client = keyc.Client(
            auth_url=os_url,
            username=os_user,
            password=os_pass,
            tenant_name=os_tenant
        )
        LOGGER.debug("Generated keystone client")
        return keystone_client

    @staticmethod
    def get_nova_client(
            api_version='2',
            username=None,
            password=None,
            tenant=None,
            auth_url=None,
            timeout=60
    ):
        """Generate a nova client.

        Any credential left as None falls back to the framework configuration.
        """
        LOGGER.debug("Generating nova client")
        os_url = auth_url or cfg.openstack__auth_url
        os_user = username or cfg.openstack__username
        os_pass = password or cfg.openstack__password
        os_tenant = tenant or cfg.openstack__tenant
        nova_client = novac.Client(
            api_version,
            os_user,
            os_pass,
            os_tenant,
            os_url,
            timeout=timeout,
        )
        LOGGER.debug("Generated nova client")
        return nova_client

    @staticmethod
    def get_heat_client(keystone_client, admin_ks_client=None):
        """Generate a heat client.

        The endpoint lookup may use a separate admin keystone client while
        the token comes from ``keystone_client``.
        """
        LOGGER.debug("Looking for heat endpoint")
        endpoint_ks_client = admin_ks_client or keystone_client
        heat_endpoint = OpenStackClients._find_endpoint(
            endpoint_ks_client, 'heat', keystone_client.project_id
        )
        LOGGER.debug("Generating heat client")
        heat_client = heatc.Client(
            "1",  # HEAT_API_VERSION
            endpoint=heat_endpoint,
            token=keystone_client.auth_token
        )
        LOGGER.debug("Generated heat client")
        return heat_client

    @staticmethod
    def _find_endpoint(keystone_client, wanted_service, tenant_id=None):
        """Return the endpoint url for a named openstack service"""
        if keystone_client is None:
            LOGGER.error("Invalid keystone client")
            return None
        LOGGER.debug(
            "Looking for endpoint for service [{}]".format(wanted_service)
        )
        endpoint = None
        service_id = None
        for ks_service in keystone_client.services.list():
            LOGGER.debug(
                "wanted:{}, name:{}, id:{}".format(
                    wanted_service, ks_service.name, ks_service.id
                )
            )
            if ks_service.name == wanted_service:
                service_id = ks_service.id
                break
        for ks_endpoint in keystone_client.endpoints.list():
            LOGGER.debug(
                "service_id:{}, endpoint.service_id:{}, "
                "endpoint.internalurl:{}".format(
                    service_id, ks_endpoint.service_id, ks_endpoint.internalurl
                )
            )
            if ks_endpoint.service_id == service_id:
                endpoint = ks_endpoint.internalurl
                break
        LOGGER.debug("Apparent endpoint url [{}]".format(endpoint))
        # openstack undocumented version difference #37891
        try:
            replacement_id = tenant_id or keystone_client.project_id
            endpoint = endpoint.replace(
                '%(tenant_id)s',
                replacement_id
            )
            endpoint = endpoint.replace(
                '$(tenant_id)s',
                replacement_id
            )
        except AttributeError:
            # endpoint is None: no matching service/endpoint was found.
            LOGGER.error(
                "No endpoint found for service [{}] in Keystone".format(
                    wanted_service
                )
            )
        LOGGER.debug(
            "Endpoint url with tenant id [{}]".format(endpoint)
        )
        return endpoint

    @staticmethod
    def get_heat_client_for_stack(admin_keystone_client, stack_id):
        """
        Generate a Heat client with persmissions to affect a particular stack

        Heat doesn't let you look at stacks for other tenants,
        so we need to keep trying tenants till we find the one who
        owns the stack and return a heat client that will have access.

        This all assumes that the keystone user is an admin with access
        to give auth tokens for every tenant
        """
        for tenant in admin_keystone_client.tenants.list():
            try:
                ks_tenant_client = OpenStackClients.get_keystone_client(
                    tenant_name=tenant.name
                )
                heat_client = OpenStackClients.get_heat_client(
                    ks_tenant_client,
                    admin_ks_client=admin_keystone_client
                )
                try:
                    heat_client.stacks.get(stack_id)
                    LOGGER.debug("Returning heat client")
                    return heat_client
                # BUG FIX: "except Exception, err" is Python-2-only syntax;
                # "as" works on Python 2.6+ and Python 3.
                except Exception:
                    LOGGER.debug(
                        "Stack doesn't belong to tenant {} anyway".format(
                            tenant.name
                        )
                    )
            except Exception as err:
                LOGGER.error("Exception accessing stacks: {}".format(err))
        return None

    @staticmethod
    def get_openstack_clients():
        """Return keystone, heat, and nova clients"""
        keystone_client = OpenStackClients.get_keystone_client()
        heat_client = OpenStackClients.get_heat_client(keystone_client)
        nova_client = OpenStackClients.get_nova_client()
        return (keystone_client, nova_client, heat_client)
class OpenStackInterface:
    """An interface to perform some needed Openstack operations"""
    def __init__(self):
        """Generate a nova client for the interface"""
        LOGGER.debug("OpenStackInterface init")
        self._nova_client = OpenStackClients.get_nova_client()

    def get_migration_destination(self, vm_id):
        """get a random host id to move this vm to,
        so long as it's not the one it's already on
        """
        LOGGER.info("Looking for a host to move vm {} to...".format(vm_id))
        hypervisor_list = self._nova_client.hypervisors.list()
        valid_hypervisors = []
        for hypervisor in hypervisor_list:
            hypervisor_hosts = self._nova_client.hypervisors.search(
                hypervisor.hypervisor_hostname,
                servers=True
            )
            origin_hypervisor = False
            for hypervisor_host in hypervisor_hosts:
                try:
                    for server in hypervisor_host.servers:
                        if server.get('uuid', None) == vm_id:
                            origin_hypervisor = True
                except AttributeError:
                    # Hypervisor entry has no .servers attribute at all.
                    # Fixed: logger.warn is a deprecated alias of warning.
                    LOGGER.warning("No servers on this hypervisor")
            if not origin_hypervisor:
                valid_hypervisors.append(hypervisor)
        if valid_hypervisors:
            LOGGER.info(
                "Found these hypervisors {}".format(valid_hypervisors)
            )
            rando_hype = random.choice(valid_hypervisors)
            LOGGER.info(
                "Returning this hypervisor [{}]".format(rando_hype)
            )
            return rando_hype.hypervisor_hostname
        else:
            LOGGER.warning("Could not find any other hypervisors")
            return None

    def get_migration_target(self, stack_id):
        """get a vm id from this stack"""
        keystone_client = OpenStackClients.get_keystone_client()
        heat_client = OpenStackClients.get_heat_client_for_stack(
            keystone_client,
            stack_id
        )
        LOGGER.info(
            "Looking for a vm that belongs to stack {}".format(stack_id)
        )
        the_vms = []
        for resource in heat_client.resources.list(stack_id):
            if resource.resource_type == "OS::Nova::Server":
                the_vms.append(resource.physical_resource_id)
        LOGGER.info("Found these vms {}".format(the_vms))
        rando_vm = random.choice(the_vms)
        LOGGER.info("Returning this vm [{}]".format(rando_vm))
        return rando_vm

    def get_scale_value(self, vm_id):
        """TODO: get the flavour 'up' from this vm's current one"""
        # TODO: actually get scale value
        tmp_flavour = "2"
        LOGGER.warning(
            "Returning fake flavour {} for VM uuid {}".format(
                tmp_flavour, vm_id
            )
        )
        return tmp_flavour

    def get_vm_hypervisor_mapping(self):
        """Return a dict mapping each server uuid to its hypervisor hostname."""
        server_list = {}
        hypvrs = self._nova_client.hypervisors.list()
        for hype in hypvrs:
            hype_obj = self._nova_client.hypervisors.search(
                hype.hypervisor_hostname,
                servers=True
            )
            for h in hype_obj:
                try:
                    for server in h.servers:
                        server_list[server.get('uuid', None)] = h.hypervisor_hostname
                except AttributeError:
                    # Hypervisor entry has no servers; skip it.
                    pass
        return server_list
|
nilq/baby-python
|
python
|
from django.contrib.auth import authenticate
from django.test import TestCase
from django.urls import resolve
from .models import User
from .views import index_view, dashboard_view
from django.contrib.auth.views import LoginView, LogoutView
from django.contrib.auth.decorators import login_required
class UserLoggedInTest(TestCase):
    """Authentication checks for a regular (non-staff) account."""

    def setUp(self):
        self.user = User.objects.create_user(
            username="testuser", password="test.pass"
        )
        self.user.save()

    def tearDown(self):
        self.user.delete()

    def test_correct(self):
        authed = authenticate(username="testuser", password="test.pass")
        self.assertTrue(authed is not None and authed.is_authenticated)

    def test_wrong_username(self):
        authed = authenticate(username="user", password="test.pass")
        self.assertFalse(authed is not None and authed.is_authenticated)

    def test_wrong_password(self):
        authed = authenticate(username="testuser", password="pass")
        self.assertFalse(authed is not None and authed.is_authenticated)

    def test_user_permission(self):
        # A plain user is active but holds no staff/superuser flags.
        self.assertFalse(self.user.is_superuser)
        self.assertTrue(self.user.is_active)
        self.assertFalse(self.user.is_staff)
class AdminLoggedInTest(TestCase):
    """Authentication checks for a superuser account."""

    def setUp(self):
        self.admin = User.objects.create_superuser(
            username="admin", password="admin.pass"
        )
        self.admin.save()

    # BUG FIX: was lowercase `teardown`, which unittest never calls,
    # so the superuser leaked between tests.
    def tearDown(self):
        self.admin.delete()

    def test_correct(self):
        admin = authenticate(username="admin", password="admin.pass")
        self.assertTrue((admin is not None) and admin.is_authenticated)

    def test_wrong_username(self):
        admin = authenticate(username="user", password="admin.pass")
        self.assertFalse((admin is not None) and admin.is_authenticated)

    def test_wrong_password(self):
        admin = authenticate(username="admin", password="pass")
        self.assertFalse((admin is not None) and admin.is_authenticated)

    def test_superuser_permission(self):
        # A superuser carries all three flags.
        self.assertTrue(self.admin.is_active)
        self.assertTrue(self.admin.is_staff)
        self.assertTrue(self.admin.is_superuser)
class DashboardPageTest(TestCase):
    """Root ('/') dashboard view tests for a logged-in user."""

    def setUp(self):
        self.user = User.objects.create_user(
            username="testuser", password="test.pass"
        )
        self.response = self.client.login(
            username="testuser", password="test.pass"
        )

    # BUG FIX: this method was decorated with @login_required, which is a
    # *view* decorator expecting an HttpRequest — applied to a TestCase
    # method it received `self` as the request.
    def test_user_logged_in(self):
        assert self.user.is_authenticated

    def test_root_url(self):
        response = self.client.get("/")
        self.assertEqual(response.status_code, 200)

    def test_root_url_view(self):
        dashboard_url = resolve("/")
        self.assertEqual(dashboard_url.func, index_view)

    def test_root_title(self):
        response = self.client.get("/")
        self.assertContains(response, "<title>TDrive</title>")

    def test_root_template(self):
        response = self.client.get("/")
        self.assertTemplateUsed(response, "registration/index.html")
class LoginPageTest(TestCase):
    """Unauthenticated checks for the /login/ page."""

    def _get_login(self):
        # Shared fetch of the login page.
        return self.client.get("/login/")

    def test_login_url(self):
        self.assertEqual(self._get_login().status_code, 200)

    def test_login_title(self):
        self.assertContains(self._get_login(), "<title>Login | TDrive</title>")

    def test_login_template(self):
        self.assertTemplateUsed(self._get_login(), "registration/login.html")
class UserLoggedOutTest(TestCase):
    """Logging out redirects back to the site root."""

    def setUp(self):
        self.user = User.objects.create_user(
            username="testuser", password="test.pass"
        )
        self.response = self.client.login(
            username="testuser", password="test.pass"
        )

    def test_logout_url(self):
        response = self.client.get("/logout/?next=/")
        # A redirect (302) back to '/' is expected after logout.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], '/')
|
nilq/baby-python
|
python
|
"""
Example Vis Receive workflow
"""
# pylint: disable=C0103
import logging
import ska_sdp_config
import os
# Initialise logging and configuration
logging.basicConfig()
log = logging.getLogger('main')
log.setLevel(logging.INFO)
config = ska_sdp_config.Config()
# Find processing block configuration from the configuration.
workflow = {
    'id': 'vis_receive',
    'version': '0.1.0',
    'type': 'realtime'
}
log.info("Waiting for processing block...")
# Transaction loop: claim a matching processing block. Presumably
# txn.loop(wait=True) blocks and re-runs the transaction until one
# appears, while `continue` commits and exits — TODO confirm against
# ska_sdp_config semantics.
for txn in config.txn():
    pb = txn.take_processing_block_by_workflow(
        workflow, config.client_lease)
    if pb is not None:
        continue
    txn.loop(wait=True)
# Show
log.info("Claimed processing block %s", pb)
# Deploy Vis Receive with 1 worker.
log.info("Deploying Vis Receive...")
deploy_id = pb.pb_id + "-vis-receive"
deploy = ska_sdp_config.Deployment(
    deploy_id, "helm", {
        'chart': 'vis-receive',  # Helm chart deploy/charts/vis-receive
    })
for txn in config.txn():
    txn.create_deployment(deploy)
try:
    # Just idle until processing block or we lose ownership
    log.info("Done, now idling...")
    for txn in config.txn():
        if not txn.is_processing_block_owner(pb.pb_id):
            break
        txn.loop(True)
finally:
    # Clean up vis receive deployment.
    for txn in config.txn():
        txn.delete_deployment(deploy)
    config.close()
|
nilq/baby-python
|
python
|
# coding:utf-8
from django.contrib.auth import authenticate,login,logout
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required, permission_required,user_passes_test
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.template.loader import get_template
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from wap.models import *
from django.core.mail import EmailMultiAlternatives
from django.core.mail import send_mail as core_send_mail
from django.utils import timezone
from django.core import serializers
from django.template import Context, loader
from SizeConverter.SizeConverter import *
from cyexl import xizhuang,chenshan
import json
import logging
import time
import datetime
import threading
def get_logger():
    """Return the root logger, forced to DEBUG level.

    BUG FIX: the original called ``logger.setLevel(__debug__)`` --
    ``__debug__`` is a bool (1 normally, 0 under ``python -O``), which is
    not a real logging level.  Use ``logging.DEBUG`` explicitly.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    return logger

# Install a default handler so debug output is actually emitted.
logging.basicConfig(level=logging.DEBUG)
def login_view(request):
    """Authenticate the POSTed credentials and start a session.

    On success, remember the referring page in the session and redirect to
    the order-management page; on failure, bounce back to the login page.
    """
    user = authenticate(username=request.POST['username'], password=request.POST['password'])
    if user is not None:
        # Remember where the user came from so a later view can return there.
        request.session['login_from'] = request.META.get('HTTP_REFERER', '/')
        login(request, user)
        # BUG FIX: removed the leftover Python-2 debug statement
        # `print request.user` (a syntax error under Python 3).
        return HttpResponseRedirect('/order/order_manage')
    else:
        # Authentication failed; no error feedback yet (original TODO).
        return HttpResponseRedirect('/order/order_login')
def logout_view(request):
    # End the session and send the user back to the order login page.
    logout(request)
    return HttpResponseRedirect('/order/order_login')
def order_login(request):
    # Render the order login page.  NOTE: locals() here only captures
    # `request` and `t`, so the template context is effectively minimal.
    t = get_template('order/order_login.html')
    c = RequestContext(request,locals())
    return HttpResponse(t.render(c))
def order_denie(request):
    # Render the "access denied" page shown when a group check fails.
    t = get_template('order/order_denie.html')
    c = RequestContext(request,locals())
    return HttpResponse(t.render(c))
def not_in_orders_group(user):
    """Return True when *user* belongs to exactly one 'orders' group.

    NOTE: despite the `not_in_` name, True means membership -- the name
    follows the original file's convention for user_passes_test checks.
    """
    if not user:
        return False
    return user.groups.filter(name='orders').count() == 1
def not_in_Factory_group(user):
    """Return True when *user* belongs to exactly one 'Factory' group.

    True means membership; the `not_in_` prefix mirrors the sibling checks.
    """
    if not user:
        return False
    return user.groups.filter(name='Factory').count() == 1
def not_in_addorder_group(user):
    """Return True when *user* belongs to exactly one 'addorder' group.

    True means membership; the `not_in_` prefix mirrors the sibling checks.
    """
    if not user:
        return False
    return user.groups.filter(name='addorder').count() == 1
@login_required
#@user_passes_test(not_in_addorder_group, login_url='/admin/')
def order_manage(request,tab):
    # Order-management dashboard; `tab` selects which order list to show.
    # NOTE(review): indentation reconstructed -- verify branch nesting.
    mailtext = '已发送'
    # RequestContext is built from locals() *here*, so it only captures
    # request/tab/mailtext; later variables are passed to templates directly.
    c = RequestContext(request,locals())
    mailurl = '#'
    if tab == 'order':
        # Order-review tab: only for members of the 'addorder' group.
        if request.user.groups.filter(name='addorder').count() ==1:
            nav = '订单审核'
            plant_update_issue = Plant_update.objects.filter(plant_status='退回订单',order__gh=request.user)
            plant_update_list = Plant_update.objects.filter(plant_status='订单审查',order__gh=request.user)
            mailtext = '提交复审'
            mailurl = '/order/order_update_post'
            return render_to_response('order/order_add.html', {'a': plant_update_issue,'b':plant_update_list,
                                                               'user':c,'nav':nav,'mailtext':mailtext,
                                                               'mailurl':mailurl})
    if tab == 'wxd':
        # Second-review tab: only for members of the 'orders' group.
        nav = '复审订单'
        if request.user.groups.filter(name='orders').count() ==1:
            plant_update_list = Plant_update.objects.filter(plant_status='复审中')
            order_list = Order.objects.filter(order_status='复审中')  # NOTE(review): unused in this branch
            mailtext = '发送邮件'
            mailurl = '/order/kmail'
            return render_to_response('order/order_manage.html', {'a': plant_update_list,'user':c,'nav':nav,'mailtext':mailtext,'mailurl':mailurl})
        # Not a reviewer: send to the admin site instead.
        return HttpResponseRedirect('/admin/')
    elif tab == 'dzing':
        nav = '定制中订单'
        order_list = Order.objects.filter(order_status='定制中')
    elif tab == 'dzwc':
        nav = '制作完成订单'
        order_list = Order.objects.filter(order_status='定制完成')
    elif tab == 'psing':
        nav = '配送中订单'
        order_list = Order.objects.filter(order_status='配送中')
    elif tab == 'ywc':
        nav = '已完成订单'
        order_list = Order.objects.filter(order_status='已收货')
    # NOTE(review): an unknown `tab` (or a failed 'order' group check) falls
    # through with order_list/nav undefined -> NameError; confirm the URLconf
    # constrains `tab` to the handled values.
    return render_to_response('order/orderok.html', {'a': order_list,'user':c,'nav':nav,'mailtext':mailtext,'mailurl':mailurl})
@csrf_exempt
def manage_post(request):
    # Bulk-update review state for a comma-separated list of order numbers
    # POSTed as `id`.  Orders under review are advanced to 复审中 (in second
    # review); anything else is returned to the submitter (退回订单) with the
    # POSTed `issue` text attached.
    # NOTE(review): indentation reconstructed -- the placement of the first
    # redirect (after the loop vs. inside the else branch) should be verified.
    response_data = {}
    response_data['code'] = -1  # NOTE(review): written but never returned
    if request.method == 'POST':
        orderlist = request.POST.get('id')
        orderlist_number = [str(i) for i in orderlist.split(',')]
        for ordernumber in orderlist_number:
            plant_update = get_object_or_404(Plant_update, order__order_number=ordernumber)
            if plant_update.plant_status == '订单审查' or plant_update.plant_status == '退回订单' :
                # Approve: move to second review.
                plant_update.plant_status = '复审中'
                plant_update.save()
            else:
                #if plant_update.plant_status == '复审中':
                # Reject: send back with the reviewer's issue text.
                issue = request.POST.get('issue')
                plant_update.plant_status = '退回订单'
                plant_update.issue = issue
                plant_update.save()
        return HttpResponseRedirect('/order/order_manage/wxd/')
    return HttpResponseRedirect('/order/order_manage/order/')
@csrf_exempt
@user_passes_test(not_in_orders_group, login_url='/order/order_denie')
def kmail(request):
    # Email the factory the order sheets (.xls) for the POSTed order numbers,
    # then advance each order to 定制中 (in production) and decrement
    # packaging / fabric stock.  Returns {"code": 0} as JSON on success.
    #send_mail(u'123', u'456789','aegeggwd@163.com',['kanghy@brosbespoke.com'], fail_silently=False)
    #order = Order.objects.get(order_number= orderid)
    orderlist = request.POST.get('id')
    orderlist_number = [str(i) for i in orderlist.split(',')]
    get_logger().debug('---------------------%s'% orderlist_number)
    response_data ={}
    subject, from_email, to = '订单号'+orderlist +'下单表', 'aegeggwd@163.com', 'muskliu@brosbespoke.com'
    #subject, from_email, to = '订单号'+orderlist +'下单表', 'aegeggwd@163.com', 'kanghy@brosbespoke.com'
    text_content = '下单表'
    html_content = '<p>This is an <strong>important</strong> message.</p>'
    msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
    msg.attach_alternative(html_content, "text/html")
    for ordernumber in orderlist_number:
        oederlist = get_object_or_404(Order, order_number=ordernumber)  # sic: "oederlist" throughout
        fir_name = unicode(oederlist.id) + unicode(oederlist.user.nickname) + '-' + unicode(oederlist.user.phonenumber) + '.xls'
        try:
            # Attach a previously generated sheet if it already exists...
            msg.attach_file('/home/Download/'+ fir_name)
        except:
            # ...otherwise generate it now and attach it.
            # NOTE(review): bare except also hides unrelated I/O errors.
            exl_download(request,ordernumber)
            msg.attach_file('/home/Download/'+ fir_name)
        order = Order.objects.get(order_number= ordernumber)
        order.order_status = '定制中'
        order.save()
        plant_update = Plant_update.objects.get(order__order_number= ordernumber)
        plant_update.plant_status = '等待制作'
        plant_update.save()
        # NOTE(review): this decrements *every* pack's stock by one for each
        # order -- confirm that is intended rather than a single pack.
        pack = Pack.objects.all()
        for packs in pack:
            packs.volume = packs.volume -1
            packs.save()
        try:
            fabric = Fabric.objects.get(id= order.fabric_id)
            # Fabric usage: suits consume 3.5 units, shirts 1.7.
            if order.product.type == 'suit':
                fabric.volume = fabric.volume - 3.5
            if order.product.type == 'shirt':
                fabric.volume = fabric.volume - 1.7
            fabric.save()
        except:
            pass  # best-effort stock update; a missing fabric row is ignored
    msg.send()
    response_data['code'] = 0
    return HttpResponse(json.dumps(response_data), content_type="application/json")
    #return render_to_response('order/mailok.html', {'mailok': mailok,'orderid':orderid})
@login_required
#@user_passes_test(not_in_orders_group, login_url='/order/order_denie')
def exl_download(request,orderid):
    # Generate the factory order sheet (.xls) for one order and stream it
    # back as a file download.  Combines the customer's measurements with
    # the per-garment sizing produced by SizeConverter, then writes the
    # sheet via the garment-specific exporter (xizhuang for suits,
    # chenshan for shirts) under /home/Download/.
    oederlist = get_object_or_404(Order, order_number=orderid)
    user_id = oederlist.user_id
    userid = oederlist.user_id
    # Orders placed on behalf of a friend use the friend's measurements,
    # but the sheet is still named after the account holder.
    if oederlist.is4friend:
        userlist = get_object_or_404(User, phonenumber=oederlist.friend_phone)
        user_id = userlist.id
    userinfo = get_object_or_404(User, id=user_id)  # measurement source
    users = get_object_or_404(User, id=userid)      # account holder
    sleeve_lefet = userinfo.sleeve_lefet
    stomach = userinfo.stomach
    favor = userinfo.favor
    istie = userinfo.istie
    iswatch = userinfo.iswatch
    suit_shangyi = userinfo.suit_shangyi
    majia_qianchang = userinfo.majia_qianchang
    majia_houchang = userinfo.majia_houchang
    # Fill in defaults for missing or zero measurements / preferences.
    if not sleeve_lefet or sleeve_lefet == '0':
        sleeve_lefet = userinfo.sleeve_right
    if not majia_qianchang or majia_qianchang == '0':
        majia_qianchang = 0
    if not majia_houchang or majia_houchang == '0':
        majia_houchang = 0
    if not stomach:
        stomach = 0
    if not favor:
        favor = 1
    if not istie:
        istie = 0
    if not iswatch:
        iswatch = 2
    if not suit_shangyi:
        suit_shangyi = 1
    #get_logger().debug('-------stomach-------%s'%stomach)
    # Measurement vector in the positional order SizeConverter expects.
    sizeList=[
        float(userinfo.lingwei),       # neck
        float(userinfo.chest),         # chest
        float(userinfo.waist),         # waist
        float(userinfo.shoulder),      # shoulder width
        float(userinfo.sleeve_right),  # sleeve length (right)
        float(sleeve_lefet),           # sleeve length (left)
        float(userinfo.back_cloth),    # back length
        float(userinfo.hip),           # hip
        float(userinfo.kuyao),         # trouser waist
        float(userinfo.kuchang),       # trouser length
        float(userinfo.hengdang),      # crotch width
        float(userinfo.xiwei),         # knee
        float(userinfo.kukou),         # trouser hem
        float(majia_houchang),         # vest back length
        float(userinfo.xiulong),       # armhole
        float(userinfo.chougenfen),    # upper-arm width
        float(userinfo.xiukou_right),  # cuff (right cuff used for now)
        float(stomach),                # stomach
        float(majia_qianchang),        # vest front length
        float(userinfo.height),        # height
        float(userinfo.weight),        # weight
    ]
    userChoice={
        'm':int(favor),         # fit: 0 slim, 1 regular, 2 loose
        'i':int(istie),         # 0 wears a tie, 1 does not
        'j':int(iswatch),       # watch: 0 left wrist, 1 right, 2 none
        'q':int(suit_shangyi),  # jacket: 0 long cut, 1 short cut
    }
    get_logger().debug('-------sizeList-%s',sizeList)
    get_logger().debug('-------userChoice-%s',userChoice)
    # Estimated delivery date: 10 days, 12 when a shirt adds custom cuffs.
    d1 = datetime.date.today()
    timdata = time.strftime('%Y-%m-%d',time.localtime(time.time()))
    d2 = d1 + datetime.timedelta(10)
    if oederlist.add_xiuzi and oederlist.product.type == 'shirt':
        d2 = d1 + datetime.timedelta(12)
    get_logger().debug('-------sizes-'+ str(d2))
    order_xx ={'user':users.name,'phone':users.phonenumber,'tmime':timdata,'d2':str(d2)}
    bzcc ={}   # NOTE(review): unused
    #get_logger().debug('-------sizes-------%s'%order_xx)
    xzks ={}   # NOTE(review): unused
    cycc = SizeConverter(sizeList,userChoice).convert()
    get_logger().debug('-------sizes-:%s', cycc)
    # Write the .xls through the garment-specific exporter.
    if oederlist.product.type == 'suit':
        bcc = xizhuang(userinfo,users,cycc,oederlist,order_xx)
    else:
        bcc = chenshan(userinfo,users,cycc,oederlist,order_xx)
    def file_iterator(file_name, chunk_size=512):
        # Stream the generated sheet from disk in small chunks.
        with open('/home/Download/'+file_name) as f:
            while True:
                c = f.read(chunk_size)
                if c:
                    yield c
                else:
                    break
    the_file_name = str(oederlist.id) + str(users.nickname) +'-'+ str(users.phonenumber) +".xls"
    response = StreamingHttpResponse(file_iterator(the_file_name))
    response['Content-Type'] = 'application/octet-stream'
    response['Content-Disposition'] = 'attachment;filename="{0}"'.format(the_file_name)
    return response
@login_required
@user_passes_test(not_in_Factory_group, login_url='/admin/')
def plant_statu(request,tab):
    # Factory-side production dashboard; `tab` selects a production stage.
    naotext = '已发送'
    naourl = '#'
    if tab == 'ddzz':
        # Waiting to be made
        nao = '等待制作订单'
        naotext = '提交制作'
        naourl = '/order/order_update_post'
        plant_update = Plant_update.objects.filter(plant_status ='等待制作')
    elif tab == 'zzing':
        # In production
        nao = '制作中订单'
        naotext = '提交完成'
        naourl = '/order/order_update_post'
        plant_update = Plant_update.objects.filter(plant_status ='制作中')
    elif tab == 'zzwc':
        # Production finished, ready to ship
        nao = '定制中订单'
        naotext = '提交发货'
        naourl = '/order/order_update_post'
        plant_update = Plant_update.objects.filter(plant_status ='制作完成')
    elif tab == 'psing':
        # Out for delivery
        nao = '送货中订单'
        naotext = '提交完成'
        naourl = '/order/order_update_post'
        plant_update = Plant_update.objects.filter(plant_status ='配送中')
    elif tab == 'yjf':
        # Received orders: optionally record a follow-up result from the
        # query string (state/id) before rendering the list.
        nao = '已收货订单'
        naotext = '已收货订单'
        try:
            numberid = request.GET['state']
            orderid = request.GET['id']
            order = Order.objects.get(id= orderid)
            plant = Plant_update.objects.get(order_id= order.id)
            order.huifang = numberid
            order.save()
            plant.plant_status = '订单完成'
            #plant.jiaofu_time = time
            plant.save()
        except:
            pass  # no state/id in the query string: just show the list
        plant_update = Plant_update.objects.filter(plant_status ='已收货')
        c = RequestContext(request,locals())
        return render_to_response('order/plant_statu_is.html', {'user':c,'naotext':naotext,'nao':nao,'naourl':naourl,'a':plant_update})
    elif tab == 'ddwc':
        # Fully completed orders
        nao = '订单已完成'
        naotext = '订单已完成'
        plant_update = Plant_update.objects.filter(plant_status ='订单完成')
    # NOTE(review): an unknown `tab` reaches here with plant_update/nao
    # undefined -> NameError; confirm the URLconf constrains `tab`.
    c = RequestContext(request,locals())
    return render_to_response('order/plant_statu.html', {'user':c,'naotext':naotext,'nao':nao,'naourl':naourl,'a':plant_update})
@csrf_exempt
def order_update_post(request,orderid):
    """Advance an order one step along the production pipeline.

    The plant's current status determines the order's next status, the
    plant's next status, which timestamp field gets stamped, and where
    the factory user is redirected afterwards.
    """
    # current plant status -> (next order status, next plant status,
    #                          timestamp attribute, redirect target)
    transitions = {
        '等待制作': ('定制中', '制作中', 'zhizuo_time', '/order/plant_statu/ddzz/'),
        '制作中': ('定制完成', '制作完成', 'wancheng_time', '/order/plant_statu/zzing/'),
        '制作完成': ('配送中', '配送中', 'peishong_time', '/order/plant_statu/zzwc/'),
        '配送中': ('已收货', '已收货', 'jiaofu_time', '/order/plant_statu/psing/'),
    }
    order = Order.objects.get(order_number= orderid)
    plant = Plant_update.objects.get(order_id= order.id)
    stamp = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S")
    step = transitions.get(plant.plant_status)
    if step is not None:
        next_order_status, next_plant_status, stamp_field, target = step
        order.order_status = next_order_status
        order.save()
        plant.plant_status = next_plant_status
        setattr(plant, stamp_field, stamp)
        plant.save()
        return HttpResponseRedirect(target)
    # Unknown status: change nothing, fall back to the "ready to ship" tab.
    return HttpResponseRedirect('/order/plant_statu/zzwc')
def order_post(request):
    """AJAX helper: list fabrics for a product and addresses for a user.

    Returns JSON with parallel id/name lists for fabrics and addresses,
    plus the user's display name.
    """
    product = request.POST.get('product') or 1  # default product when missing
    user = request.POST.get('user')
    fabrics = Fabric.objects.filter(product=product)
    addresses = Address4Order.objects.filter(user=user)
    username = User.objects.get(id=user)
    response_data = {
        'fabricid': [str(fabric.id) for fabric in fabrics],
        'nam': [fabric.name for fabric in fabrics],
        'addressnam': [str(address) for address in addresses],
        'addressid': [str(address.id) for address in addresses],
        'username': str(username),
    }
    return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_user_name(request):
    # AJAX helper: resolve a user id (GET param) to its display name as JSON.
    response_data ={}
    user_id = request.GET['user_id']
    username = User.objects.get(id=user_id)
    response_data={'user_name':str(username)}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_address_name(request):
    # NOTE(review): reads the POSTed user id but never uses it and always
    # returns an empty JSON object -- looks like an unfinished stub.
    response_data ={}
    address_id = request.POST['user']
    return HttpResponse(json.dumps(response_data), content_type="application/json")
def get_product_name(request):
    """Return the product type for the POSTed product id as JSON.

    Responds with {"type": <product.type>}, defaulting to 'suit' when the
    product cannot be resolved.
    """
    response_data = {}
    product_name = request.POST['product']
    try:
        products = Product.objects.get(id = product_name)
        response_data['type'] = products.type
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); lookup failures fall back to 'suit'.
        response_data['type'] = 'suit'
    return HttpResponse(json.dumps(response_data), content_type="application/json")
|
nilq/baby-python
|
python
|
import random
import numpy as np

# NOTE(review): both imports above are unused in this snippet.

a = [5, -1, 0, -1, 2]
b=-9
# sum(a) == 5 > 3 and b == -9 < -1, so this always prints True.
if sum(a) > 3 and b < -1:
    print(True)
|
nilq/baby-python
|
python
|
from keras.callbacks import TensorBoard
import tensorflow as tf
from keras.callbacks import EarlyStopping, ModelCheckpoint
from exercise_performance_scorer.data_creator import ExercisePerformanceDataCreator
from exercise_performance_scorer.model import ExercisePerformanceModel
class ExercisePerformanceTrainer:
    """End-to-end trainer for the exercise-performance Keras model:
    loads the feature datasets, resumes from a checkpoint when one exists,
    fits with checkpoint/early-stopping/TensorBoard callbacks, then reports
    the test loss."""

    def __init__(self, config):
        self._config = config
        self._data_creator = ExercisePerformanceDataCreator(config)
        self._session = None
        # Pre-split train / test / validation features and labels.
        self._train_features, self._train_labels, self._test_features, \
            self._test_labels, self._val_features, self._val_labels = self._data_creator.get_feature_datasets()
        self._ml_model = ExercisePerformanceModel(self._config)
        self._ml_model.compile()

    def train(self):
        """Fit the model inside a TF1-style session, resuming if possible."""
        with tf.Session() as self._session:
            self._session.run(tf.global_variables_initializer())
            try:
                # Resume from a previous checkpoint when one exists.
                self._ml_model.load(self._config.model.path)
            except OSError:
                print("Can't find model. Training from scratch.")
            print('Starting training')
            tensorboard_cb = TensorBoard(log_dir=self._config.training.log_path, histogram_freq=0,
                                         write_graph=True, write_images=True)
            # Keep only the best weights (val_loss) and stop early when
            # validation stops improving.
            self._ml_model.model.fit(
                self._train_features, self._train_labels, validation_data=(self._val_features, self._val_labels),
                epochs=self._config.training.epoch_num,verbose=1,
                batch_size=self._config.training.batch_size,
                callbacks=[ModelCheckpoint(self._config.model.path, 'val_loss', save_best_only=True,
                                           save_weights_only=True),
                           EarlyStopping(monitor='val_loss', verbose=0,
                                         patience=self._config.training.lr_decrease_patience,
                                         restore_best_weights=True),
                           tensorboard_cb])
            test_loss = self._ml_model.model.evaluate(self._test_features, self._test_labels,
                                                      batch_size=self._config.training.batch_size)
            print(f'Test loss: {test_loss}')
if __name__ == '__main__':
    # Script entry point: train with the pipeline section of the global config.
    from config import general_config
    trainer = ExercisePerformanceTrainer(general_config.exercise_performance_pipeline)
    trainer.train()
|
nilq/baby-python
|
python
|
from urllib.parse import urlparse
from app import logger
import requests
class ControllerService:
    """Thin HTTP client for gate controllers.

    Two controller flavours are supported: 'gatekeeper' (command/status via
    fixed query-string endpoints) and 'generic' (arbitrary open/close URIs
    stored on the gate).
    """

    def init_app(self, app):
        # No initialisation needed; kept for the Flask-extension convention.
        return

    def send_command(self, gate, command='open', conditional=False):
        """Send `open`/`close` (optionally conditional) to a gate controller.

        Returns True when the request was dispatched, False on any failure.
        `conditional` maps open/close to the gatekeeper's grant/deny
        endpoints; generic gates ignore it.
        """
        try:
            if gate.type == 'gatekeeper':
                if command == 'open' and not conditional:
                    requests.get(f'http://{gate.controller_ip}/?a=open', timeout=2)
                elif command == 'close' and not conditional:
                    requests.get(f'http://{gate.controller_ip}/?a=close', timeout=2)
                elif command == 'open' and conditional:
                    requests.get(f'http://{gate.controller_ip}/?a=grant', timeout=2)
                elif command == 'close' and conditional:
                    requests.get(f'http://{gate.controller_ip}/?a=deny', timeout=2)
            elif gate.type == 'generic':
                if command == 'open':
                    requests.get(gate.uri_open, timeout=2)
                if command == 'close':
                    requests.get(gate.uri_close, timeout=2)
            return True
        except Exception:
            # Narrowed from a bare `except:`; network/timeout errors are
            # reported as a failed send rather than propagated.
            logger.error("Could not send command to controller (gate: {})".format(gate.name))
            return False

    def get_status(self, gate):
        """Probe a gate controller and report liveness (and uptime if given).

        Returns a dict with at least 'is_alive'; gatekeeper gates also get
        'controller_ip' and 'uptime' (0 when the JSON payload lacks it).
        """
        if gate.type == 'gatekeeper':
            try:
                response = requests.get(f'http://{gate.controller_ip}/json', timeout=2)
            except Exception:  # narrowed from bare except
                return {'is_alive': False, 'controller_ip': gate.controller_ip}
            if not response.ok:
                return {'is_alive': False, 'controller_ip': gate.controller_ip}
            uptime = 0
            try:
                data = response.json()
                uptime = data['uptime']
            except Exception:
                # Missing/invalid JSON: treat as alive with unknown uptime.
                pass
            return {'is_alive': True, 'controller_ip': gate.controller_ip, 'uptime': uptime}
        elif gate.type == 'generic':
            # If there is no open uri then we can't do much
            if gate.uri_open == '':
                return {'is_alive': False}
            try:
                controller_ip = urlparse(gate.uri_open).hostname
                response = requests.get(f'http://{controller_ip}/', timeout=2)
            except Exception:  # narrowed from bare except
                return {'is_alive': False}
            if not response.ok:
                return {'is_alive': False, 'controller_ip': controller_ip}
            return {'is_alive': True, 'controller_ip': controller_ip}
|
nilq/baby-python
|
python
|
from quart import Blueprint
home = Blueprint("home", __name__)
@home.route("/")
def index():
"""Home view.
This view will return an empty JSON mapping.
"""
return {}
|
nilq/baby-python
|
python
|
from django.contrib import admin
from froide.helper.admin_utils import ForeignKeyFilter
class FollowerAdmin(admin.ModelAdmin):
    """Admin for follower records: filterable by confirmation state and by
    the followed object / following user, searchable by email."""

    # user and content_object are picked via raw-id widgets to avoid huge
    # select boxes.
    raw_id_fields = (
        "user",
        "content_object",
    )
    date_hierarchy = "timestamp"
    list_display = ("user", "email", "content_object", "timestamp", "confirmed")
    list_filter = (
        "confirmed",
        ("content_object", ForeignKeyFilter),
        ("user", ForeignKeyFilter),
    )
    search_fields = ("email",)

    def get_queryset(self, request):
        # Avoid N+1 queries in the changelist.
        # NOTE(review): prefetch_related on single-valued relations issues
        # extra queries; select_related may be intended -- confirm.
        qs = super().get_queryset(request)
        return qs.prefetch_related("user", "content_object")
|
nilq/baby-python
|
python
|
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import cv2
import numpy as np
from collections import OrderedDict
# https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/image_classification/quickdraw_labels.txt
# Rule: key of category = index -1, with index from the link above
CLASS_IDS = OrderedDict()
CLASS_IDS[8] = "apple"
CLASS_IDS[35] = "book"
CLASS_IDS[38] = "bowtie"
CLASS_IDS[58] = "candle"
CLASS_IDS[74] = "cloud"
CLASS_IDS[87] = "cup"
CLASS_IDS[94] = "door"
CLASS_IDS[104] = "envelope"
CLASS_IDS[107] = "eyeglasses"
CLASS_IDS[136] = "hammer"
CLASS_IDS[139] = "hat"
CLASS_IDS[156] = "ice cream"
CLASS_IDS[167] = "leaf"
CLASS_IDS[252] = "scissors"
CLASS_IDS[283] = "star"
CLASS_IDS[301] = "t-shirt"
CLASS_IDS[209] = "pants"
CLASS_IDS[323] = "tree"
def get_images(path, classes):
    """Load one sprite per class name from "<path>/<name>.png" (alpha kept)."""
    loaded = []
    for name in classes:
        loaded.append(cv2.imread("{}/{}.png".format(path, name), cv2.IMREAD_UNCHANGED))
    return loaded
def get_overlay(bg_image, fg_image, sizes=(40, 40)):
    # Alpha-blend a 4-channel sprite onto a background patch of shape `sizes`.
    fg_image = cv2.resize(fg_image, sizes)
    fg_mask = fg_image[:, :, 3:]   # alpha channel (kept 3-D for cvtColor)
    fg_image = fg_image[:, :, :3]  # colour channels
    bg_mask = 255 - fg_mask        # inverse alpha
    # Normalise images and masks into [0, 1] before blending.
    bg_image = bg_image / 255
    fg_image = fg_image / 255
    fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR) / 255
    bg_mask = cv2.cvtColor(bg_mask, cv2.COLOR_GRAY2BGR) / 255
    # The 255 weights rescale the [0, 1] blend back to 8-bit range before
    # the uint8 cast.
    image = cv2.addWeighted(bg_image * bg_mask, 255, fg_image * fg_mask, 255, 0.).astype(np.uint8)
    return image
|
nilq/baby-python
|
python
|
import json
import os
# Directory holding the JSON fixtures used by the mapper tests below.
TEST_FILE_BASE_PATH = 'tests'
def __test_pages(app):
    """Smoke-check that every listed page returns HTTP 200."""
    testapp = app.test_client()
    # BUG FIX: ('/') is just the string '/', so the loop iterated its
    # characters; a one-element tuple needs a trailing comma.  (Harmless for
    # the single '/' entry, but wrong as soon as a page is added.)
    pages = ('/',)
    for page in pages:
        resp = testapp.get(page)
        assert resp.status_code == 200
def test_nearest_stations_mapper():
    # The mapper should convert a raw Seoul-bus-API station record into the
    # app's schema with properly typed fields.
    from transporter.utils import NearestStationsMapper

    source_dict = {
        "arsId": "0",
        "dist": "153",
        "gpsX": "127.12347574483393",
        "gpsY": "37.39985681895763",
        "posX": "210931.81833",
        "posY": "433403.53304",
        "stationId": "52913",
        "stationNm": "\uc544\ub984\ub9c8\uc744.\ubc29\uc544\ub2e4\ub9ac\uc0ac\uac70\ub9ac",  # noqa
        "stationTp": "0"
    }
    mapper = NearestStationsMapper()
    target_dict = mapper.transform(source_dict)

    # All target keys must exist...
    assert 'latitude' in target_dict
    assert 'longitude' in target_dict
    assert 'ars_id' in target_dict
    assert 'station_name' in target_dict
    assert 'distance_from_current_location' in target_dict
    # ...and the string inputs must have been coerced to proper types.
    assert isinstance(target_dict['latitude'], float)
    assert isinstance(target_dict['longitude'], float)
    assert isinstance(target_dict['ars_id'], int)
    assert isinstance(target_dict['station_name'], str)
    assert isinstance(target_dict['distance_from_current_location'], int)
def test_routes_for_station_mapper():
    # Transform a recorded get_station_by_uid payload and check the
    # top-level keys of the result.
    from transporter.utils import RoutesForStationMapper

    path = os.path.join(TEST_FILE_BASE_PATH, 'get_station_by_uid.json')
    with open(path) as fin:
        source_dict = json.loads(fin.read())
    mapper = RoutesForStationMapper()
    target_dict = mapper.transform(source_dict)

    assert 'latitude' in target_dict
    assert 'longitude' in target_dict
    assert 'entries' in target_dict
def test_route_mapper():
    # Transform a recorded get_route_and_pos payload and check the result
    # carries a route_type field.
    from transporter.utils import RouteMapper

    path = os.path.join(TEST_FILE_BASE_PATH, 'get_route_and_pos.json')
    with open(path) as fin:
        source_dict = json.loads(fin.read())
    mapper = RouteMapper()
    target_dict = mapper.transform(source_dict)

    assert 'route_type' in target_dict
|
nilq/baby-python
|
python
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestPrintStreamOp(unittest.TestCase):
    """Smoke test: a small dataframe can be wrapped in a StreamOperator,
    printed, and the stream job executed without error."""

    def test_printstreamop(self):
        df = pd.DataFrame([
            [0, "abcde", "aabce"],
            [1, "aacedw", "aabbed"],
            [2, "cdefa", "bbcefa"],
            [3, "bdefh", "ddeac"],
            [4, "acedm", "aeefbc"]
        ])
        inOp = StreamOperator.fromDataframe(df, schemaStr='id long, text1 string, text2 string')
        inOp.print()
        StreamOperator.execute()
        # FIX: dropped the redundant trailing `pass` statement.
|
nilq/baby-python
|
python
|
from sqlalchemy import *
from sqlalchemy.orm import relationship, validates
from db import db
class ConfigTemplate(db.Model):
    """A reusable configuration document, written in some template language,
    that service and deployment configs are rendered from."""

    __tablename__ = "config_template"

    id = Column(String, primary_key=True)
    # Full template text.
    doc = Column(Text, nullable=False)
    # Template language this document is written in.
    language_id = Column(String, ForeignKey('language.id'))

    # Configs that were rendered from this template.
    service_configs = relationship('ServiceConfig', back_populates='template')
    deployment_configs = relationship('DeploymentConfig', back_populates='template')
    language = relationship('Language', back_populates='config_templates')

    def __repr__(self):
        return self.id
nilq/baby-python
|
python
|
import random
import logging
from . import scheme
__all__ = ('MTProtoSessionData',)
log = logging.getLogger(__package__)
class MTProtoSessionData:
    """Per-session MTProto state: the 64-bit session id and its auth keys."""

    def __init__(self, id):
        # `id` shadows the builtin, but renaming it would break keyword
        # callers, so the parameter name is kept.
        if id is None:
            # Fresh 64-bit session id from the OS CSPRNG.
            id = random.SystemRandom().getrandbits(64)
            # FIX: lazy %-style logging args instead of eager str.format(),
            # so the message is only built when DEBUG is enabled.
            log.debug('no session_id provided, generated new session_id: %s', id)
        self._id = scheme.int64_c(id)
        self._auth_keys = dict()
|
nilq/baby-python
|
python
|
import argparse
import torch
import os
from tqdm import tqdm
from datetime import datetime
import torch.nn as nn
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
import deepab
from deepab.models.PairedSeqLSTM import PairedSeqLSTM
from deepab.util.util import RawTextArgumentDefaultsHelpFormatter
from deepab.datasets.H5PairedSeqDataset import H5PairedSeqDataset
def train_epoch(model, train_loader, criterion, optimizer, device):
    """Trains a model for one epoch and returns the summed batch losses."""
    model.train()
    running_loss = 0
    e_i = 0  # batch counter, used only to throttle loss printing
    for inputs, labels, _ in tqdm(train_loader, total=len(train_loader)):
        inputs = inputs.to(device)
        # Labels drop the first token (aligned with output[1:] below).
        labels = labels.to(device)[:, 1:]
        optimizer.zero_grad()
        def handle_batch():
            """Function done to ensure variables immediately get dealloced"""
            output = model(src=inputs, trg=inputs)
            # Skip the first output step and reorder to
            # (batch, classes, seq) as CrossEntropyLoss expects.
            output = output[1:].permute(1, 2, 0)
            loss = criterion(output, labels)
            loss.backward()
            # Clip gradients to unit norm for stability.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
            optimizer.step()
            return loss.item()
        loss = handle_batch()
        running_loss += loss
        if e_i % 100 == 0:
            print(loss)  # periodic progress output
        e_i += 1
        # running_loss += handle_batch()
    return running_loss
def validate(model, validation_loader, criterion, device):
    """Run one evaluation pass; returns the summed batch losses."""
    with torch.no_grad():
        model.eval()
        running_loss = 0
        for inputs, labels, _ in tqdm(validation_loader,
                                      total=len(validation_loader)):
            inputs = inputs.to(device)
            # Labels drop the first token (aligned with output[1:] below).
            labels = labels.to(device)[:, 1:]
            def handle_batch():
                """Function done to ensure variables immediately get dealloced"""
                output = model(src=inputs, trg=inputs)
                # Skip the first step; reorder to (batch, classes, seq).
                output = output[1:].permute(1, 2, 0)
                loss = criterion(output, labels)
                return loss.item()
            running_loss += handle_batch()
    return running_loss
def train(model,
          train_loader,
          validation_loader,
          criterion,
          optimizer,
          epochs,
          device,
          lr_modifier,
          writer,
          save_file,
          save_every,
          properties=None):
    """Full training loop: per-epoch train/validate with TensorBoard logging,
    LR scheduling on validation loss, periodic and final checkpointing.

    `properties` is a dict of extra metadata saved alongside the weights;
    it is updated in place with state dicts and losses.
    """
    properties = {} if properties is None else properties

    print('Using {} as device'.format(str(device).upper()))
    model = model.to(device)

    for epoch in range(epochs):
        train_loss = train_epoch(model, train_loader, criterion, optimizer,
                                 device)
        avg_train_loss = train_loss / len(train_loader)
        train_loss_dict = {"cce": avg_train_loss}
        writer.add_scalars('train_loss', train_loss_dict, global_step=epoch)
        print('\nAverage training loss (epoch {}): {}'.format(
            epoch, avg_train_loss))

        val_loss = validate(model, validation_loader, criterion, device)
        avg_val_loss = val_loss / len(validation_loader)
        val_loss_dict = {"cce": avg_val_loss}
        writer.add_scalars('validation_loss', val_loss_dict, global_step=epoch)
        print('\nAverage validation loss (epoch {}): {}'.format(
            epoch, avg_val_loss))

        # Scheduler steps on the (summed) validation loss.
        lr_modifier.step(val_loss)

        # Periodic checkpoint: weights + optimizer + losses + epoch index.
        if (epoch + 1) % save_every == 0:
            properties.update({'model_state_dict': model.state_dict()})
            properties.update({
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss_dict,
                'val_loss': val_loss_dict,
                'epoch': epoch
            })
            torch.save(properties, save_file + ".e{}".format(epoch + 1))

    # Final checkpoint at the configured save_file path.
    properties.update({'model_state_dict': model.state_dict()})
    properties.update({
        'optimizer_state_dict': optimizer.state_dict(),
        'train_loss': train_loss_dict,
        'val_loss': val_loss_dict,
        'epoch': epoch
    })
    torch.save(properties, save_file)
def init_weights(m: nn.Module):
    """Initialise a module in place: Normal(0, 0.01) for every parameter
    whose name contains 'weight', zeros for everything else (biases etc.)."""
    for pname, param in m.named_parameters():
        if 'weight' in pname:
            nn.init.normal_(param.data, mean=0, std=0.01)
        else:
            nn.init.constant_(param.data, 0)
def _get_args():
    """Gets command line arguments"""
    project_path = os.path.abspath(os.path.join(deepab.__file__, "../.."))
    desc = ('''
        Desc pending
        ''')
    parser = argparse.ArgumentParser(
        description=desc, formatter_class=RawTextArgumentDefaultsHelpFormatter)

    # Model architecture arguments
    parser.add_argument('--enc_hid_dim', type=int, default=64)
    parser.add_argument('--dec_hid_dim', type=int, default=64)

    # Training arguments
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--save_every', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.01)
    # BUG FIX: argparse's store_true action does not accept a `type` kwarg;
    # the original `type=bool, ..., action="store_true"` raised a TypeError
    # at parser construction time.
    parser.add_argument('--use_gpu',
                        default=False,
                        action="store_true")
    parser.add_argument('--train_split', type=float, default=0.95)

    default_h5_file = os.path.join(project_path, 'data/abSeq.h5')
    parser.add_argument('--h5_file', type=str, default=default_h5_file)
    now = str(datetime.now().strftime('%y-%m-%d %H:%M:%S'))
    default_model_path = os.path.join(project_path,
                                      'trained_models/model_{}/'.format(now))
    parser.add_argument('--output_dir', type=str, default=default_model_path)
    return parser.parse_args()
def _cli():
    """Command line interface for train.py when it is run as a script"""
    args = _get_args()
    # Use CUDA only when it is both available and requested.
    device_type = 'cuda' if torch.cuda.is_available(
    ) and args.use_gpu else 'cpu'
    device = torch.device(device_type)

    # `properties` doubles as model kwargs and as checkpoint metadata.
    properties = dict(seq_dim=23,
                      enc_hid_dim=args.enc_hid_dim,
                      dec_hid_dim=args.dec_hid_dim)
    model = PairedSeqLSTM(**properties)
    model.apply(init_weights)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    properties.update({'lr': args.lr})

    # Load dataset loaders from h5 file
    h5_file = args.h5_file
    dataset = H5PairedSeqDataset(h5_file)
    train_split_length = int(len(dataset) * args.train_split)
    # Fixed seed so the train/validation split is reproducible.
    torch.manual_seed(0)
    train_dataset, validation_dataset = data.random_split(
        dataset, [train_split_length,
                  len(dataset) - train_split_length])
    train_loader = data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        collate_fn=H5PairedSeqDataset.merge_samples_to_minibatch)
    validation_loader = data.DataLoader(
        validation_dataset,
        batch_size=args.batch_size,
        collate_fn=H5PairedSeqDataset.merge_samples_to_minibatch)

    # Reduce the LR when validation loss plateaus.
    lr_modifier = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                             verbose=True)
    out_dir = args.output_dir
    if not os.path.isdir(out_dir):
        print('Making {} ...'.format(out_dir))
        os.mkdir(out_dir)
    # TensorBoard logs live next to the saved model.
    writer = SummaryWriter(os.path.join(out_dir, 'tensorboard'))

    print('Arguments:\n', args)
    print('Model:\n', model)
    train(model=model,
          train_loader=train_loader,
          validation_loader=validation_loader,
          criterion=criterion,
          optimizer=optimizer,
          device=device,
          epochs=args.epochs,
          lr_modifier=lr_modifier,
          writer=writer,
          save_file=os.path.join(out_dir, 'model.p'),
          save_every=args.save_every,
          properties=properties)
if __name__ == '__main__':
    # Script entry point.
    _cli()
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm as Form
from wtforms import StringField
from wtforms.validators import DataRequired
class LoginForm(Form):
    """Simple username/password login form; both fields are required."""
    # NOTE(review): password is a plain StringField, so it renders as a
    # visible text input; a PasswordField may be intended -- confirm.
    username = StringField('username', validators=[DataRequired()])
    password = StringField('password', validators=[DataRequired()])
|
nilq/baby-python
|
python
|
import os
import json
import numpy as np
import matplotlib.pyplot as plt
def compute_iou(box_1, box_2):
    '''
    Return the intersection-over-union (IoU) of two bounding boxes.

    Boxes are [x_min, y_min, x_max, y_max].  The result is in [0, 1];
    disjoint (or merely touching) boxes give 0.

    FIXES: removed the dead `np.random.random()` initialiser (its value was
    always overwritten), the unreachable `iou < 0` clamp, and the stray
    C-style semicolons.
    '''
    # Overlap extents; negative means the boxes are disjoint on that axis.
    width = min(box_1[2], box_2[2]) - max(box_1[0], box_2[0])
    height = min(box_1[3], box_2[3]) - max(box_1[1], box_2[1])
    if width < 0 or height < 0:
        return 0
    intersect = width * height
    area_b1 = (box_1[2] - box_1[0]) * (box_1[3] - box_1[1])
    area_b2 = (box_2[2] - box_2[0]) * (box_2[3] - box_2[1])
    union = area_b1 + area_b2 - intersect
    iou = intersect / union
    assert (iou >= 0) and (iou <= 1.0)
    return iou
def compute_counts(preds, gts, iou_thr=0.5, conf_thr=0.5):
    '''
    Count detection outcomes over a collection of images.

    <preds> maps file name -> list of [x1, y1, x2, y2, confidence]
    predictions; <gts> maps file name -> list of [x1, y1, x2, y2] ground
    truth boxes.  A ground truth counts as detected (TP) when some
    not-yet-matched prediction has IoU > iou_thr and confidence > conf_thr;
    undetected ground truths are FN; confident predictions left unmatched
    are FP.  Returns (TP, FP, FN).

    BUG FIXES vs. the original:
      * the matched prediction's index is recorded (it appended `j`, the
        inner loop's final value, so the wrong prediction was marked used);
      * matches are keyed per image file, so index 0 in one image no longer
        blocks index 0 in every other image.
    '''
    TP = 0
    FN = 0
    # (file, prediction-index) pairs already matched to a ground truth.
    associated = set()
    for pred_file, pred in preds.items():
        gt = gts[pred_file]
        for i in range(len(gt)):
            # Best match must strictly beat the IoU threshold.
            iou_max = iou_thr
            best_pred = -1
            for j in range(len(pred)):
                iou = compute_iou(pred[j][:4], gt[i])
                conf = pred[j][4]
                # Candidate: clears both thresholds, beats the best IoU so
                # far, and is not already spent on another ground truth.
                if iou > iou_max and conf > conf_thr and (pred_file, j) not in associated:
                    iou_max = iou
                    best_pred = j
            if best_pred != -1:  # ground truth detected -> true positive
                TP += 1
                associated.add((pred_file, best_pred))
            else:  # no acceptable prediction -> false negative
                FN += 1
    # Total confident predictions; those not matched to a gt are FPs.
    P = sum(1 for pred in preds.values() for box in pred if box[4] > conf_thr)
    FP = P - TP
    return TP, FP, FN
global plot_PR_graph  # NOTE(review): `global` at module scope is a no-op
def plot_PR_graph(use_train=True, use_weak=False, thresh=0.5):
    ''' Load in data to gts and preds '''
    # Loads predictions + annotations (weakened or full, train or test),
    # sweeps every prediction confidence as a threshold, and plots the
    # resulting precision/recall curve labelled with the IoU threshold.
    if use_weak:
        # Weakened setting: merge the train and test splits.
        with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
            gts_train = json.load(f)
        with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
            gts_test = json.load(f)
        gts = {**gts_train, **gts_test}
        with open(os.path.join(preds_path,'preds_train_weak.json'),'r') as f:
            preds_train = json.load(f)
        with open(os.path.join(preds_path,'preds_test_weak.json'),'r') as f:
            preds_test = json.load(f)
        preds = {**preds_train, **preds_test}
    else:
        if use_train:
            with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
                gts = json.load(f)
            with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
                preds = json.load(f)
        else:
            with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
                gts = json.load(f)
            with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
                preds = json.load(f)
    # Load in confidence values: one threshold per prediction box.
    confidence_thrs = []
    for fname in preds:
        for i in range(len(preds[fname])):
            pred = preds[fname][i]
            confidence_thrs.append(np.array([pred[4]], dtype=float))
    # Compute the counts at every confidence threshold.
    tp = np.zeros(len(confidence_thrs))
    fp = np.zeros(len(confidence_thrs))
    fn = np.zeros(len(confidence_thrs))
    for i, conf_thr in enumerate(confidence_thrs):
        tp[i], fp[i], fn[i] = compute_counts(preds, gts, iou_thr=thresh, conf_thr=conf_thr)
    # Plot training set PR curves
    precision = (tp / (fp + tp))  # true / total predictions
    recall = (tp / (fn + tp))     # detected / total objects
    # Sort points by recall (then precision) so the curve draws cleanly.
    inds = np.lexsort((precision, recall))
    plot = [recall[inds],precision[inds]]
    plt.plot(plot[0][:], plot[1][:], label=thresh)
# set a path for predictions and annotations:
preds_path = '../data/hw02_preds'
gts_path = '../../data/hw02_annotations'

# load splits:
split_path = '../../data/hw02_splits'
file_names_train = np.load(os.path.join(split_path, 'file_names_train.npy'))
file_names_test = np.load(os.path.join(split_path, 'file_names_test.npy'))

# Draw one PR curve per IoU threshold on the same axes.
plot_PR_graph(use_train=True, use_weak=True, thresh=0.75)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.5)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.25)
plot_PR_graph(use_train=True, use_weak=True, thresh=0.01)

# Bug fix: matplotlib's legend location is "lower left" — "bottom left" is
# not a valid `loc` value and raises ValueError on current matplotlib.
plt.legend(loc="lower left")
plt.xlabel('R')
plt.ylabel('P')
plt.show()
# =============================================================================
# '''
# Load data.
# '''
# with open(os.path.join(preds_path,'preds_train.json'),'r') as f:
# preds_train = json.load(f)
#
# with open(os.path.join(gts_path, 'annotations_train.json'),'r') as f:
# gts_train = json.load(f)
#
# with open(os.path.join(preds_path,'preds_test.json'),'r') as f:
# preds_test = json.load(f)
#
# with open(os.path.join(gts_path, 'annotations_test.json'),'r') as f:
# gts_test = json.load(f)
#
# # For a fixed IoU threshold, vary the confidence thresholds.
# # The code below gives an example on the training set for one IoU threshold.
# confidence_thrs = []
# for fname in preds_train:
# for i in range(len(preds_train[fname])):
# pred = preds_train[fname][i]
# confidence_thrs.append(np.array([pred[4]], dtype=float))
# tp_train = np.zeros(len(confidence_thrs))
# fp_train = np.zeros(len(confidence_thrs))
# fn_train = np.zeros(len(confidence_thrs))
# for i, conf_thr in enumerate(confidence_thrs):
# tp_train[i], fp_train[i], fn_train[i] = compute_counts(preds_train, gts_train, iou_thr=0.01, conf_thr=conf_thr)
#
# # Plot training set PR curves
# precision = (tp_train/(fp_train+tp_train))# true/total predictions
# recall = (tp_train/(fn_train+tp_train)) # detected/total objects
# inds = np.lexsort((precision, recall))
# plot = [recall[inds],precision[inds]]
# plt.plot(plot[0][:], plot[1][:])
#
# =============================================================================
|
nilq/baby-python
|
python
|
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This module implements functions and classes
# to handle forking of processes
# and the collection of results
#
# Author:
# Burt Holzman and Igor Sfiligoi
#
import cPickle
import os
import time
import select
from pidSupport import register_sighandler, unregister_sighandler, termsignal
import logSupport
class ForkResultError(RuntimeError):
    """Raised when one or more forked children failed to return a result.

    Carries the partial results so callers can still use what succeeded.
    """

    def __init__(self, nr_errors, good_results, failed=None):
        """
        :param nr_errors: number of children that failed
        :param good_results: dict of results from the children that succeeded
        :param failed: list of keys of the failed children (optional)
        """
        RuntimeError.__init__(self, "Found %i errors" % nr_errors)
        self.nr_errors = nr_errors
        self.good_results = good_results
        # Bug fix: `failed=[]` was a shared mutable default argument; every
        # instance created without `failed` referenced the SAME list.
        self.failed = failed if failed is not None else []
################################################
# Low level fork and collect functions
def fork_in_bg(function, *args):
    """Fork and run `function(*args)` in the child; return the parent's handle.

    The child pickles the function's return value into a pipe and exits via
    os._exit() (no cleanup handlers run in the child).

    Returns a dict {'r': fd, 'pid': pid} where fd is the read end of the pipe.
    example:
      def add(i, j): return i+j
      d = fork_in_bg(add, i, j)
    """
    r, w = os.pipe()
    # Signals are unregistered around fork() so the child does not inherit
    # the parent's handlers; the parent re-registers right after.
    unregister_sighandler()
    pid = os.fork()
    if pid == 0:
        # --- child ---
        # Prevent the child from rotating the shared log files.
        logSupport.disable_rotate = True
        os.close(r)
        try:
            out = function(*args)
            os.write(w, cPickle.dumps(out))
        finally:
            os.close(w)
            # Exit, immediately. Don't want any cleanup, since I was created
            # just for performing the work
            os._exit(0)
    else:
        # --- parent ---
        register_sighandler()
        os.close(w)
    return {'r': r, 'pid': pid}
###############################
def fetch_fork_result(r, pid):
    """
    Used with fork clients: drain the child's pipe, reap it, and unpickle
    the result it wrote.

    @type r: pipe
    @param r: Input pipe
    @type pid: int
    @param pid: pid of the child

    @rtype: Object
    @return: Unpickled object
    """
    try:
        rin = ""
        # Read until EOF; a child may write more than one pipe buffer.
        # (Python 2 semantics: os.read returns a str, "" at EOF.)
        s = os.read(r, 1024*1024)
        while (s != ""):  # "" means EOF
            rin += s
            s = os.read(r, 1024*1024)
    finally:
        # Always close the descriptor and reap the child, even if the
        # read above raised.
        os.close(r)
        os.waitpid(pid, 0)
    out = cPickle.loads(rin)
    return out
def fetch_fork_result_list(pipe_ids):
    """
    Read the output pipe of the children, used after forking to perform work
    and after forking to entry.writeStats()

    Blocks until every child has been collected. If any child fails, the
    remaining children are still collected and a ForkResultError carrying the
    partial results is raised at the end.

    @type pipe_ids: dict
    @param pipe_ids: Dictionary of {key: {'r': fd, 'pid': pid}}
    @rtype: dict
    @return: Dictionary of fork_results keyed like pipe_ids
    """
    out = {}
    failures = 0
    failed = []
    for key in pipe_ids:
        try:
            # Collect the results
            out[key] = fetch_fork_result(pipe_ids[key]['r'],
                                         pipe_ids[key]['pid'])
        except Exception, e:
            # Log and keep going: one bad child must not lose the others.
            logSupport.log.warning("Failed to extract info from child '%s'" % key)
            logSupport.log.exception("Failed to extract info from child '%s'" % key)
            # Record failed keys
            failed.append(key)
            failures += 1
    if failures > 0:
        raise ForkResultError(failures, out, failed=failed)
    return out
def fetch_ready_fork_result_list(pipe_ids):
    """
    Read the output pipe of the children, used after forking. If there is data
    on the pipes to consume, read the data and close the pipe.
    and after forking to entry.writeStats()

    Non-blocking variant: only children whose pipes are readable right now
    (select() with zero timeout) are collected; the rest stay in pipe_ids.

    @type pipe_ids: dict
    @param pipe_ids: Dictionary of {key: {'r': fd, 'pid': pid}}
    @rtype: dict
    @return: Dictionary of work_done for the ready children only
    """
    work_info = {}
    failures = 0
    failed = []
    # Reverse map from read-fd back to the caller's key.
    fds_to_entry = dict((pipe_ids[x]['r'], x) for x in pipe_ids)
    # Zero timeout: poll, never block.
    readable_fds = select.select(fds_to_entry.keys(), [], [], 0)[0]
    for fd in readable_fds:
        try:
            key = fds_to_entry[fd]
            pid = pipe_ids[key]['pid']
            out = fetch_fork_result(fd, pid)
            work_info[key] = out
        except Exception, e:
            logSupport.log.warning("Failed to extract info from child '%s'" % str(key))
            logSupport.log.exception("Failed to extract info from child '%s'" % str(key))
            # Record failed keys
            failed.append(key)
            failures += 1
    if failures > 0:
        raise ForkResultError(failures, work_info, failed=failed)
    return work_info
def wait_for_pids(pid_list):
    """
    Wait for every child in pid_list to finish.
    Anything the children wrote to their pipes is drained and discarded.
    """
    for entry in pid_list:
        child_pid = entry['pid']
        read_fd = entry['r']
        try:
            # Drain the pipe to EOF first so the child cannot block on a
            # full pipe before exiting. ("" signals EOF.)
            while os.read(read_fd, 1024) != "":
                pass
        finally:
            os.close(read_fd)
            os.waitpid(child_pid, 0)
################################################
# Fork Class
class ForkManager:
    """Registry of functions to fork, with several collection strategies.

    Usage: add_fork() each unit of work, then call one of fork_and_wait(),
    fork_and_collect(), or bounded_fork_and_collect().
    """

    def __init__(self):
        self.functions_tofork = {}
        # I need a separate list to keep the order
        self.key_list = []
        return

    def __len__(self):
        # Number of registered work units.
        return len(self.functions_tofork)

    def add_fork(self, key, function, *args):
        """Register `function(*args)` under `key`; key must be unique."""
        if key in self.functions_tofork:
            raise KeyError("Fork key '%s' already in use" % key)
        self.functions_tofork[key] = ((function, ) + args)
        self.key_list.append(key)

    def fork_and_wait(self):
        """Fork everything at once and wait; results are discarded."""
        pids = []
        for key in self.key_list:
            pids.append(fork_in_bg(*self.functions_tofork[key]))
        wait_for_pids(pids)

    def fork_and_collect(self):
        """Fork everything at once and return {key: result}."""
        pipe_ids = {}
        for key in self.key_list:
            pipe_ids[key] = fork_in_bg(*self.functions_tofork[key])
        results = fetch_fork_result_list(pipe_ids)
        return results

    def bounded_fork_and_collect(self, max_forks,
                                 log_progress=True, sleep_time=0.01):
        """Fork at most `max_forks` children at a time and return {key: result}.

        When the limit is reached, polls finished children (sleeping
        `sleep_time` between polls) to free slots before forking more.
        Raises ForkResultError at the end if any child failed.
        """
        post_work_info = {}
        nr_errors = 0
        pipe_ids = {}
        forks_remaining = max_forks        # free fork slots
        functions_remaining = len(self.functions_tofork)

        # try to fork all the functions
        for key in self.key_list:
            # Check if we can fork more
            if (forks_remaining == 0):
                if log_progress:
                    # log here, since we will have to wait
                    logSupport.log.info("Active forks = %i, Forks to finish = %i" % (max_forks, functions_remaining))
            while (forks_remaining == 0):
                failed_keys = []
                # Give some time for the processes to finish the work
                # logSupport.log.debug("Reached parallel_workers limit of %s" % parallel_workers)
                time.sleep(sleep_time)
                # Wait and gather results for work done so far before forking more
                try:
                    # logSupport.log.debug("Checking finished workers")
                    post_work_info_subset = fetch_ready_fork_result_list(pipe_ids)
                except ForkResultError, e:
                    # Collect the partial result
                    post_work_info_subset = e.good_results
                    # Expect all errors logged already, just count
                    nr_errors += e.nr_errors
                    functions_remaining -= e.nr_errors
                    failed_keys = e.failed
                post_work_info.update(post_work_info_subset)
                # Every collected (or failed) child frees one slot.
                forks_remaining += len(post_work_info_subset)
                functions_remaining -= len(post_work_info_subset)
                for i in (post_work_info_subset.keys() + failed_keys):
                    del pipe_ids[i]
                #end for
            #end while
            # yes, we can, do it
            pipe_ids[key] = fork_in_bg(*self.functions_tofork[key])
            forks_remaining -= 1
        #end for
        if log_progress:
            logSupport.log.info("Active forks = %i, Forks to finish = %i" % (max_forks - forks_remaining, functions_remaining))
        # now we just have to wait for all to finish
        while (functions_remaining > 0):
            failed_keys = []
            # Give some time for the processes to finish the work
            time.sleep(sleep_time)
            # Wait and gather results for work done so far before forking more
            try:
                # logSupport.log.debug("Checking finished workers")
                post_work_info_subset = fetch_ready_fork_result_list(pipe_ids)
            except ForkResultError, e:
                # Collect the partial result
                post_work_info_subset = e.good_results
                # Expect all errors logged already, just count
                nr_errors += e.nr_errors
                functions_remaining -= e.nr_errors
                failed_keys = e.failed
            post_work_info.update(post_work_info_subset)
            forks_remaining += len(post_work_info_subset)
            functions_remaining -= len(post_work_info_subset)
            for i in (post_work_info_subset.keys() + failed_keys):
                del pipe_ids[i]
            if len(post_work_info_subset) > 0:
                if log_progress:
                    logSupport.log.info("Active forks = %i, Forks to finish = %i" % (max_forks - forks_remaining, functions_remaining))
        #end while
        if nr_errors > 0:
            raise ForkResultError(nr_errors, post_work_info)
        return post_work_info
|
nilq/baby-python
|
python
|
from click import Option
from preacher.app.cli.executor import PROCESS_POOL_FACTORY, THREAD_POOL_FACTORY
from preacher.app.cli.option import LevelType, ExecutorFactoryType
from preacher.core.status import Status
def test_level_type():
    """LevelType: metavar/missing-message rendering and case-insensitive conversion."""
    level_type = LevelType()
    param = Option(["--level"])

    assert level_type.get_metavar(param) == "[skipped|success|unstable|failure]"
    assert level_type.get_missing_message(param) == (
        "Choose from:\n\tskipped,\n\tsuccess,\n\tunstable,\n\tfailure"
    )

    # String input converts case-insensitively; Status values pass through.
    cases = [
        ("skipped", Status.SKIPPED),
        ("SUCCESS", Status.SUCCESS),
        ("UnStable", Status.UNSTABLE),
        ("FAILURE", Status.FAILURE),
        (Status.SUCCESS, Status.SUCCESS),
    ]
    for value, expected in cases:
        assert level_type.convert(value, None, None) == expected
def test_executor_factory_type():
    """ExecutorFactoryType: metavar/missing-message rendering and conversion identity."""
    factory_type = ExecutorFactoryType()
    param = Option(["--executor"])

    assert factory_type.get_metavar(param) == "[process|thread]"
    assert factory_type.get_missing_message(param) == "Choose from:\n\tprocess,\n\tthread"

    # Conversion returns the exact factory singletons (identity, not equality).
    expectations = [
        ("process", PROCESS_POOL_FACTORY),
        ("Thread", THREAD_POOL_FACTORY),
        (PROCESS_POOL_FACTORY, PROCESS_POOL_FACTORY),
    ]
    for value, expected in expectations:
        assert factory_type.convert(value, None, None) is expected
|
nilq/baby-python
|
python
|
# Represents the smallest unit of a list: a value plus references to the succeeding and previous nodes
class Node(object):
    """A doubly linked list node: a value plus succeeding/previous links."""

    def __init__(self, value, succeeding=None, previous=None):
        # Bug fix: the constructor previously discarded all of its
        # arguments (`pass`), leaving instances without any attributes.
        self.value = value
        self.succeeding = succeeding
        self.previous = previous
class LinkedList(object):
    """A doubly linked list; starts empty with no head or tail."""

    def __init__(self):
        # Bug fix: the constructor previously did nothing (`pass`),
        # leaving instances without head/tail attributes.
        self.head = None
        self.tail = None
|
nilq/baby-python
|
python
|
## @package csnListener
# Definition of observer pattern related classes.
class Event:
    """Generic event: a numeric code plus the object that emitted it."""

    def __init__(self, code, source):
        self.__code = code
        self.__source = source

    def GetCode(self):
        """The numeric event code."""
        return self.__code

    def GetSource(self):
        """The object that emitted the event."""
        return self.__source

    def ToString(self):
        """Human-readable name for the event code; None if unknown."""
        names = {
            self.GetNullCode(): "null",
            self.GetChangeCode(): "change",
            self.GetProgressCode(): "progress",
        }
        return names.get(self.__code)

    def GetNullCode(self):
        return 0

    def GetChangeCode(self):
        return 1

    def GetProgressCode(self):
        return 2

    def IsNull(self):
        """True when this is a null event."""
        return self.__code == self.GetNullCode()

    def IsChange(self):
        """True when this is a change event."""
        return self.__code == self.GetChangeCode()

    def IsProgress(self):
        """True when this is a progress event."""
        return self.__code == self.GetProgressCode()
class ChangeEvent(Event):
    """ Change event class. """

    def __init__(self, source):
        # The code is obtained via the inherited accessor before the base
        # __init__ runs; safe because GetChangeCode reads no instance state.
        Event.__init__(self, self.GetChangeCode(), source)
class ProgressEvent(Event):
    """ Progress event class, carrying a progress value and optional message. """

    def __init__(self, source, progress, message = ""):
        # progress: completion indicator; message: optional detail text.
        self.__progress = progress
        self.__message = message
        Event.__init__(self, self.GetProgressCode(), source)

    def GetProgress(self):
        # The progress value given at construction.
        return self.__progress

    def GetMessage(self):
        # The optional message ("" by default).
        return self.__message
class Listener:
    """ Generic listener class. """

    def __init__(self, source):
        # Stored as a protected attribute; concrete listeners call back
        # into this source when they receive a matching event.
        self._source = source

    def GetSource(self):
        """ Get the listener source. """
        return self._source

    def Update(self):
        """ Abstract. Concrete listeners override this to react to an event.

        NOTE(review): subclasses define Update(self, event); the base
        signature omits the event parameter — confirm intended.
        """
class ChangeListener(Listener):
    """ Listener for ChangeEvent. The listener source needs to implement StateChanged(event). """

    def Update(self, event):
        """Forward change events to the source; ignore everything else."""
        if not event.IsChange():
            return
        self._source.StateChanged(event)
class ProgressListener(Listener):
    """ Listener for ProgressEvent. The listener source needs to implement ProgressChanged(event). """

    def Update(self, event):
        """Forward progress events to the source; ignore everything else."""
        if not event.IsProgress():
            return
        self._source.ProgressChanged(event)
|
nilq/baby-python
|
python
|
# Placeholder for feature 1 — implementation pending.
|
nilq/baby-python
|
python
|
import unittest
import pytest
from botocore.exceptions import ClientError
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid
from .lambdas import lambda_integration
from .test_integration import PARTITION_KEY, TEST_TABLE_NAME
TEST_STREAM_NAME = lambda_integration.KINESIS_STREAM_NAME
def should_run():
    """Return True when the TEST_ERROR_INJECTION env flag opts this suite in."""
    return config.is_env_true("TEST_ERROR_INJECTION")
class TestErrorInjection(unittest.TestCase):
    """Exercises localstack's fault-injection knobs for Kinesis and DynamoDB.

    Each test raises the relevant *_ERROR_PROBABILITY to 1.0 to force
    failures, then resets it to 0.0 so later tests are unaffected.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # The whole suite is opt-in via the TEST_ERROR_INJECTION env flag.
        if not should_run():
            pytest.skip("skipping TestErrorInjection (TEST_ERROR_INJECTION not set or false)")

    def test_kinesis_error_injection(self):
        """put_records fails exactly when KINESIS_ERROR_PROBABILITY is 1."""
        kinesis = aws_stack.connect_to_service("kinesis")
        aws_stack.create_kinesis_stream(TEST_STREAM_NAME)
        records = [{"Data": "0", "ExplicitHashKey": "0", "PartitionKey": "0"}]

        # by default, no errors
        test_no_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
        assert test_no_errors["FailedRecordCount"] == 0

        # with a probability of 1, always throw errors
        config.KINESIS_ERROR_PROBABILITY = 1.0
        test_all_errors = kinesis.put_records(StreamName=TEST_STREAM_NAME, Records=records)
        assert test_all_errors["FailedRecordCount"] == 1

        # reset probability to zero
        config.KINESIS_ERROR_PROBABILITY = 0.0

    def get_dynamodb_table(self):
        """Create (if needed) and return the shared DynamoDB test table resource."""
        dynamodb = aws_stack.connect_to_resource("dynamodb")
        # create table with stream forwarding config
        aws_stack.create_dynamodb_table(TEST_TABLE_NAME, partition_key=PARTITION_KEY)
        return dynamodb.Table(TEST_TABLE_NAME)

    def assert_zero_probability_read_error_injection(self, table, partition_key):
        """With injection disabled, a read must succeed (HTTP 200)."""
        # by default, no errors
        test_no_errors = table.get_item(Key={PARTITION_KEY: partition_key})
        assert test_no_errors["ResponseMetadata"]["HTTPStatusCode"] == 200

    def test_dynamodb_error_injection(self):
        """get_item raises when the general DynamoDB error probability is 1."""
        table = self.get_dynamodb_table()
        partition_key = short_uid()
        self.assert_zero_probability_read_error_injection(table, partition_key)

        # with a probability of 1, always throw errors
        config.DYNAMODB_ERROR_PROBABILITY = 1.0
        with self.assertRaises(ClientError):
            table.get_item(Key={PARTITION_KEY: partition_key})

        # reset probability to zero
        config.DYNAMODB_ERROR_PROBABILITY = 0.0

    def test_dynamodb_read_error_injection(self):
        """get_item raises when the read-specific error probability is 1."""
        table = self.get_dynamodb_table()
        partition_key = short_uid()
        self.assert_zero_probability_read_error_injection(table, partition_key)

        # with a probability of 1, always throw errors
        config.DYNAMODB_READ_ERROR_PROBABILITY = 1.0
        with self.assertRaises(ClientError):
            table.get_item(Key={PARTITION_KEY: partition_key})

        # reset probability to zero
        config.DYNAMODB_READ_ERROR_PROBABILITY = 0.0

    def test_dynamodb_write_error_injection(self):
        """put_item and batch_write_item raise when write error probability is 1."""
        table = self.get_dynamodb_table()

        # by default, no errors
        test_no_errors = table.put_item(Item={PARTITION_KEY: short_uid(), "data": "foobar123"})
        self.assertEqual(200, test_no_errors["ResponseMetadata"]["HTTPStatusCode"])

        # with a probability of 1, always throw errors
        config.DYNAMODB_WRITE_ERROR_PROBABILITY = 1.0
        with self.assertRaises(ClientError):
            table.put_item(Item={PARTITION_KEY: short_uid(), "data": "foobar123"})

        # BatchWriteItem throws ProvisionedThroughputExceededException if ALL items in Batch are Throttled
        with self.assertRaises(ClientError):
            table.batch_write_item(
                RequestItems={
                    # Bug fix: RequestItems must be keyed by the table NAME
                    # (a string), not by the boto3 Table resource object.
                    TEST_TABLE_NAME: [
                        {
                            "PutRequest": {
                                "Item": {
                                    PARTITION_KEY: short_uid(),
                                    "data": "foobar123",
                                }
                            }
                        }
                    ]
                }
            )

        # reset probability to zero
        config.DYNAMODB_WRITE_ERROR_PROBABILITY = 0.0
|
nilq/baby-python
|
python
|
import json
import os
import tkinter as tk
def cancelclick():
    # Cancel button handler: abort the whole program without saving.
    exit(0)
class GUI:
    """Tk configuration dialog that loads and saves its fields as JSON.

    Constructing the object builds the form, pre-fills it from `file_name`
    if that file exists, and blocks in the Tk main loop until OK (save and
    close) or Cancel (exit the process) is pressed.
    """

    def okclick(self):
        """OK handler: persist the current entry values to the JSON config and close."""
        data = {"url": self.urlentry.get(),
                "telegramAPIKEY": self.apikeyentry.get(),
                "telegramCHATID": self.chatidentry.get(),
                "databaseFile": self.databaseentry.get(),
                "sleep": self.sleepeentry.get()
                }
        # The with-block closes the file; the previous explicit close()
        # inside it was redundant and has been removed.
        with open(self.file_name, 'w') as config_file:
            json.dump(data, config_file)
        self.window.destroy()

    def __init__(self, file_name):
        """Build the form for `file_name` and run the Tk main loop."""
        self.file_name = file_name
        self.window = tk.Tk()
        self.window.geometry('605x300')

        # One label + entry pair per configuration key.
        urllabel = tk.Label(text="url")
        self.urlentry = tk.Entry(width=100, justify=tk.CENTER)
        databaselabel = tk.Label(text="databaseFile")
        self.databaseentry = tk.Entry(width=100, justify="center")
        apikeylabel = tk.Label(text="telegramAPIKEY")
        self.apikeyentry = tk.Entry(width=100, justify="center")
        chatidlabel = tk.Label(text="telegramCHATID")
        self.chatidentry = tk.Entry(width=100, justify="center")
        sleeplabel = tk.Label(text="sleep")
        self.sleepeentry = tk.Entry(width=100, justify="center")
        okbutton = tk.Button(self.window, text="OK", command=self.okclick)
        cancelbutton = tk.Button(self.window, text="Cancel", command=cancelclick)

        # Single-column grid: labels and entries alternate, buttons last.
        urllabel.grid(column=0, row=0)
        self.urlentry.grid(column=0, row=1)
        databaselabel.grid(column=0, row=2)
        self.databaseentry.grid(column=0, row=3)
        apikeylabel.grid(column=0, row=4)
        self.apikeyentry.grid(column=0, row=5)
        chatidlabel.grid(column=0, row=6)
        self.chatidentry.grid(column=0, row=7)
        sleeplabel.grid(column=0, row=8)
        self.sleepeentry.grid(column=0, row=9)
        okbutton.grid(column=0, row=10)
        cancelbutton.grid(column=0, row=11)

        if os.path.isfile(file_name):
            # Pre-fill the form from an existing config file.
            with open(file_name) as config_file:
                config = json.load(config_file)
                self.urlentry.insert(0, config["url"])
                self.apikeyentry.insert(0, config["telegramAPIKEY"])
                self.chatidentry.insert(0, config["telegramCHATID"])
                self.databaseentry.insert(0, config["databaseFile"])
                self.sleepeentry.insert(0, config["sleep"])
        else:
            # No config yet: seed sensible defaults for the non-secret fields.
            self.databaseentry.insert(0, "database.db")
            self.sleepeentry.insert(0, "10")

        self.window.mainloop()
|
nilq/baby-python
|
python
|
import io
from collections import deque
from concurrent.futures import ThreadPoolExecutor
import numba
from bampy.mt import CACHE_JIT, THREAD_NAME, DEFAULT_THREADS
from . import zlib
from ...bgzf import Block
from ...bgzf.reader import BufferReader, EmptyBlock, StreamReader, _Reader as __Reader
@numba.jit(nopython=True, nogil=True, cache=CACHE_JIT)
def inflate(data, buffer, offset=0):
    """Decompress raw deflate `data` into buffer[offset:]; returns (buffer, offset).

    nogil=True releases the GIL so ThreadPoolExecutor workers can run these
    jobs in parallel.
    """
    zlib.raw_decompress(data, buffer[offset:])
    return buffer, offset
class _Reader(__Reader):
    """
    Base class for buffer and stream readers.
    Provides Iterable interface to read in blocks.
    """

    def __init__(self, input, threadpool: ThreadPoolExecutor):
        """
        Constructor.

        :param input: Block data source.
        :param threadpool: Executor that runs the decompression jobs.
        """
        super().__init__(input)
        # Executor shared by all submitted inflate() jobs.
        self.pool = threadpool
        # FIFO of outstanding decompression futures (oldest first).
        self.blockqueue = deque()
        # Read-ahead depth; subclasses grow it while jobs are still running.
        self.max_queued = 0

    def __iter__(self):
        return self

    def __next__(self):
        # Subclasses implement block fetching and decompression.
        raise NotImplementedError()
def Reader(input, offset: int = 0, peek=None, threadpool: ThreadPoolExecutor = None) -> _Reader:
    """
    Factory to provide a unified reader interface.
    Resolves if input is randomly accessible and provides the appropriate _Reader implementation.

    :param input: A stream or buffer object.
    :param offset: If input is a buffer, the offset into the buffer to begin reading. Ignored otherwise.
    :param peek: Data consumed from stream while peeking. Will be prepended to read data. Ignored if buffer passed as input.
    :param threadpool: Executor used for decompression jobs. If None, a pool is
        created on demand. (Bug fix: the previous default instantiated a
        ThreadPoolExecutor in the `def` line, i.e. at import time, even when
        never used — the mutable-default-argument pitfall.)
    :return: An instance of StreamReader or BufferReader.
    """
    if threadpool is None:
        threadpool = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)
    if isinstance(input, (io.RawIOBase, io.BufferedIOBase)):
        return StreamReader(input, peek, threadpool)
    else:
        return BufferReader(input, offset, threadpool)
class StreamReader(_Reader):
    """
    Implements _Reader to handle input data that is not accessible through a buffer interface.
    """

    def __init__(self, input, peek=None, threadpool: ThreadPoolExecutor = None):
        """
        Constructor.

        :param input: Stream object to read from.
        :param peek: Data consumed from stream while peeking. Will be prepended to read data.
        :param threadpool: Executor for decompression jobs; if None a private
            pool is created. (Bug fix: the previous default built a
            ThreadPoolExecutor at import time via the default argument,
            even when never used.)
        """
        if threadpool is None:
            threadpool = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)
        super().__init__(input, threadpool)
        self._peek = peek

    def __next__(self):
        # Deepen the read-ahead queue while the oldest job is still running.
        if not self.max_queued or not self.blockqueue[0].done():
            self.max_queued += 1
        try:
            # Keep submitting decompression jobs until the queue is full.
            while len(self.blockqueue) < self.max_queued:
                block, cdata = Block.from_stream(self._input, self._peek)
                self._peek = None
                self.total_in += len(block)
                self.total_out += block.uncompressed_size
                if block.uncompressed_size:
                    self.blockqueue.append(self.pool.submit(inflate, cdata, memoryview(bytearray(block.uncompressed_size))))  # TODO reuse buffers
                else:
                    raise EmptyBlock()
        except EOFError:
            # Stream exhausted; drain whatever is already queued.
            pass
        if not len(self.blockqueue):
            raise StopIteration()
        # NOTE(review): inflate() returns a (buffer, offset) tuple, so
        # self.buffer holds that tuple and self.remaining is its length —
        # confirm downstream consumers unpack this as intended.
        self.buffer = self.blockqueue.popleft().result()
        self.remaining = len(self.buffer)
        return self.buffer
class BufferReader(_Reader):
    """
    Implements _Reader to handle input data that is accessible through a buffer interface.
    """

    def __init__(self, input, offset=0, threadpool: ThreadPoolExecutor = None):
        """
        Constructor.

        :param input: Buffer object to read from.
        :param offset: The offset into the input buffer to begin reading from.
        :param threadpool: Executor for decompression jobs; if None a private
            pool is created. (Bug fix: the previous default built a
            ThreadPoolExecutor at import time via the default argument,
            even when never used.)
        """
        if threadpool is None:
            threadpool = ThreadPoolExecutor(max_workers=DEFAULT_THREADS, thread_name_prefix=THREAD_NAME)
        super().__init__(input, threadpool)
        self._len = len(input)
        self.offset = offset

    def __next__(self):
        # Deepen the read-ahead queue while the oldest job is still running.
        if not self.max_queued or not self.blockqueue[0].done():
            self.max_queued += 1
        # Submit jobs for successive blocks until the buffer or queue is exhausted.
        while self.offset < self._len and len(self.blockqueue) < self.max_queued:
            block, cdata = Block.from_buffer(self._input, self.offset)
            block_len = len(block)
            self.offset += block_len
            self.total_in += block_len
            self.total_out += block.uncompressed_size
            if block.uncompressed_size:
                self.blockqueue.append(self.pool.submit(inflate, cdata, memoryview(bytearray(block.uncompressed_size))))  # TODO reuse buffers
            else:
                raise EmptyBlock()
        if not len(self.blockqueue):
            raise StopIteration()
        # NOTE(review): inflate() returns a (buffer, offset) tuple, so
        # self.buffer holds that tuple and self.remaining is its length —
        # confirm downstream consumers unpack this as intended.
        self.buffer = self.blockqueue.popleft().result()
        self.remaining = len(self.buffer)
        return self.buffer
|
nilq/baby-python
|
python
|
"""test format
Revision ID: b45b1bf02a80
Revises: a980b74a499f
Create Date: 2022-05-11 22:31:19.613893
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b45b1bf02a80'
down_revision = 'a980b74a499f'
branch_labels = None
depends_on = None
def upgrade():
    """Apply revision b45b1bf02a80 (auto-generated; currently a schema no-op)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """Revert revision b45b1bf02a80 (auto-generated; currently a schema no-op)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
|
nilq/baby-python
|
python
|
from myhdl import *
import random
from random import randrange
from .alu import def_alu
from .ctrl import def_ctrl
random.seed(4)
def test_ctrl():
    """Test bench for the ALU control.

    Drives random (alu_op, funct) pairs through the control + ALU pair and
    checks the ALU result against a software model.

    NOTE(review): `async=True` below is a keyword argument named `async`,
    which became a reserved word in Python 3.7 (newer myhdl renamed it) —
    this file targets an older Python/myhdl combination.
    """
    clk = Signal(bool(0))
    reset = ResetSignal(0, active=1, async=True)
    alu_op = Signal(intbv(0)[2:])
    funct = Signal(intbv(0)[6:])
    alu_ctrl = Signal(intbv(0)[4:])
    alu_op1 = Signal(intbv(0, min=-2**32, max=2**32))
    alu_op2 = Signal(intbv(0, min=-2**32, max=2**32))
    alu_res = Signal(intbv(0, min=-2**32, max=2**32))
    alu_z = Signal(bool(0))

    # Device under test: control decoder feeding the ALU.
    ctrl_inst = def_ctrl(alu_op, funct, alu_ctrl)
    alu_inst = def_alu(alu_op1, alu_op2, alu_ctrl, alu_res, alu_z)

    @always(delay(10))
    def tb_clk():
        clk.next = not clk

    @instance
    def tb_ctrl():
        oplist = [0,1,2]  # 2bit : [00,01,10]
        functlist = [32,34,36,37,42]  # [100000,100010,100100,100101,101010]
        for ii in range(100):
            # Random opcode/funct/operand combination.
            r_op = oplist[randrange(3)]
            r_func = functlist[randrange(5)]
            op1, op2 = randrange(-2**31, 2**31), randrange(-2**31, 2**31)
            # Software reference model of the expected ALU result.
            if (r_op == 0):
                res = op1 + op2
            elif r_op == 1:
                res = op1 - op2
            elif r_op == 2:
                # R-type: result selected by the funct field.
                if r_func == 32:
                    res = op1 + op2
                elif r_func == 34:
                    res = op1 - op2
                elif r_func == 36:
                    res = op1 & op2
                elif r_func == 37:
                    res = op1 | op2
                elif r_func == 42:
                    # set-less-than
                    if op1 < op2:
                        res = 1
                    else: res = 0
            alu_op.next = r_op
            funct.next = r_func
            alu_op1.next,alu_op2.next = op1, op2
            yield delay(10)
            assert res == alu_res
            if res == 0:
                # Zero flag must track a zero result.
                assert alu_z == 1
        raise StopSimulation

    # run simulation on test bench
    sim = Simulation(ctrl_inst, alu_inst, tb_clk, tb_ctrl)
    sim.run()
|
nilq/baby-python
|
python
|
import asyncio
import logging
from datetime import datetime, timedelta
from .login import BiliUser
from .api import WebApi, medals, get_info, WebApiRequestError
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("dailyclockin")
class DailyClockIn:
    """Daily danmaku clock-in task: sends one message to every medal room.

    Results and errors are accumulated on the user's message/message_err lists.
    """

    def __init__(self, user: BiliUser):
        # The logged-in user whose session/csrf are used for all API calls.
        self.user = user

    async def do_work(self):
        """Send a clock-in message to each medal room; optionally report progress
        for the user's configured target medal (user.ruid)."""
        logger.info("开始每日弹幕打卡任务")
        err_num = 0
        try:
            rooms = []
            async for m in medals(self.user.session):
                rooms.append(m)
                try:
                    info = await get_info(self.user.session, m["roomid"])
                except KeyError:
                    # Room info missing the expected keys; skip this medal.
                    continue
                try:
                    await WebApi.send_msg(self.user.session, info["room_id"], self.user.csrf)
                    logger.info(f"{m['uname']}({m['target_id']})直播间打卡成功")
                except Exception as e:
                    message_err = f"{m['uname']}({m['target_id']})直播间打卡失败: {e}"
                    logger.error(message_err)
                    self.user.message_err.append(message_err)
                    err_num += 1
                # Throttle between rooms to avoid rate limiting.
                await asyncio.sleep(6)
        except Exception as e:
            logger.error(e)
            self.user.message_err.append(e)
            err_num += 1
        self.user.message.append(
            f"弹幕打卡成功: {len(rooms) - err_num}/{len(rooms)}"
        )
        if self.user.ruid:
            # Remember the currently worn medal so it can be restored later.
            medal_0 = (await WebApi.get_weared_medal(self.user.session, self.user.csrf))
            if medal_0:
                medal_0_id = medal_0['medal_id']
            await asyncio.sleep(1)
            await WebApi.wear_medal(
                self.user.session, self.user.medal_id, self.user.csrf
            )  # wear medal
            medal = await WebApi.get_weared_medal(self.user.session, self.user.csrf)
            if medal["today_feed"] == 0 and medal['level'] > 20:
                # Above level 20 the clock-in no longer grants intimacy.
                self.user.message_err.append(f"{medal['medal_name']}{medal['level']}级大于20级,打卡不加亲密度,只会点亮牌子")
                return
            if medal["today_feed"] == 0:
                self.user.message_err.append(f"你设置的主播亲密度获取失败")
                return
            # Estimate the date the next medal level is reached at today's rate.
            now = datetime.now()
            now += timedelta(
                days=(medal["next_intimacy"] - medal["intimacy"]) // medal["today_feed"]
                + 1
            )
            message = f"目前:{medal['medal_name']}{medal['level']}级\n今日亲密度:{medal['today_feed']}/{medal['day_limit']}\n当前等级上限:{medal['intimacy']}/{medal['next_intimacy']}\n预计还需要{(medal['next_intimacy'] - medal['intimacy']) // medal['today_feed'] + 1}天({now.strftime('%m.%d')})到达{medal['level'] + 1}级 "
            self.user.message.append(message)
            if medal_0:
                # Restore the medal that was worn before the check.
                await asyncio.sleep(1)
                await WebApi.wear_medal(
                    self.user.session, medal_0_id, self.user.csrf
                )
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2016-2017 China Telecommunication Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from _mysql import result
from MySQLdb.constants.CR import IPSOCK_ERROR
__author__ = 'pzhang'
import tornado.web
import json
import time
from db_util import mysql_utils
# Convert a dotted-quad IPv4 string to its 32-bit integer value.
ip2num = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
# Convert a 32-bit integer back to dotted-quad form.
# NOTE(review): relies on Python 2 integer division (`/`); breaks on Python 3.
num2ip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
def get_mask_int(mask):
    """Return the 32-bit integer netmask for a prefix length, e.g. 24 -> 0xFFFFFF00."""
    bits = 0
    # Build `mask` consecutive 1-bits, then shift them into the high end.
    for _ in range(mask):
        bits = (bits << 1) | 1
    return bits << (32 - mask)
class ms_customer_handler(tornado.web.RequestHandler):
    def initialize(self):
        """Per-request setup: build the dispatch table from request name to handler."""
        super(ms_customer_handler, self).initialize()
        self.resp_func = {'ms_cust_get_customer':self.get_customer,
                          'ms_cust_add_customer':self.add_customer,
                          'ms_cust_del_customer':self.del_customer,
                          'ms_cust_update_customer':self.update_customer,
                          'ms_cust_get_customer_by_ip':self.get_customer_by_ip,
                          'ms_cust_add_flow':self.add_flow,
                          'ms_cust_del_flow':self.del_flow,
                          'ms_cust_update_flow':self.update_flow,
                          'ms_cust_set_flow_speed':self.set_flow_speed
                          }
        # Set to 1 to trace requests/responses on stdout (see post()).
        self.log = 0
        self.ip_cust_map = {}
        pass
def form_response(self, req):
resp = {}
resp['response'] = req['request']
#resp['ts'] = req['ts']
resp['ts'] = time.strftime("%Y%m%d%H%M%S")
resp['trans_id'] = req['trans_id']
resp['err_code'] = 0
resp['msg'] = ''
self.set_header('Content-Type', 'application/json')
return resp
    def post(self):
        """HTTP entry point: decode the JSON body, dispatch on 'request', reply as JSON."""
        ctnt = self.request.body
        if self.log == 1:
            print 'The request:'
            print str(ctnt)
        req = json.loads(str(ctnt))
        resp = self.form_response(req)
        # Dispatch to the handler registered in initialize().
        result = self.resp_func[req['request']](req['args'])
        resp['result'] = result
        if self.log == 1:
            print 'response:'
            print json.dumps(resp)
        self.write(json.dumps(resp))
        pass
def array_to_inlist(self, arr):
lst = '(' + ",".join(arr) + ')'
return lst
    def get_customer(self, args):
        """Return all customers (optionally filtered by args['uids']) with their IP ranges."""
        customers = {}
        sql_str = 'select * from t_customer join t_customer_ip on t_customer_ip.customer_id = t_customer.id'
        if 'uids' in args:
            uids = args['uids']
            lst = self.array_to_inlist(uids)
            sql_str += ' where t_customer.id in' + lst
        db = mysql_utils('customer')
        results = db.exec_sql(sql_str)
        db.close()
        if results is None:
            return {'customers':[]}
        # Group the joined rows by customer id; one entry per customer.
        cs_map = {}
        cs = []
        for c in results:
            uid = str(c[0])
            if uid in cs_map:
                one_c = cs_map[uid]
            else:
                one_c = {'uid':str(c[0]), 'name':c[1] }
                cs_map[uid] = one_c
            # NOTE(review): assumes column 6 is netip_str and 7 is mask_bit of
            # t_customer_ip — confirm against the joined table layout.
            ip = str(c[6]) + '/' + str(c[7])
            #FIXME: src and dst.
            if 'ips' in one_c:
                one_c['ips'].append({'dst':ip, 'src':ip, 'uid':str(c[3])})
            else:
                one_c['ips'] = [{'dst':ip, 'src':ip, 'uid':str(c[3])}]
            pass
        cs = [cs_map[c] for c in cs_map]
        customers['customers'] = cs
        return customers
    def del_customer(self, args):
        """Delete the customers whose ids are listed in args['uids']."""
        uids = args['uids']
        lst = self.array_to_inlist(uids)
        sql_str = 'delete from t_customer where t_customer.id in %s' % lst
        # print sql_str
        db = mysql_utils('customer')
        result = db.exec_sql(sql_str)
        if not result:
            # NOTE(review): exec_sql seems to return a falsy value on success
            # (commit only happens then) — confirm against db_util semantics.
            db.commit()
        db.close()
        return result
    def add_customer(self, args):
        """Insert a new customer row (and any provided IP ranges); return its new id."""
        customer = {}
        customer['name'] = args['name']
        #customer['uid'] = args['uid']
        #print customer
        #insert into t_customer values (1, 'Google');
        #sql_str = 'insert into t_customer(id,name) values (%s, \'%s\')' % (customer['uid'], customer['name'])
        sql_str = 'insert into t_customer(name) values (\'%s\')' % customer['name']
        #print sql_str
        db = mysql_utils('customer')
        result = db.exec_sql(sql_str)
        if not result:
            db.commit()
        # The id assigned by auto-increment for the row just inserted.
        customer_id = db.exec_sql('SELECT LAST_INSERT_ID()')[0][0]
        #print customer_id
        #insert into t_customer_ip values (1, 1, 16843009, '1.1.1.0', 4294967040, '255.255.255.0');
        if args.has_key('ips'):
            ips = args['ips']
            for ip in ips:
                # ip['src'] is CIDR "a.b.c.d/bits"; store both string and int forms.
                ip_addr = ip['src'].split('/')[0]
                ip_mask = int(ip['src'].split('/')[1])
                sql_str = 'insert into t_customer_ip(customer_id,netip,netip_str,mask_bit,mask_int) values (%s, %s, \'%s\', %s, %s)' \
                    % (customer_id, ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask))
                print sql_str
                result = db.exec_sql(sql_str)
                if not result:
                    db.commit()
        db.close()
        return {"cust_uid": customer_id}
def update_customer(self,args):
customer = {}
name = args['name']
uid = args['uid']
if args.has_key('ips'):
ips = args['ips']
#check if customer exist
sql_str = 'select * from t_customer where id = %s' % uid
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
#print result
#if not exist
if not result:
sql_str = 'insert into t_customer (id, name) VALUES (%s, \'%s\')' % (uid, name)
ret = db.exec_sql(sql_str)
db.commit()
#if exist
else:
sql_str = 'update t_customer set name = \'%s\' where id = %s' % (name, uid)
print sql_str
db.exec_sql(sql_str)
db.commit()
pass
db.close()
# To pzhang: Are you crazy?
# self.del_customer(args)
# self.add_customer(args)
pass
    def get_customer_by_ip(self, args):
        """Map each ip string in args['ips'] to its owning customer.

        Looks the address up in the in-memory self.application.ip_cust_map
        (network int -> customer info) instead of querying the database;
        the commented-out SQL below is the earlier DB-backed lookup.

        Returns {ip: customer_info} containing only the ips that matched.
        """
        ips = args['ips']
        cs = {}
        for ip in ips:
            # sql_str = 'select * from t_customer_ip inner join t_customer on t_customer_ip.customer_id = t_customer.id ' + \
            # 'and t_customer_ip.netip & t_customer_ip.mask_int = %s & t_customer_ip.mask_int' % ip2num(ip)
            # results = self.db.exec_sql(sql_str)
            # if results:
            # # print results
            # cs[ip] = {'name':results[0][7], 'cust_uid':results[8]}
            # split_bits bounds how many times we widen the network mask --
            # presumably how ip_cust_map keys were bucketed; TODO confirm.
            times = self.application.split_bits
            mask = 0xFFFFFFFF
            match = 0
            nets = self.application.ip_cust_map
            sub_ip = ip2num(ip)
            # Probe the map, zeroing one more low-order bit of the address
            # each iteration (exact address first, then wider networks).
            while times > 0:
                if sub_ip in nets:
                    match = 1
                    break
                # NOTE(review): mask grows beyond 32 bits here (Python ints
                # are unbounded); harmless since sub_ip fits in 32 bits,
                # but (mask << 1) & 0xFFFFFFFF would be tidier.
                mask <<= 1
                sub_ip &= mask
                times -= 1
            if match:
                cs[ip] = nets[sub_ip]
            pass
        return cs
def add_flow(self,args):
customer = {}
customer_id = args['cust_uid']
#check if customer exist
sql_str = 'select * from t_customer where id=%s' % customer_id
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
return
#insert into t_customer_ip values (1, 1, 16843009, '1.1.1.0', 4294967040, '255.255.255.0');
flows = []
if args.has_key('flows'):
ips = args['flows']
for ip in ips:
one_flow = {}
one_flow['src'] = ip['src']
ip_addr = ip['src'].split('/')[0]
ip_mask = int(ip['src'].split('/')[1])
sql_str = 'insert into t_customer_ip(customer_id,netip,netip_str,mask_bit,mask_int) values (%s, %s, \'%s\', %s, %s)' \
% (customer_id, ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask))
print sql_str
result = db.exec_sql(sql_str)
if not result:
db.commit()
flow_id = db.exec_sql('SELECT LAST_INSERT_ID()')[0][0]
ip['uid'] = str(flow_id)
db.close()
#return the request object. the only difference is each added flow has 'uid' attrib
return args
def del_flow(self,args):
uids = args['flow_uids']
lst = self.array_to_inlist(uids)
sql_str = 'delete from t_customer_ip where t_customer_ip.id in %s' % lst
# print sql_str
db = mysql_utils('customer')
result = db.exec_sql(sql_str)
if not result:
db.commit()
db.close()
return result
def update_flow(self,args):
flows = args['flows']
db = mysql_utils('customer')
for flow in flows:
flow_id = flow['uid']
if 'src' in flow:
ip_addr = flow['src'].split('/')[0]
ip_mask = int(flow['src'].split('/')[1])
sql_str = 'update t_customer_ip set netip=%s,netip_str=\'%s\',mask_bit=%s,mask_int=%s where t_customer_ip.id=%s' \
% (ip2num(ip_addr), ip_addr, ip_mask, get_mask_int(ip_mask), flow_id)
print sql_str
result = db.exec_sql(sql_str)
if not result:
db.commit()
pass
db.close()
pass
def set_flow_speed(self,args):
pass
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.