id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
392267 | # -*- coding: utf-8 -*-
# Copyright 2010-2014, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Fix @@@...@@@ format of variables in the installer script templates for Mac.
% python tweak_macinstaller_script.py --output=out.txt --input=in.txt \
--version_file=version.txt [--build_type=dev] \
"""
__author__ = "mukai"
import logging
import optparse
import mozc_version
def _ReplaceVariables(data, environment):
"""Replace all occurrence of the variable in data by the value.
Args:
data: the original data string
environment: an iterable of (variable name, its value) pairs
Returns:
the data string which replaces the variables by the value.
"""
result = data
for (k, v) in environment:
result = result.replace(k, v)
return result
def ParseOptions():
  """Parse command line options.

  Returns:
    An optparse options object carrying --version_file, --output,
    --input and --build_type (each None when not supplied).
  """
  parser = optparse.OptionParser()
  for flag in ('version_file', 'output', 'input', 'build_type'):
    parser.add_option('--' + flag, dest=flag)
  options, _unused = parser.parse_args()
  return options
def main():
  """Entry point: fill in @@@...@@@ variables in the installer template.

  Reads the template named by --input, substitutes version and product
  constants, and writes the result to --output.  Exits non-zero when a
  required flag is missing.
  """
  options = ParseOptions()
  required_flags = ['version_file', 'output', 'input']
  for flag in required_flags:
    if getattr(options, flag) is None:
      logging.error('--%s is not specified.', flag)
      exit(-1)
  version = mozc_version.MozcVersion(options.version_file)
  # Dev builds track the "external-dev" Omaha update channel; everything
  # else uses the stable channel.
  if options.build_type == 'dev':
    omaha_tag = 'external-dev'
  else:
    omaha_tag = 'external-stable'
  # This definition is copied from tools/scons/script.py
  variables = [
      ('@@@MOZC_VERSION@@@', version.GetVersionString()),
      ('@@@MOZC_PRODUCT_ID@@@', 'com.google.JapaneseIME'),
      ('@@@MOZC_APP_PATH@@@', '/Library/Input Methods/GoogleJapaneseInput.app'),
      ('@@@MOZC_APPLICATIONS_DIR@@@',
       '/Applications/GoogleJapaneseInput.localized'),
      ('@@@MOZC_OMAHA_TAG@@@', omaha_tag),
      ('@@@MOZC_PACKAGE_NAME@@@', 'GoogleJapaneseInput.pkg'),
  ]
  open(options.output, 'w').write(
      _ReplaceVariables(open(options.input).read(), variables))


if __name__ == '__main__':
  main()
| StarcoderdataPython |
9715467 | """
mnist_svm
~~~~~~~~~
A classifier program for recognizing handwritten digits from the MNIST
data set, using an SVM classifier."""
#### Libraries
# My libraries
import mnist_loader
# Third-party libraries
from sklearn import svm
def svm_baseline():
    """Train an SVM on the MNIST training set and print test-set accuracy."""
    training_data, validation_data, test_data = mnist_loader.load_data()
    # train: default SVC hyper-parameters (RBF kernel)
    clf = svm.SVC()
    clf.fit(training_data[0], training_data[1])
    # test: cast predictions to int and count exact label matches
    predictions = [int(a) for a in clf.predict(test_data[0])]
    num_correct = sum(int(a == y) for a, y in zip(predictions, test_data[1]))
    print("Baseline classifier using an SVM.")
    print("%s of %s values correct." % (num_correct, len(test_data[1])))

if __name__ == "__main__":
    svm_baseline()

# Recorded output of one run:
"""Baseline classifier using an SVM.
9435 of 10000 values correct."""
| StarcoderdataPython |
9618978 | # Generated by Django 2.0.2 on 2018-03-04 18:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Talk model.

    Auto-generated by Django 2.0.2; the schema itself must not be edited
    after it has been applied anywhere.
    """

    # First migration of this app.
    initial = True

    dependencies = [
        # Talk.user points at the (swappable) auth user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # Talk.topic points at topics.Topic, created by that app's first migration.
        ('topics', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Talk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('md_file', models.FileField(blank=True, default=None, null=True, upload_to='announcements/')),
                ('github_url', models.URLField(blank=True, default=None, null=True)),
                ('add_date', models.DateTimeField(verbose_name='date added')),
                ('talk_date', models.DateTimeField(verbose_name='time')),
                ('status', models.IntegerField(choices=[(0, 'preparing'), (1, 'ready'), (2, 'finished'), (3, 'disqualified')], default=0)),
                ('topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='topics.Topic')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                # Newest talks first.
                'ordering': ['-talk_date'],
            },
        ),
    ]
| StarcoderdataPython |
8100069 | <filename>sketches/krzywizna-calki.py
#!/usr/bin/env python
# coding: utf-8
import matplotlib.pyplot as plt
import latexrc
from numpy import linspace
from numpy import sqrt, cos, arccos, exp
#-------------------------------------------------------------------------------
# parameters
# NOTE: Python 2 script (print statements).  Geometry of light paths near a
# sphere: R = Earth radius [km], H = observer height, Hscale = atmosphere
# density scale height, D = ground distance, n = integration sample count.
R = 6371.0
H = R / 50
Hscale = R / 500
Dmax = R * arccos(R / (R + H))   # horizon ground distance for height H
D = Dmax * 0.7
n = 4000
print "R = {:.2f}".format(R)
print "H = {:.2f}".format(H)
print "D = {:.2f}".format(D)
print "Hscale = {:.2f}".format(Hscale)
#-------------------------------------------------------------------------------
# plot colors for the three models
clr_flt = '#A5AF3A'
clr_rnd = '#0C62D5'
clr_apx = '#FF1F00'
from numpy import diff,cumsum
# Cumulative trapezoid-like integral: midpoint rule over the sample grid.
midpoints = lambda x: ( x[1:] + x[:-1] ) / 2
integral = lambda y,x: cumsum( midpoints(y) * diff(x) )
#-------------------------------------------------------------------------------
# Flat-Earth model: straight slant path, exponential atmosphere.
print u'FLAT EARTH'
L0 = sqrt(D**2 + H**2)
l0 = linspace(0,L0,n)
h0 = (H / L0) * l0
print "L = {:.2f}".format(L0)
ch0 = exp(-h0 / Hscale)
tau0 = integral(ch0,l0)          # numerical optical depth
tau0th = Hscale * (1 - exp(- (H / L0) * l0 / Hscale)) / (H / L0)  # closed form
#-------------------------------------------------------------------------------
fig,ax = plt.subplots()
ax.set_xlabel('$l$')
ax.set_ylabel('$\\tau$')
ax.set_title('Flat Earth')
ax.plot(l0[1:], tau0, color = '#19B2E3', label = 'numerical')
ax.plot(l0, tau0th, '--', color = '#103885', label = 'analytical')
ax.legend(loc = 'lower right')
#-------------------------------------------------------------------------------
# Round-Earth model: chord length from the law of cosines; height along
# the chord from the circle equation.
print u'ROUND EARTH'
L1 = sqrt(H**2 + 2*R*(R+H)*(1 - cos(D/R)))
l1 = linspace(0,L1,n)
A = H * (H + 2*R) / L1**2
h1 = sqrt(R**2 + l1*L1*(A-1) + l1**2) - R
print "L = {:.2f}".format(L1)
print "A = {:.2f}".format(A)
ch1 = exp(-h1 / Hscale)
tau1 = integral(ch1,l1)
#-------------------------------------------------------------------------------
# Approximation: second-order (parabolic) height profile along the path.
print u'APPROXIMATION'
L2 = sqrt(H**2 + (1 + H/R) * D**2)
l2 = linspace(0,L2,n)
h2 = l2 * (H / L2 - (L2 - l2) / (2*R))
print "L = {:.2f}".format(L2)
ch2 = exp(-h2 / Hscale)
tau2 = integral(ch2,l2)
from numpy import pi
from erf import erf
def liczcalke(R,D,H,L,l):
    """Closed-form optical-depth integral for the parabolic height profile.

    Evaluates the error-function antiderivative of exp(-h2/Hscale) along
    the path samples *l* (uses the module-level scale height ``Hscale``).

    NOTE(review): the *D* parameter was never used -- the original body
    immediately shadowed it with a local of the same name.  That local is
    renamed to ``B`` here (matching the naming used later in this script),
    which removes the shadowing without changing behaviour; the unused
    parameter is kept so existing call sites keep working.
    """
    A = 2*R*H / L**2
    B = sqrt(2*R*Hscale) / L
    F = (A - 1) / (2*B)
    f = l / sqrt(2*R*Hscale)
    return L * sqrt(pi/4) * exp(F**2) * B * ( erf(F + f) - erf(F) )
# Analytic optical depth for the approximation, compared below with tau2.
tau2th = liczcalke(R,D,H,L2,l2)
#-------------------------------------------------------------------------------
fig,ax = plt.subplots()
ax.set_xlabel('$l$')
ax.set_ylabel('$\\tau$')
ax.set_title('Approximation Earth')
ax.plot(l2[1:], tau2, color = '#19B2E3', label = 'numerical')
ax.plot(l2, tau2th, '--', color = '#103885', label = 'analytical')
ax.legend(loc = 'lower right')
plt.savefig('/tmp/przyblizenie.pdf')
#-------------------------------------------------------------------------------
# Side-by-side comparison of the three height profiles and optical depths.
fig, axes = plt.subplots(1, 2, figsize = (5.9, 3.6))
ax = axes[0]
ax.set_xlabel('$l$')
ax.set_ylabel('$h(l)$')
ax.set_title('height vs path -- all three')
ax.plot(l0, h0, color = clr_flt, label = 'Flat Earth')
ax.plot(l1, h1, color = clr_rnd, label = 'Round Earth')
ax.plot(l2, h2, color = clr_apx, label = 'Approximation')
ax.legend(loc = 'lower right')
ax = axes[1]
ax.set_xlabel('$l$')
ax.set_ylabel('$\\tau$')
ax.set_title('flat vs round earth')
ax.plot(l0[1:], tau0, color = clr_flt, label = 'Flat Earth')
ax.plot(l1[1:], tau1, color = clr_rnd, label = 'Round Earth')
ax.plot(l2[1:], tau2, color = clr_apx, label = 'Approximation')
ax.legend(loc = 'lower right')
plt.subplots_adjust(0.08,0.12,0.95,0.92,0.25,0.25)
plt.savefig('/tmp/krzywizna.pdf')
#-------------------------------------------------------------------------------
# Sweep the ground distance d from 0 to the horizon and compare total
# optical depths of the analytic expressions and several approximations.
from numpy import array
d = linspace(0,Dmax,n)
L0 = sqrt(H**2 + d**2)
tau0 = L0 * (Hscale / H) * (1 - exp(-H / Hscale))
L1 = sqrt(H**2 + (1 + H/R) * d**2)
A = 2*R*H / L1**2
B = sqrt(2*R*Hscale) / L1
tau1 = L1 * sqrt(pi/4) * exp((A - 1)**2 / (2*B)**2) * B \
* ( erf((A + 1) / (2*B)) - erf((A - 1) / (2*B)) )
# NOTE(review): the first tau2 assignment below is immediately overwritten
# by the second; only the second form is ever plotted.
tau2 = L1 * exp((1 - 2*A) / (2*B)**2)
tau2 = L1 * exp(L1**2 / (8 * R * Hscale)) * exp(- 0.5 * H / Hscale)
# 5-point quadrature approximations of the same integral.
kkk = exp(- (A + 0.8)**2 / (2*B)**2) \
+ exp(- (A + 0.4)**2 / (2*B)**2) \
+ exp(- (A + 0.0)**2 / (2*B)**2) \
+ exp(- (A - 0.4)**2 / (2*B)**2) \
+ exp(- (A - 0.8)**2 / (2*B)**2)
tau3 = L1 * (kkk / 5) * exp((A - 1)**2 / (2*B)**2)
aa = lambda l: exp(-l / Hscale * ( H/L1 - (L1-l)/(2*R)))
tau4 = aa(0.1*L1) + aa(0.3*L1) + aa(0.5*L1) + aa(0.7*L1) + aa(0.9*L1)
tau4 *= L1 / 5
fig,axes = plt.subplots(2,2,figsize = (9,9))
ax = axes[0,0]
ax.plot(d,tau0, color = '#D7B212', label = 'flat')
ax.plot(d,tau1, color = '#006AB6', label = 'exact')
ax.plot(d,tau2, '--', color = '#31BD00', label = 'appx')
ax.plot(d,tau3, '--', color = '#CF03DA', label = 'appx2')
ax.plot(d,tau4, linewidth = 0.5, color = '#5E0091', label = 'appx3')
# NOTE(review): label string below is missing its closing '$' -- runtime
# string left untouched; verify the rendered axis label.
ax.set_ylabel('$\\tau')
# ax.set_ylim(0,2 * Hscale / H)
ax.legend(loc = 'best', fontsize = 10)
ax = axes[1,0]
# ax.plot(d, (A-1) / (2*B))
# ax.plot(d, A / (2*B))
# ax.plot(d, (A+1) / (2*B))
# ax.plot(d, 2 / (2*B), ':')
ax.plot(d,A)
ax = axes[0,1]
ax.plot(d, exp(-(A - 1)**2 / (2*B)**2), ':', label = '$\\exp\\left(-\\frac{(A - 1)^2}{4B^2}\\right)$')
ax.plot(d, sqrt(pi/4)*B*( erf((A + 1) / (2*B)) - erf((A - 1) / (2*B)) ), label = '$\\sqrt{\\pi/4}B\\left( {\\rm erf}\\left(\\frac{A + 1}{2B}\\right) - {\\rm erf}\\left(\\frac{A - 1}{2B}\\right) \\right)$')
ax.plot(d, exp(- A**2 / (2*B)**2), '--', label = '$\\exp\\left(- \\frac{A^2}{4B^2}\\right)$')
ax.plot(d, exp(- (2*A+1)**2 / (4*B)**2) + exp(- (2*A-1)**2 / (4*B)**2), color = '#CF03DA', linestyle = ':', label = 'new')
ax.set_yscale('log')
ax.legend(loc = 'best', fontsize = 10)
plt.savefig('/tmp/krzywizna2.pdf')
#-------------------------------------------------------------------------------
# plt.show()
| StarcoderdataPython |
352472 | import socket, cfg, time, subprocess
from threading import Thread
from modules.show_banner import *
from modules.clear_screen import *
from modules.socket_listener import *
from modules.cleanup_code import *
# Populate the shared cfg module state (prompts, listener registry, colors).
cfg.global_variables()


def listener_console():
    """Interactive sub-console for managing socket listeners (Python 2).

    Reads commands in a loop and dispatches: banner/help/quit/clear/back,
    start/kill/kill_all/show for listener management, and 'local' to run a
    command on the local system.  Listener state lives in cfg.db_listeners
    as (id, port) tuples; each 'start' spawns a daemon-style thread.
    """
    while True:
        try:
            # NOTE: 'input' shadows the builtin; it holds [command, rest].
            input = raw_input(cfg.prompt_listener).strip().split(' ', 1)
            command = input[0]
            if command == 'banner':
                display_banner()
            elif command == 'help':
                print cfg.help_listener
            elif command == 'quit':
                cleanup()
            elif command == 'clear':
                clear()
            elif command == 'back':
                # Return to the parent console.
                print '\n' + cfg.pos + 'Returning...'
                return
            elif command == 'kill':
                killed_thread = False
                try:
                    tar_id = int(input[1])
                except:
                    print cfg.err + 'Argument needs to be a listener ID, eg. "kill 1"'
                    continue
                # Remove the first registry entry whose ID matches.
                for i in cfg.db_listeners:
                    if str(i[0]) == str(tar_id):
                        cfg.db_listeners.pop(cfg.db_listeners.index(i))
                        print cfg.pos + 'Killed active listener with ID : ' + str(tar_id)
                        killed_thread = True
                        break
                if not killed_thread:
                    print cfg.err + 'Listener of ID "' + str(tar_id) + '" does not exist'
            elif command == 'kill_all':
                print cfg.pos + 'Killing all active listeners...'
                cfg.db_listeners = []
            elif command == 'start':
                try:
                    port = int(input[1])
                except:
                    print cfg.err + 'Argument needs to be an integer'
                    continue
                # Listener runs on its own thread; give it time to bind.
                t = Thread(target=start_listening, args=(port,))
                t.start()
                time.sleep(2)
            elif command == 'show':
                print '\n' + cfg.note + 'Currently active listeners :\n'
                if cfg.db_listeners:
                    # Render an aligned two-column table of (ID, Port).
                    listener_data = [['ID', 'Port'], ['==', '====']]
                    for i in cfg.db_listeners:
                        listener_data.append([str(i[0]), str(i[1])])
                    col_width = max(len(word) for row in listener_data for word in row) + 2
                    for row in listener_data:
                        print "".join(word.ljust(col_width) for word in row)
                    print '\n'
            elif command == '':
                pass
            elif command == 'local':
                # Execute an arbitrary command on the local machine;
                # 'cd' is special-cased because it must change this
                # process's working directory, not a subshell's.
                try:
                    execute = input[1]
                    print '\n' + cfg.note + 'Executing on system...\n'
                except IndexError:
                    print cfg.err + 'Specify a local system command to run'
                    continue
                if execute[:3] == 'cd ':
                    try:
                        execute = execute.replace('cd ', '')
                        os.chdir(execute)
                        print cfg.pos + "Changed to directory : " + execute
                    except (WindowsError, OSError):
                        print cfg.err + 'Could not change to directory : ' + execute
                else:
                    try:
                        result = subprocess.Popen(execute, shell=True, stdout=subprocess.PIPE,
                                                  stderr=subprocess.PIPE,
                                                  stdin=subprocess.PIPE)
                        result = result.stdout.read() + result.stderr.read()
                        print result
                    except:
                        print cfg.err + 'Could not execute command'
            else:
                print cfg.err + 'Unknown command "' + command + '", run "help" for help menu'
        except EOFError:
            # Stdin closed: back off briefly instead of spinning.
            try:
                time.sleep(3)
            except KeyboardInterrupt:
                cleanup()
        except KeyboardInterrupt:
            cleanup()
        except Exception as e:
            print cfg.err + 'Error : ' + str(e)
| StarcoderdataPython |
8107934 | <reponame>AndreaGuarracino/bh20-seq-resource
#! /usr/bin/env python3
#
# Check for updates on Arvados, pull the TTL and
# push into Virtuoso
#
# You can run this in a Guix container with
#
# ~/opt/guix/bin/guix environment -C guix --ad-hoc python python-requests curl --network -- python3 ./scripts/update_virtuoso/check_for_updates.py cache.txt dba dba
import requests
import time
import sys
import os.path
import subprocess
# Usage: check_for_updates.py <cache-file> <virtuoso-user> <virtuoso-password>
assert(len(sys.argv)==4)
fn = sys.argv[1]    # cache file holding the last seen Last-Modified stamp
user = sys.argv[2]  # Virtuoso user name (the curl calls below hard-code 'dba')
pwd = sys.argv[3]   # Virtuoso password used for digest auth
scriptdir = os.path.dirname(os.path.realpath(__file__))
print(scriptdir)
# Repository root: two levels above this script's directory.
basedir = os.path.dirname(os.path.dirname(scriptdir))
def upload(fn):
    """Upload the Turtle file *fn* into Virtuoso via the SPARQL graph CRUD API.

    Runs curl with HTTP digest auth (module-level ``pwd``); the target
    graph is named after the file's basename.  Aborts (assert) when curl
    exits non-zero.
    """
    print("UPLOAD "+fn)
    # Build the argv list directly instead of str.split(" ") on a formatted
    # command line, so a file name containing spaces cannot corrupt the
    # argument boundaries.  The arguments themselves are unchanged.
    cmd = ["curl", "-X", "PUT", "--digest",
           "-u", "dba:%s" % pwd,
           "-H", "Content-Type:text/turtle",
           "-T", fn,
           "-G", "http://sparql.genenetwork.org/sparql-graph-crud-auth",
           "--data-urlencode",
           "graph=http://covid-19.genenetwork.org/graph/%s" % os.path.basename(fn)]
    print(cmd)
    p = subprocess.Popen(cmd)
    output = p.communicate()[0]
    print(output)
    assert(p.returncode == 0)
url = 'https://download.lugli.arvadosapi.com/c=lugli-4zz18-z513nlpqm03hpca/_/mergedmetadata.ttl'
# --- Fetch headers from TTL file on Arvados
r = requests.head(url)
print(r.headers)
print(r.headers['Last-Modified'])
# --- Convert/validate time stamp
# ValueError: time data 'Tue, 21 Apr 2020 23:47:43 GMT' does not match format '%a %b %d %H:%M:%S %Y'
last_modified_str = r.headers['Last-Modified']
t_stamp = time.strptime(last_modified_str,"%a, %d %b %Y %H:%M:%S %Z" )
print(t_stamp)
# Compare against the cached stamp from the previous run (if any).
stamp = None
if os.path.isfile(fn):
    file = open(fn,"r")
    stamp = file.read()
    # NOTE(review): missing parentheses -- this references the method
    # without calling it, so the file handle is never explicitly closed.
    file.close
if stamp != last_modified_str:
    print("Delete graphs")
    # NOTE(review): a comma appears to be missing before the trailing ""
    # below; as written "countries.ttl" "" concatenates to "countries.ttl"
    # and the list has three elements -- confirm the intended graph set.
    for graph in ["labels.ttl", "metadata.ttl", "countries.ttl" ""]:
        cmd = ("curl --digest -u dba:%s --verbose --url http://127.0.0.1:8890/sparql-graph-crud-auth?graph=http://covid-19.genenetwork.org/graph/%s -X DELETE" % (pwd, graph))
        print(cmd)
        p = subprocess.Popen(cmd.split(" "))
        output = p.communicate()[0]
        print(output)
        # assert(p.returncode == 0) -> may prevent update
    # Re-upload static vocabulary graphs, then the fresh metadata dump.
    upload(basedir+"/semantic_enrichment/labels.ttl")
    upload(basedir+"/semantic_enrichment/countries.ttl")
    print("Fetch metadata TTL")
    r = requests.get(url)
    assert(r.status_code == 200)
    with open("metadata.ttl", "w") as f:
        f.write(r.text)
        f.close
    upload("metadata.ttl")
    # Remember the stamp so the next run can skip unchanged data.
    with open(fn,"w") as f:
        f.write(last_modified_str)
else:
    print("Metadata is up to date")
| StarcoderdataPython |
11234117 | <filename>pyhybro/__init__.py
from pyhybro.optimizer_base import OptimizerBase
from pyhybro.problem import Problem
from pyhybro.OptimizationResult import OptimizationResult, ReferenceSet
| StarcoderdataPython |
6599566 | <gh_stars>1-10
# The framework was adapted from https://github.com/theislab/dca/blob/master/dca/network.py
import numpy as np
import os
import pickle
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, Lambda, Concatenate
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l1_l2
from .loss import poisson_loss, NB_loss, ZINB_loss, ZIPoisson_loss
# Multiplies each row of a matrix by the matching scalar of a size-factor
# vector (rescales normalised means back to library size); named "mean"
# because it produces the model's mean output.
ColwiseMultLayer = Lambda(lambda l: l[0]*tf.reshape(l[1], (-1,1)), name="mean")
# Dispersion activation: softplus clipped to a numerically safe range.
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
class DispLayer(tf.keras.layers.Layer):
    """Per-gene dispersion: one trainable scalar per output gene.

    The layer ignores its input tensor and returns the clipped-softplus
    transform of its own weight vector (shape: (units,)).
    """
    def __init__(self, units=2000):
        super(DispLayer, self).__init__()
        # Free dispersion parameters, one per gene.
        self.w = tf.Variable(initial_value=tf.random.normal([units]), trainable=True, name='dispersion')

    def call(self, inputs):
        # 'inputs' is unused; dispersion is input-independent ('gene' mode).
        return DispAct(self.w)
class ZFixedLayer(tf.keras.layers.Layer):
    """Trainable archetype matrix, initialised to a regular simplex.

    call() maps archetype weights (rows summing to 1) to latent-space
    coordinates: inputs @ z_fixed.  The layer name 'z_fixed' is looked up
    elsewhere via model.get_layer('z_fixed'); do not rename.
    """
    def __init__(self, dim_latent_space):
        super(ZFixedLayer, self).__init__(name='z_fixed')
        # (dim+1, dim) simplex vertex coordinates as the starting archetypes.
        self.w = tf.Variable(initial_value=tf.convert_to_tensor(create_z_fixed(dim_latent_space), tf.keras.backend.floatx()),
                             trainable=True, name='z_fixed')

    def call(self, inputs):
        return tf.matmul(inputs, self.w)
# We borrowed the following function from https://github.com/bmda-unibas/DeepArchetypeAnalysis/blob/master/AT_lib/lib_at.py
def create_z_fixed(dim_latent_space):
    """Return the vertex coordinates of a regular simplex centred at 0.

    Borrowed from
    https://github.com/bmda-unibas/DeepArchetypeAnalysis/blob/master/AT_lib/lib_at.py

    Properties of the returned (M+1, M) matrix, where M = dim_latent_space:
      * the vertices sum to zero (centroid at the origin),
      * every vertex has unit distance from the origin,
      * all edges have equal length,
      * the dot product of any two distinct vertices is -1/M, i.e. the
        angle between them is arccos(-1/M).

    :param dim_latent_space: dimension M of the latent space.
    :return: numpy array of shape (M+1, M), one vertex per row.
    """
    m = dim_latent_space
    # Build the transposed matrix column by column (one column per vertex),
    # Gram-Schmidt style: each diagonal entry enforces unit length, each
    # off-diagonal entry enforces the -1/M pairwise dot product.
    vertices_t = np.zeros([m, m + 1])
    for k in range(m):
        norm_sq = sum(vertices_t[i, k] ** 2 for i in range(k))
        vertices_t[k, k] = np.sqrt(1.0 - norm_sq)
        for j in range(k + 1, m + 1):
            overlap = sum(vertices_t[i, k] * vertices_t[i, j] for i in range(k))
            vertices_t[k, j] = (-1.0 / float(m) - overlap) / vertices_t[k, k]
    return vertices_t.T
class Autoencoder():
    """Archetypal-analysis autoencoder with an MSE (Gaussian) output model.

    The encoder maps cells onto the convex hull of trainable archetypes
    ('z_fixed' layer); the decoder reconstructs a normalised expression
    profile from the latent position.  Subclasses override build_dec /
    build_output to swap in count-based noise models (Poisson, NB, ZINB...).

    Layer names are load-bearing: 'center', 'z_fixed', 'dec%s' and
    'scaled_mean' are looked up by name in build_output/get_encoder/
    get_decoder, so they must stay in sync across methods.
    """

    def __init__(self,
                 input_size,
                 output_size=None,
                 hidden_size=(64, 32, 64),
                 dispersion = 'gene',
                 lat_coef=1.,
                 l2_coef=0.,
                 l1_coef=0.,
                 l2_enc_coef=0.,
                 l1_enc_coef=0.,
                 ridge=0.,
                 hidden_dropout=0.,
                 input_dropout=0.,
                 activation='relu',
                 init='glorot_normal',
                 file_path=None,
                 debug=False):
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size      # layer widths; middle entry = number of archetypes
        self.dispersion = dispersion        # 'gene' (shared) or 'gene-cell'
        self.lat_coef = lat_coef            # weight of the archetype regulariser
        self.l2_coef = l2_coef
        self.l1_coef = l1_coef
        self.l2_enc_coef = l2_enc_coef      # encoder-specific overrides (0 = use global)
        self.l1_enc_coef = l1_enc_coef
        self.ridge = ridge
        self.hidden_dropout = hidden_dropout
        self.input_dropout = input_dropout
        self.activation = activation
        self.init = init
        self.loss = None
        self.file_path = file_path
        self.extra_models = {}
        self.model = None
        self.encoder = None
        self.decoder = None
        self.input_layer = None
        self.sf_layer = None
        self.debug = debug

        # Default to reconstructing the input itself.
        if self.output_size is None:
            self.output_size = input_size

        # Broadcast a scalar dropout rate over all hidden layers.
        if isinstance(self.hidden_dropout, list):
            assert len(self.hidden_dropout) == len(self.hidden_size)
        else:
            self.hidden_dropout = [self.hidden_dropout]*len(self.hidden_size)

    def build_enc(self):
        """Build input layers and the encoder up to the latent 'center'."""
        self.input_layer = Input(shape=(self.input_size,), name='nor_count')
        self.sf_layer = Input(shape=(1,), name='lib_size')
        last_hidden = self.input_layer
        if self.input_dropout > 0.0:
            last_hidden = Dropout(self.input_dropout, name='input_dropout')(last_hidden)
        for i, (hid_size, hid_drop) in enumerate(zip(self.hidden_size, self.hidden_dropout)):
            # The middle entry of hidden_size is the archetype layer.
            center_idx = int(np.floor(len(self.hidden_size) / 2.0))
            self.center_idx = center_idx
            if i == center_idx:
                layer_name = 'center'
                stage = 'center' # let downstream know where we are
                self.num_at = hid_size                 # number of archetypes
                self.dim_latent_space = self.num_at - 1
            elif i < center_idx:
                layer_name = 'enc%s' % i
                stage = 'encoder'
            else:
                #layer_name = 'dec%s' % (i-center_idx)
                stage = 'decoder'
                break  # decoder layers are built by build_dec
            # use encoder-specific l1/l2 reg coefs if given
            if self.l1_enc_coef != 0. and stage in ('center', 'encoder'):
                l1 = self.l1_enc_coef
            else:
                l1 = self.l1_coef
            if self.l2_enc_coef != 0. and stage in ('center', 'encoder'):
                l2 = self.l2_enc_coef
            else:
                l2 = self.l2_coef
            if stage == 'center': # yuge
                # fc_a: per-cell archetype weights (softmax rows sum to 1).
                fc_a = Dense(hid_size, activation='softmax', kernel_initializer=self.init,
                             kernel_regularizer=l1_l2(l1, l2),
                             name=layer_name)(last_hidden)
                # fc_b_t: weights used to re-predict the archetype positions.
                fc_b_t = Dense(hid_size, activation=None, kernel_initializer=self.init,
                               kernel_regularizer=l1_l2(l1, l2),
                               name="%s_b_t"%layer_name)(last_hidden)
                # Add archetype regularization loss
                #z_fixed = tf.eye(self.num_at)
                #self.z_fixed = create_z_fixed(self.dim_latent_space)
                #mu_t = tf.matmul(fc_a, self.z_fixed)
                mu_t = ZFixedLayer(self.dim_latent_space)(fc_a)
                fc_b = tf.nn.softmax(tf.transpose(fc_b_t), 1)
                # z_predicted: archetypes reconstructed from the data; the
                # archetype loss in build_output pulls z_fixed towards it.
                self.z_predicted = tf.matmul(fc_b, mu_t)
                #self.arch_loss = tf.math.reduce_mean(tf.math.square(self.z_fixed - z_predicted))
                #last_hidden = fc_a
                last_hidden = mu_t
            else:
                last_hidden = Dense(hid_size, activation = tf.nn.leaky_relu,
                                    kernel_initializer=self.init,
                                    kernel_regularizer=l1_l2(l1, l2),
                                    name=layer_name)(last_hidden)
            if hid_drop > 0.0:
                last_hidden = Dropout(hid_drop, name='%s_drop'%layer_name)(last_hidden)
        self.encoder_output = last_hidden

    def build_dec(self):
        """Build the decoder stack from the latent output, then the output head."""
        last_hidden = Dense(self.hidden_size[(self.center_idx+1)],
                            kernel_initializer=self.init,
                            activation=tf.nn.leaky_relu,
                            kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                            name='dec0')(self.encoder_output)
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden = Dense(hid_size, activation=tf.nn.leaky_relu,
                                    kernel_initializer=self.init,
                                    kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                    name="dec%s"%(i+1))(last_hidden)
        self.decoder_output = last_hidden
        # Number of 'dec%s' layers; used by get_decoder to copy weights.
        self.dec_layer_num = len(self.hidden_size) - self.center_idx - 1
        self.build_output()

    def build_output(self):
        """Attach the MSE output head, archetype loss, and the encoder model."""
        self.loss = tf.keras.losses.MeanSquaredError()
        mean = Dense(self.output_size, kernel_initializer=self.init, activation="softmax",
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(self.decoder_output)
        #output = ColwiseMultLayer([mean, self.ls_layer])
        # keep unscaled output as an extra model
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
        self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=mean) # yuge
        # Archetype regulariser: keep z_fixed close to its data-driven estimate.
        self.arch_loss = tf.math.reduce_mean(tf.math.square(self.model.get_layer('z_fixed').get_weights()[0] - self.z_predicted))
        self.model.add_loss(self.arch_loss*self.lat_coef)
        self.encoder = self.get_encoder()

    def save(self):
        """Pickle this object to file_path/model.pickle, if file_path is set."""
        if self.file_path:
            os.makedirs(self.file_path, exist_ok=True)
            with open(os.path.join(self.file_path, 'model.pickle'), 'wb') as f:
                pickle.dump(self, f)

    def get_encoder(self, activation=False):
        """Return a model mapping inputs to the 'center' (archetype-weight) layer."""
        return Model(inputs=self.model.input,
                     outputs=self.model.get_layer('center').output)

    def get_decoder(self):
        """Rebuild a standalone decoder from latent space, reusing fitted weights."""
        # Extract decoder fitted weights
        restored_w = []
        for i in range(self.dec_layer_num):
            restored_w.extend(self.model.get_layer('dec%s'%i).get_weights())
        restored_w.extend(self.model.get_layer('scaled_mean').get_weights())
        # Construct decoder
        dec_input_layer = Input(shape=(self.dim_latent_space,), name='latent_space')
        last_hidden = dec_input_layer
        for i, hid_size in enumerate(self.hidden_size[(self.center_idx+1):]):
            last_hidden = Dense(hid_size, activation=tf.nn.leaky_relu,
                                kernel_initializer=self.init,
                                kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                name="dec%s"%i)(last_hidden)
        mean = Dense(self.output_size, kernel_initializer=self.init, activation="softmax",
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(last_hidden)
        dec_model = Model(inputs=dec_input_layer, outputs=mean)
        dec_model.set_weights(restored_w)
        return dec_model

    def predict(self, nor_count, lib_size, return_info = False):
        """Return reconstructions, archetype usages, and archetype spectra.

        Args:
            nor_count: normalised count matrix (cells x genes).
            lib_size: per-cell library sizes, shape (cells, 1) or (cells,).
            return_info: unused here; subclasses use it to also return
                dispersion/dropout parameters.
        Returns:
            dict with 'recon' (reconstruction), 'usage' (cell x archetype
            weights), and 'spectra' (decoded archetype profiles).
        """
        print('scAAnet: Calculating reconstructions...')
        preds = self.model.predict({'nor_count': nor_count,
                                    'lib_size': lib_size})
        # Subclasses return [output, disp, pi, ...]; the first entry is
        # always the reconstruction.
        if isinstance(preds, list):
            recon = preds[0]
        else:
            recon = preds
        print('scAAnet: Calculating low dimensional representations...')
        usage = self.encoder.predict({'nor_count': nor_count,
                                      'lib_size': lib_size})
        print('scAAnet: Calculating spectra in the original space')
        self.decoder = self.get_decoder()
        spectra = self.decoder.predict(self.model.get_layer('z_fixed').get_weights()[0])
        return {'recon': recon, 'usage': usage, 'spectra': spectra}
class PoissonAutoencoder(Autoencoder):
    """Autoencoder variant with a Poisson reconstruction loss.

    The softmax mean is rescaled by the library size before the loss.
    """

    def build_output(self):
        mean = Dense(self.output_size, activation="softmax", kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(self.decoder_output)
        # Rescale the normalised mean by each cell's library size.
        output = ColwiseMultLayer([mean, self.sf_layer])
        self.loss = poisson_loss
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
        self.model = Model(inputs=[self.input_layer, self.sf_layer], outputs=output)
        # Archetype regulariser (same as the base class).
        self.arch_loss = tf.math.reduce_mean(tf.math.square(self.model.get_layer('z_fixed').get_weights()[0] - self.z_predicted))
        self.model.add_loss(self.arch_loss*self.lat_coef)
        self.encoder = self.get_encoder()
class ZIPoissonAutoencoder(Autoencoder):
    """Autoencoder variant with a zero-inflated Poisson loss.

    Adds a second decoder branch producing per-gene dropout logits (pi);
    that branch also sees the library size via concatenation.
    """

    def build_dec(self):
        # pi branch: latent output concatenated with library size.
        last_hidden_pi = Dense(self.hidden_size[(self.center_idx+1)],
                               kernel_initializer=self.init,
                               activation=tf.nn.leaky_relu,
                               kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                               name='dec_pi0')(Concatenate()([self.encoder_output, self.sf_layer]))
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_pi = Dense(hid_size, activation=tf.nn.leaky_relu,
                                       kernel_initializer=self.init,
                                       kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                       name="dec_pi%s"%(i+1))(last_hidden_pi)
        # Scaled mean branch (same shape as the base decoder).
        last_hidden_mean = Dense(self.hidden_size[(self.center_idx+1)],
                                 kernel_initializer=self.init,
                                 activation=tf.nn.leaky_relu,
                                 kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                 name='dec0')(self.encoder_output)
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_mean = Dense(hid_size, activation=tf.nn.leaky_relu,
                                         kernel_initializer=self.init,
                                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                         name="dec%s"%(i+1))(last_hidden_mean)
        self.decoder_output_mean = last_hidden_mean
        self.decoder_output_pi = last_hidden_pi
        self.dec_layer_num = len(self.hidden_size) - self.center_idx - 1
        self.build_output()

    def build_output(self):
        # Dropout logits (no activation; the loss applies the sigmoid).
        pi = Dense(self.output_size, activation=None, kernel_initializer=self.init,
                   kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                   name='pi')(self.decoder_output_pi)
        mean = Dense(self.output_size, activation="softmax", kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(self.decoder_output_mean)
        output = ColwiseMultLayer([mean, self.sf_layer])
        self.loss = ZIPoisson_loss
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
        self.model = Model(inputs=[self.input_layer, self.sf_layer],
                           outputs=[output, pi])
        self.arch_loss = tf.math.reduce_mean(tf.math.square(self.model.get_layer('z_fixed').get_weights()[0] - self.z_predicted))
        self.model.add_loss(self.arch_loss*self.lat_coef)
        self.encoder = self.get_encoder()

    def predict(self, nor_count, lib_size, return_info=False):
        """As the base class; with return_info=True also returns 'pi'."""
        preds = super().predict(nor_count, lib_size)
        if return_info:
            _, pi = self.model.predict({'nor_count': nor_count,'lib_size': lib_size})
            preds['pi'] = pi
        return preds
class NBAutoencoder(Autoencoder):
    """Autoencoder variant with a negative binomial loss.

    Adds a dispersion branch; in 'gene-cell' mode the dispersion is
    predicted per cell and gene, otherwise a single trainable vector
    (one value per gene) is used via DispLayer.
    """

    def build_dec(self):
        # Dispersion branch: latent output concatenated with library size.
        last_hidden_disp = Dense(self.hidden_size[(self.center_idx+1)],
                                 kernel_initializer=self.init,
                                 activation=tf.nn.leaky_relu,
                                 kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                 name='dec_disp0')(Concatenate()([self.encoder_output, self.sf_layer]))
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_disp = Dense(hid_size, activation=tf.nn.leaky_relu,
                                         kernel_initializer=self.init,
                                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                         name="dec_disp%s"%(i+1))(last_hidden_disp)
        # Scaled mean branch.
        last_hidden_mean = Dense(self.hidden_size[(self.center_idx+1)],
                                 kernel_initializer=self.init,
                                 activation=tf.nn.leaky_relu,
                                 kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                 name='dec0')(self.encoder_output)
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_mean = Dense(hid_size, activation=tf.nn.leaky_relu,
                                         kernel_initializer=self.init,
                                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                         name="dec%s"%(i+1))(last_hidden_mean)
        self.decoder_output_mean = last_hidden_mean
        self.decoder_output_disp = last_hidden_disp
        self.dec_layer_num = len(self.hidden_size) - self.center_idx - 1
        self.build_output()

    def build_output(self):
        if self.dispersion == 'gene-cell':
            # Per-cell, per-gene dispersion predicted from the latent code.
            disp = Dense(self.output_size, activation=DispAct,
                         kernel_initializer=self.init,
                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                         name='dispersion')(self.decoder_output_disp)
        else:
            # Shared per-gene dispersion (ignores its input tensor).
            disp = DispLayer(self.output_size)(self.decoder_output_disp)
        mean = Dense(self.output_size, activation="softmax", kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(self.decoder_output_mean)
        output = ColwiseMultLayer([mean, self.sf_layer])
        self.loss = NB_loss
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
        self.model = Model(inputs=[self.input_layer, self.sf_layer],
                           outputs=[output, disp])
        self.arch_loss = tf.math.reduce_mean(tf.math.square(self.model.get_layer('z_fixed').get_weights()[0] - self.z_predicted))
        self.model.add_loss(self.arch_loss*self.lat_coef)
        self.encoder = self.get_encoder()

    def predict(self, nor_count, lib_size, return_info=False):
        """As the base class; with return_info=True also returns 'disp'."""
        preds = super().predict(nor_count, lib_size)
        if return_info:
            _, disp = self.model.predict({'nor_count': nor_count,'lib_size': lib_size},
                                         batch_size=nor_count.shape[0])
            preds['disp'] = disp
        return preds
class ZINBAutoencoder(Autoencoder):
    """Autoencoder variant with a zero-inflated negative binomial loss.

    A single side branch (conditioned on the library size) feeds both the
    dispersion and the dropout-logit (pi) heads.
    """

    def build_dec(self):
        # Shared dispersion/pi branch: latent output + library size.
        last_hidden_disp_pi = Dense(self.hidden_size[(self.center_idx+1)],
                                    kernel_initializer=self.init,
                                    activation=tf.nn.leaky_relu,
                                    kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                    name='dec_disp_pi0')(Concatenate()([self.encoder_output, self.sf_layer]))
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_disp_pi = Dense(hid_size, activation=tf.nn.leaky_relu,
                                            kernel_initializer=self.init,
                                            kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                            name="dec_disp_pi%s"%(i+1))(last_hidden_disp_pi)
        # Scaled mean branch.
        last_hidden_mean = Dense(self.hidden_size[(self.center_idx+1)],
                                 kernel_initializer=self.init,
                                 activation=tf.nn.leaky_relu,
                                 kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                 name='dec0')(self.encoder_output)
        if len(self.hidden_size) > (self.center_idx+2):
            for i, hid_size in enumerate(self.hidden_size[(self.center_idx+2):]):
                last_hidden_mean = Dense(hid_size, activation=tf.nn.leaky_relu,
                                         kernel_initializer=self.init,
                                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                                         name="dec%s"%(i+1))(last_hidden_mean)
        self.decoder_output_mean = last_hidden_mean
        self.decoder_output_disp_pi = last_hidden_disp_pi
        self.dec_layer_num = len(self.hidden_size) - self.center_idx - 1
        self.build_output()

    def build_output(self):
        if self.dispersion == 'gene-cell':
            disp = Dense(self.output_size, activation=DispAct,
                         kernel_initializer=self.init,
                         kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                         name='dispersion')(self.decoder_output_disp_pi)
        else:
            # NOTE(review): the sibling NBAutoencoder passes self.output_size
            # here; this class passes self.input_size -- equivalent only when
            # output_size == input_size (the default).  Confirm intent.
            disp = DispLayer(self.input_size)(self.decoder_output_disp_pi)
        # Dropout logits (no activation; the loss applies the sigmoid).
        pi = Dense(self.output_size, activation=None, kernel_initializer=self.init,
                   kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                   name='pi')(self.decoder_output_disp_pi)
        mean = Dense(self.output_size, activation='softmax', kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='scaled_mean')(self.decoder_output_mean)
        output = ColwiseMultLayer([mean, self.sf_layer])
        self.loss = ZINB_loss
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer, outputs=mean)
        self.model = Model(inputs=[self.input_layer, self.sf_layer],
                           outputs=[output, disp, pi])
        self.arch_loss = tf.math.reduce_mean(tf.math.square(self.model.get_layer('z_fixed').get_weights()[0] - self.z_predicted))
        self.model.add_loss(self.arch_loss*self.lat_coef)
        self.encoder = self.get_encoder()

    def predict(self, nor_count, lib_size, return_info=False):
        """As the base class; with return_info=True also returns 'disp' and 'pi'."""
        # warning! this may overwrite adata.X
        preds = super().predict(nor_count, lib_size)
        if return_info:
            _, disp, pi = self.model.predict({'nor_count': nor_count, 'lib_size': lib_size},
                                             batch_size=nor_count.shape[0])
            preds['disp'] = disp
            preds['pi'] = pi
        return preds
# Registry mapping the noise-model name (as used by config/CLI) to the
# corresponding autoencoder class.
AE_types = {'normal': Autoencoder, 'poisson': PoissonAutoencoder,
            'zipoisson': ZIPoissonAutoencoder,
            'nb': NBAutoencoder, 'zinb': ZINBAutoencoder}
390146 | <filename>tests/urlpatterns_reverse/urls.py<gh_stars>1-10
from django.conf.urls import include, url
from .views import (
absolute_kwargs_view, defaults_view, empty_view, empty_view_partial,
empty_view_wrapped, nested_view,
)
# Patterns mounted below under the ^includes/ prefix (see urlpatterns).
other_patterns = [
    url(r'non_path_include/$', empty_view, name='non_path_include'),
    url(r'nested_path/$', nested_view),
]
# Test URLconf exercising edge cases of Django's URL resolving/reversing:
# regex quantifiers, (non-)capturing and nested groups, backreferences,
# lookarounds, escaped metacharacters, includes, and default kwargs.
urlpatterns = [
    url(r'^places/([0-9]+)/$', empty_view, name='places'),
    # Quantifiers applied to literal characters.
    url(r'^places?/$', empty_view, name="places?"),
    url(r'^places+/$', empty_view, name="places+"),
    url(r'^places*/$', empty_view, name="places*"),
    # Quantifiers applied to non-capturing groups.
    url(r'^(?:places/)?$', empty_view, name="places2?"),
    url(r'^(?:places/)+$', empty_view, name="places2+"),
    url(r'^(?:places/)*$', empty_view, name="places2*"),
    url(r'^places/([0-9]+|[a-z_]+)/', empty_view, name="places3"),
    url(r'^places/(?P<id>[0-9]+)/$', empty_view, name="places4"),
    url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
    url(r'^people/(?:name/)', empty_view, name="people2"),
    url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
    url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
    # Optional named/positional arguments.
    url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
    url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?', absolute_kwargs_view, name="named_optional"),
    url(r'^optional/(?P<arg1>\d+)/(?:(?P<arg2>\d+)/)?$', absolute_kwargs_view, name="named_optional_terminated"),
    # Nested group combinations.
    url(r'^nested/noncapture/(?:(?P<p>\w+))$', empty_view, name='nested-noncapture'),
    url(r'^nested/capture/((\w+)/)?$', empty_view, name='nested-capture'),
    url(r'^nested/capture/mixed/((?P<p>\w+))$', empty_view, name='nested-mixedcapture'),
    url(r'^nested/capture/named/(?P<outer>(?P<inner>\w+)/)?$', empty_view, name='nested-namedcapture'),
    url(r'^hardcoded/$', empty_view, name="hardcoded"),
    url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
    url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
    url(r'^people/(?P<state>\w\w)/(?P<name>[0-9])/$', empty_view, name="people4"),
    url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
    # Character classes and escaped metacharacters.
    url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
    url(r'^character_set/[\w]/$', empty_view, name="range2"),
    url(r'^price/\$([0-9]+)/$', empty_view, name="price"),
    url(r'^price/[$]([0-9]+)/$', empty_view, name="price2"),
    url(r'^price/[\$]([0-9]+)/$', empty_view, name="price3"),
    url(r'^product/(?P<product>\w+)\+\(\$(?P<price>[0-9]+(\.[0-9]+)?)\)/$', empty_view, name="product"),
    url(r'^headlines/(?P<year>[0-9]+)\.(?P<month>[0-9]+)\.(?P<day>[0-9]+)/$', empty_view, name="headlines"),
    url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name="windows"),
    url(r'^special_chars/(?P<chars>.+)/$', empty_view, name="special"),
    url(r'^(?P<name>.+)/[0-9]+/$', empty_view, name="mixed"),
    # Bounded repetition.
    url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
    url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
    url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
    url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
    url(r'^test/1/?', empty_view, name="test"),
    url(r'^(?i)test/2/?$', empty_view, name="test2"),
    # Includes with outer captured arguments.
    url(r'^outer/(?P<outer>[0-9]+)/', include('urlpatterns_reverse.included_urls')),
    url(r'^outer-no-kwargs/([0-9]+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
    url('', include('urlpatterns_reverse.extra_urls')),
    # Lookaheads/lookbehinds (non-consuming assertions).
    url(r'^lookahead-/(?!not-a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-negative'),
    url(r'^lookahead\+/(?=a-city)(?P<city>[^/]+)/$', empty_view, name='lookahead-positive'),
    url(r'^lookbehind-/(?P<city>[^/]+)(?<!not-a-city)/$', empty_view, name='lookbehind-negative'),
    url(r'^lookbehind\+/(?P<city>[^/]+)(?<=a-city)/$', empty_view, name='lookbehind-positive'),
    # Partials should be fine.
    url(r'^partial/', empty_view_partial, name="partial"),
    url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
    # This is non-reversible, but we shouldn't blow up when parsing it.
    url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
    url(r'absolute_arg_view/$', absolute_kwargs_view),
    # Tests for #13154. Mixed syntax to test both ways of defining URLs.
    url(r'defaults_view1/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 1}, name='defaults'),
    url(r'defaults_view2/(?P<arg1>[0-9]+)/', defaults_view, {'arg2': 2}, 'defaults'),
    url('^includes/', include(other_patterns)),
    # Security tests
    url('(.+)/security/$', empty_view, name='security'),
]
| StarcoderdataPython |
5051145 |
import sys
import os
import time
import string
import re
import time
import shutil
from mimetypes import types_map
from urllib import quote, unquote
from resources.markup import *
# Resolve all resource paths relative to this script's own directory.
APP_ROOT, file_name = os.path.split(os.path.abspath(__file__))
TEMPLATES = os.path.join(APP_ROOT, "resources", "TEMPLATES")  # HTML template fragments
TESTS = os.path.join(APP_ROOT, "TESTS")                       # test-entry source file
STYLESHEET = os.path.join(APP_ROOT, "resources", "STYLE")
STYLESHEET_NAME = "style.css"
PAPA = "PAPA"               # root directory for the generated HTML tree
TESTCASES = 'test-cases'    # directory of fixture pages copied into the output
if sys.platform == "win32":
    # Force binary mode on stdout/stderr so Windows does not translate newlines.
    import os, msvcrt
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
class Entry(object):
    """One record parsed from the TESTS file.

    Holds both the raw parsed fields (title/url/desc/label/id, all lists of
    strings) and derived presentation fields filled in later by
    tests2singledocs() (mode, tabs, urls, repo, index, file_name).
    """
    def __init__(self):
        self.title = []       # section title lines ('***' headers)
        self.url = []         # 'url:' lines
        self.desc = []        # 'desc:' lines and continuation text
        self.label = []       # 'label:' lines
        self.id = []          # 'id:' lines
        self.buffer = []      # catch-all for lines before any known tag
        self.index = 0        # numeric id string once assigned
        self.mode = ''        # top-level tab name derived from the title
        self.tabs = ''        # comma-joined sub-tab names
        self.urls = ''        # effective URL list for this entry
        self.repo = ''        # output directory path components
        self.index_count = 0
        self.file_name = ''   # generated HTML file name
        self.deprecated = False
    def __str__(self):
        """Debug representation listing all non-empty display fields."""
        ret = []
        for p in ['label',
                  'mode',
                  'tabs',
                  'urls',
                  'repo',
                  'index',
                  'file_name',
                  'desc']:
            value = getattr(self, p)
            if value:
                # isinstance instead of the original `type(value) == type([])`.
                if isinstance(value, list):
                    ret.append("%s: %s" % (p, "".join(value)))
                else:
                    ret.append("%s: %s" % (p, value))
        return '-----------------\n' + "\n".join(ret)
    def normalize(self):
        """Drop whitespace-only strings from the raw list fields in place."""
        # List comprehension instead of filter() so the attributes remain real
        # lists under Python 3 as well (filter() returns a lazy iterator there).
        for prop in ['title', 'url', 'desc', 'label']:
            setattr(self, prop, [s for s in getattr(self, prop) if s.strip()])
    def is_empty(self):
        """Return True when the entry HAS content.

        NOTE(review): the name is inverted relative to the behavior -- callers
        (tests2singledocs) rely on this truth table, so it is kept as-is.
        """
        return bool(self.title or self.url or self.desc or self.label)
def check_test_index():
    """Verify that every test entry in the TESTS file is well formed.

    Walks the file as a small state machine: each entry must appear as the
    line sequence id: -> label: -> desc:.  Returns False as soon as a tag is
    seen out of order (e.g. a 'label:' without a preceding 'id:'), True
    otherwise.
    """
    # State constants for the id -> label -> desc sequence.
    ID = 1
    LABEL = 2
    DESC = 3
    ERROR = 4
    state = DESC  # DESC doubles as the "ready for a new entry" state
    in_file = open(TESTS, 'r')
    #
    for line in in_file.readlines():
        if line.startswith('id:'):
            # An id is only legal after the previous entry completed (DESC).
            if not state == DESC:
                state = ERROR
                break
            state = ID
        elif line.startswith('label:'):
            if not state == ID:
                state = ERROR
                break
            state = LABEL
        elif line.startswith('desc:'):
            if not state == LABEL:
                state = ERROR
                break
            state = DESC
    in_file.close()
    return not state == ERROR
# Gap added after the last known id when no following id exists to average with.
DEFAULT_ID_DELTA = 100
def get_next_id(id_count, lines, index):
    """Pick an id between *id_count* and the next explicit 'id:' line.

    Scans *lines* forward from *index*.  If an 'id:' line is found before the
    next '***' section header, returns the midpoint of *id_count* and that id;
    otherwise returns id_count + DEFAULT_ID_DELTA.

    Fixes vs. the original: the loop no longer raises IndexError when *index*
    starts at or beyond len(lines), and an 'id:' appearing on the very last
    line is honored (the original returned the default before testing it).
    """
    while index < len(lines):
        line = lines[index]
        index += 1
        if line.startswith('***'):
            break  # new section: ids restart, stop scanning
        if line.startswith('id:'):
            return int((id_count + int(line[3:])) / 2)
    return id_count + DEFAULT_ID_DELTA
def add_ids_test_index():
    """Add an id to all tests which are missing one.

    Rewrites the TESTS file via a temp file: whenever a 'label:' line is seen
    while still in the DESC state (i.e. no 'id:' preceded it), an id is
    synthesized with get_next_id() and written in front of it.  Raises
    AssertionError if the id/label/desc sequence is otherwise malformed.
    """
    import shutil
    import tempfile
    # Same state machine as check_test_index().
    ID = 1
    LABEL = 2
    DESC = 3
    ERROR = 4
    state = DESC
    in_file = open(TESTS, 'rb')
    lines = in_file.readlines()
    in_file.close()
    id_count = 0  # last id seen in the current '***' section
    tmpfd, tmppath = tempfile.mkstemp(".tmp", "dftests.")
    tmpfile = os.fdopen(tmpfd, "w")
    # state order: ID, LABEL, DESC
    # title resets the id_count (counting restarts in each repo)
    for index, line in enumerate(lines):
        if line.startswith('***'):
            id_count = 0
        elif line.startswith('id:'):
            if not state == DESC:
                state = ERROR
                break
            state = ID
            id_count = int(line[3:])
        elif line.startswith('label:'):
            if state == DESC:
                # Missing id: synthesize one between id_count and the next id.
                id = get_next_id(id_count, lines, index)
                tmpfile.write("id: %#05i\n" % id)
                id_count = id
                state = ID
            if not state == ID:
                state = ERROR
                break
            state = LABEL
        elif line.startswith('desc:'):
            if not state == LABEL:
                state = ERROR
                break
            state = DESC
        # Every original line is passed through unchanged.
        tmpfile.write(line)
    tmpfile.close()
    if state == ERROR:
        raise AssertionError("Not well formed entry on line %s!" % index)
    # Replace TESTS atomically-ish with the augmented copy.
    shutil.copy(tmppath, TESTS)
    os.unlink(tmppath)
def get_tests():
    """Parse the TESTS file.

    Parse the TESTS file and return a list of Entry objects.  Entries are
    separated by blank lines; tagged lines (id:/label:/desc:/url:) switch the
    list that subsequent untagged lines are appended to.  '@pre'..'@/pre'
    spans are copied verbatim (preserving indentation) for later <pre>
    rendering.  Ids are backfilled first if any are missing.
    """
    if not check_test_index():
        add_ids_test_index()
    in_file = open(TESTS, 'rb')
    entries = []
    entry = Entry()
    cur = entry.buffer  # list currently receiving untagged lines
    counter = 1
    is_pre = False      # inside a @pre ... @/pre block
    pre_sapces = 0      # indentation of the '@pre' marker, stripped from pre lines
    for line in in_file.readlines():
        if "@pre" in line:
            pre_sapces = line.find("@pre")
            is_pre = True
            cur.append("@pre")
            continue
        if "@/pre" in line:
            pre_sapces = 0
            is_pre = False
            cur.append("@/pre")
            continue
        if is_pre:
            # Keep pre-formatted content, minus the common leading indent.
            cur.append(line[pre_sapces:])
            continue
        else:
            line = line.strip()
        if line.startswith('#'):
            continue
        elif not line:
            # Blank line terminates the current entry.
            entries.append(entry)
            entry = Entry()
            cur = entry.buffer
        elif line.startswith('id:'):
            cur = entry.id
            cur.append(line[3:])
        elif line.startswith('label:'):
            cur = entry.label
            cur.append(line[6:])
        elif line.startswith('desc:'):
            cur = entry.desc
            cur.append(line[5:])
        elif line.startswith('url:'):
            cur = entry.url
            cur.append(line[4:])
        elif line.startswith('***'):
            # Section header: whatever was buffered becomes the title.
            entry.title = entry.buffer
        elif line.startswith('deprecated:'):
            entry.deprecated = "true" in line.lower() and True or False
        else:
            # Continuation of the most recent tagged field.
            cur.append(line)
    in_file.close()
    return entries
def parse_title(title):
    """Split a section title into the top-level tab and its sub-tabs.

    Example: 'Error Console.All Console.CSS' -> ('Error Console',
    ['All', 'CSS']).  Words of a sub-tab that also appear (as a trailing
    substring) in the top tab are dropped, so 'All Console' becomes 'All'.

    Fixes vs. the original: map() was replaced with a list comprehension so
    .pop() works under Python 3, and the trim loop guards against popping an
    emptied word list.
    """
    # Error Console.All Console.JavaScript Console.CSS
    top_tab = ''
    sub_tabs = []
    t = title.strip().split('.')
    if len(t):
        top_tab = t.pop(0)
    if len(t) == 1:
        sub_tabs.append(t[0])
    elif len(t) > 1:
        for s in t:
            # Real list (not a map iterator) so pop() is available on Python 3.
            st = [x.strip() for x in s.split(' ')]
            # Drop trailing words already contained in the top tab name;
            # the extra `st and` guard prevents IndexError on an empty list.
            while st and st[len(st) - 1] in top_tab:
                st.pop()
            sub_tabs.append(' '.join(st))
    return top_tab, sub_tabs
def load_templates():
    """Load HTML template fragments from the TEMPLATES file into globals.

    The file is a sequence of sections, each starting with a line naming the
    template (an identifier beginning with 'HTML_'); the following lines,
    except '#' comments, form the template body.  Each section is published
    as a module-level global of that name, which the page-building functions
    reference directly (e.g. HTML_HEAD, HTML_URL).
    """
    with open(TEMPLATES, 'rb') as f:
        cur_key = ""
        cur_value = []
        for line in f.readlines():
            if line.startswith('HTML_'):
                # Flush the previous section before starting a new one.
                if cur_value:
                    globals()[cur_key] = "".join(cur_value)
                    cur_value = []
                cur_key = line.strip()
            elif not line.startswith('#'):
                cur_value.append(line)
        # Flush the final section.
        if cur_value:
            globals()[cur_key] = "".join(cur_value)
def label2filename(label):
    """Turn a test label (string or list of strings) into a safe file name.

    Filesystem- and URL-hostile characters are replaced with dashes, runs of
    dashes are collapsed, leading dashes removed, and the result lowercased.
    """
    text = ''.join(label)
    unsafe = ["|", "\\", "?", "*", "<", "\"", ":",
              ">", "+", "[", "]", "/", "%", " ", ","]
    for ch in unsafe:
        text = text.replace(ch, '-')
    text = re.sub(r"^-+", "", text)
    text = re.sub(r"--+", "-", text)
    return text.strip().lower()
def tests2singledocs():
    """Flatten parsed TESTS entries into per-test page descriptors.

    Title entries set the current mode/tabs/repo context; url-only entries
    update the current URL list; labeled entries become actual test pages,
    inheriting the current context.  Returns only the labeled entries.
    """
    entries = get_tests()
    for e in entries:
        e.normalize()
    # NOTE: is_empty() is inverted -- this keeps entries that HAVE content.
    # (Python 2: filter() returns a list here.)
    entries = filter(lambda e: e.is_empty(), entries)
    cur = Entry()  # running context carried between entries
    type = ''
    for entry in entries:
        if entry.title:
            # A section title resets mode, tabs and the output repo path.
            cur.mode, ts = parse_title(''.join(entry.title))
            cur.repo = [label2filename(cur.mode)]
            if ts:
                cur.repo.append(label2filename(ts[0]))
            cur.tabs = ', '.join(ts)
            type = 'title'
            index = 1
        elif entry.url:
            # Consecutive url-only entries accumulate; otherwise restart.
            if type == 'url':
                cur.urls.extend(entry.url)
            else:
                cur.urls = entry.url[:]
            type = 'url'
        if entry.label:
            type = 'label'
            # Stamp the current context onto the concrete test entry.
            entry.mode = cur.mode
            entry.tabs = cur.tabs
            entry.urls = entry.url or cur.urls
            entry.repo = cur.repo[0:]
            entry.index = ''.join(entry.id).strip()
            file_name = label2filename(entry.label)
            entry.file_name = "%s.%s.html" % (entry.index, file_name)
            index += 1
    return filter(lambda e: e.label , entries)
def print_index(index):
    """Write the top-level index.html linking every generated test page.

    *index* is a list of (mode, tab, label, path) tuples, assumed to be
    grouped by mode and tab in order.  A new mode or tab starts a new
    section in the output.
    """
    content = [HTML_HEAD % STYLESHEET_NAME, HTML_MAIN_TITLE]
    sections = []   # list of (title, links-or-None) in display order
    links = None
    cur_mode = ''
    cur_tab = ''
    for mode, tab, label, path in index:
        if not mode == cur_mode:
            # Mode header section (no links of its own).
            cur_mode = mode
            sections.append((mode, None))
            cur_tab = None
        if not tab == cur_tab:
            cur_tab = tab
            links = []
            sections.append((tab, links))
        links.append(HTML_URL % (path, label))
    for title, links in sections:
        if links == None:
            content.append(HTML_MODE_SECTION % title)
        else:
            content.append(HTML_SECTION % (title, "".join(links)))
    with open(os.path.join(PAPA, 'index.html'), 'wb') as f:
        f.write("".join(content))
def print_stylesheet():
    """Copy the source stylesheet into the generated output directory."""
    with open(STYLESHEET, 'rb') as source:
        css = source.read()
    target_path = os.path.join(PAPA, STYLESHEET_NAME)
    with open(target_path, 'wb') as target:
        target.write(css)
def item2html(item):
    """Convert one description item's lightweight markup to HTML."""
    html = item.replace('<', '<')
    html = html.replace('"', '"')
    html = html.replace('@pre', '<pre>')
    html = html.replace('@/pre', '</pre>')
    if "@line-through" in html:
        # Struck-through items get wrapped in the line-through template.
        html = HTML_LINE_THROUGH.strip() % html.replace("@line-through", "")
    return html
def test():
    """Generate the complete HTML documentation tree under PAPA.

    Loads the templates, flattens the TESTS file into page descriptors,
    renders one HTML page per labeled test (grouped in per-repo
    subdirectories), then writes the index and stylesheet and copies the
    fixture test-case pages alongside.
    """
    load_templates()
    entries = tests2singledocs()
    """
     label: Export
     mode: DOM
     tabs: DOM
     urls: http://dev.opera.com
     repo: dom
     index: 0002
     file_name: 0002.export.html
     desc: - Press the Export button.- Verify that the current view is displayed in a new tab.
    """
    if not os.path.exists(PAPA):
        os.makedirs(PAPA)
    index = []  # (mode, tabs, label, relative-path) rows for print_index()
    for e in entries:
        # Stylesheet reference is relative to the page's directory depth.
        content = [HTML_HEAD % (("../" * len(e.repo)) + STYLESHEET_NAME)]
        urls = []
        for u in e.urls:
            # Rewrite './'-relative URLs for the page's directory depth.
            u = u.replace('./', '../' * len(e.repo))
            urls.append(HTML_URL % (u, u))
        raw_items = [item2html(item) for item in e.desc if item]
        # Merge continuation lines: a new list item starts with '-' or '*'.
        string = ""
        items = []
        for item in raw_items:
            if item.startswith('-') or item.startswith('*'):
                if string:
                    items.append(string)
                string = item.lstrip('-* ')
            else:
                string += ' ' + item
        if string:
            items.append(string)
        content.append(HTML_TITLE % ("".join(e.label),
                                     e.deprecated and HTML_DEPRECATED or "",
                                     e.mode,
                                     e.tabs,
                                     "".join(urls),
                                     e.index,
                                     "".join([HTML_ITEM % item for item in items])))
        # Create the per-repo output directory and write the page.
        repo = PAPA
        for dir in e.repo:
            repo = os.path.join(repo, dir)
        if not os.path.exists(repo):
            os.makedirs(repo)
        with open(os.path.join(repo, e.file_name), 'wb') as f:
            f.write("".join(content))
        index.append((e.mode, e.tabs, "".join(e.label), "./%s/%s" % ('/'.join(e.repo), e.file_name)))
    print_index(index)
    print_stylesheet()
    # Copy fixture pages once; skipped if a previous run already copied them.
    if not os.path.exists(os.path.join(PAPA, TESTCASES)):
        shutil.copytree(TESTCASES, os.path.join(PAPA, TESTCASES))
# Script entry point: regenerate the full HTML test-documentation tree.
if __name__ == '__main__':
    test()
| StarcoderdataPython |
5017510 | #!/usr/bin/python3
"""
For creating SVG code from a tikz diagram
"""
import find_block
import os
import subprocess
import nowiki_block
class TikzDiagramException(Exception):
    """Raised when the external diagram API fails to render a TikZ diagram.

    The message carries the renderer's stderr output.  The explicit
    pass-through __init__ of the original was redundant -- Exception already
    accepts and stores the message -- so it was removed without changing the
    constructor's interface.
    """
def tikz_diagram_processor(tikz_diagram):
    """Render a bare TikZ snippet to SVG via the external diagram API.

    Wraps the snippet in a tikzpicture environment and pipes it on stdin to
    the executable named by the NLAB_DIAGRAM_API_PATH environment variable.
    Returns the renderer's stdout (the SVG markup).

    Raises TikzDiagramException with the renderer's stderr on failure.
    """
    tikz_diagram = (
        "\\begin{tikzpicture}" +
        tikz_diagram +
        "\\end{tikzpicture}")
    diagram_api_path = os.environ["NLAB_DIAGRAM_API_PATH"]
    completed_tikz_diagram_process = subprocess.run(
        [ diagram_api_path, "tikz" ],
        input = tikz_diagram,
        text = True,
        capture_output = True)
    if completed_tikz_diagram_process.returncode != 0:
        raise TikzDiagramException(
            completed_tikz_diagram_process.stderr)
    return completed_tikz_diagram_process.stdout
def tikz_commutative_diagram_processor(tikz_diagram):
    """Render a tikz-cd commutative diagram snippet to SVG.

    If the snippet begins with '[' it already supplies its own tikzcd
    options; otherwise sensible default spacing/scaling options are injected.
    The wrapped source is piped to the external diagram API (with the '-c'
    commutative-diagram flag) and the SVG from stdout is returned.

    Raises TikzDiagramException with the renderer's stderr on failure.
    """
    if tikz_diagram.strip()[0] == '[':
        tikz_diagram = (
            "\\begin{tikzcd}" +
            tikz_diagram +
            "\\end{tikzcd}")
    else:
        tikz_diagram = (
            "\\begin{tikzcd}" +
            "[row sep=huge, column sep=huge, transform shape, nodes = {scale=1.25}]" +
            tikz_diagram +
            "\\end{tikzcd}")
    diagram_api_path = os.environ["NLAB_DIAGRAM_API_PATH"]
    completed_tikz_diagram_process = subprocess.run(
        [ diagram_api_path, "tikz", "-c" ],
        input = tikz_diagram,
        text = True,
        capture_output = True)
    if completed_tikz_diagram_process.returncode != 0:
        raise TikzDiagramException(
            completed_tikz_diagram_process.stderr)
    return completed_tikz_diagram_process.stdout
def define_tikz():
    """Block definition matching \\begin{tikzpicture}...\\end{tikzpicture}.

    The matched body is handed to tikz_diagram_processor.
    # NOTE(review): the final positional True presumably toggles a Block
    # option -- confirm against find_block.Block's signature.
    """
    return find_block.Block(
        "\\begin{tikzpicture}",
        "\\end{tikzpicture}",
        tikz_diagram_processor,
        True)
def define_tikz_commutative_diagram():
    """Block definition matching \\begin{tikzcd}...\\end{tikzcd}.

    The matched body is handed to tikz_commutative_diagram_processor.
    """
    return find_block.Block(
        "\\begin{tikzcd}",
        "\\end{tikzcd}",
        tikz_commutative_diagram_processor,
        True)
def handle_tikz_diagrams(content):
    """Replace all TikZ/tikz-cd blocks in *content* with rendered SVG.

    nowiki blocks are registered so diagrams inside them are left untouched.
    """
    processor = find_block.Processor([
        define_tikz(),
        define_tikz_commutative_diagram(),
        nowiki_block.define(True) ])
    return processor.process(content)
| StarcoderdataPython |
6416795 | <filename>utls/add_exon_average_expression.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Add average expression level of each exon to each variant
1. Exon expression level file:
/groups/umcg-bios/prm02/projects/expression/combined_exon_count_run_1_passQC.TMM.txt.gz
2. Use the mean of all samples in the file.
3. No MT/sexual chromosome included
4. Only SNV
"""
from argparse import ArgumentParser
import pandas as pd
def main():
    """Annotate each variant with the mean expression of its containing exon.

    Reads an exon expression table (rows indexed like 'gene_chrom_start_end')
    and a variant matrix (with Chrom/Pos columns), merges them by genomic
    position, and writes the variant table with a new 'exon_exp' column: the
    across-sample mean expression of the exon overlapping each variant.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "-e", "--expression-table", required=True, dest="expression_table",
        help="The file including expression level for each exon"
    )
    parser.add_argument(
        "-v", "--variant-matrix", required=True, dest="variant_matrix",
        help="The file including variants"
    )
    parser.add_argument(
        "-o", "--output-file", default="output_file.tsv", dest="output_file",
        help="The file preocessed dataframe will be dumpped to"
    )
    args = parser.parse_args()
    expression_table = args.expression_table
    variant_matrix = args.variant_matrix
    output_file = args.output_file
    expression_dtfm = pd.read_csv(expression_table, sep="\t", header=0)
    variant_dtfm = pd.read_csv(variant_matrix, sep="\t", header=0)
    expression_tobe_index = expression_dtfm["-"]
    # Exon positions as (chrom, start, end); MT/X/Y are all mapped to 23.
    # NOTE(review): the module docstring claims no MT/sexual chromosomes are
    # included -- confirm, otherwise MT/X/Y collide under the shared code 23.
    expression_dtfm_new_idx = [
        tuple(23 if "MT" in y or "X" in y or "Y" in y else int(y) for y in x.rsplit("_")[1:])
        for x in expression_tobe_index
    ]
    del expression_dtfm["-"]
    # Variant positions as (chrom, pos) tuples.
    variant_dtfm_new_idx = list(zip(variant_dtfm["Chrom"], variant_dtfm["Pos"]))
    expression_dtfm.index = ["_".join([str(y) for y in x]) for x in expression_dtfm_new_idx]
    variant_dtfm.index = ["_".join([str(y) for y in x]) for x in variant_dtfm_new_idx]
    # Interleave exon (3-tuples) and variant (2-tuples) positions by
    # (chrom, first coordinate); the stable sort keeps an exon ahead of a
    # variant that starts at the same coordinate.
    position_pool = sorted(expression_dtfm_new_idx + variant_dtfm_new_idx, key=lambda x: x[:2])
    clipped_exon_position = (1, 0, 0)   # most recent exon seen in the sweep
    current_variant_position = (1, 0)
    exon_expression_for_variant = {}    # "chrom_pos" -> mean exon expression
    for position in position_pool:
        if len(position) == 3:
            # Exon entry: remember it for subsequent variants.
            clipped_exon_position = position
        elif len(position) == 2:
            current_variant_position = position
            variant_chr, variant_pos = current_variant_position
            exon_chr, exon_start, exon_end = clipped_exon_position
            if variant_chr == exon_chr:
                exon_average_expression = 0
                if exon_start <= variant_pos <= exon_end:
                    if current_variant_position in exon_expression_for_variant:
                        print("duplicated variant. This shouldn't happend")
                        continue
                    else:
                        # Mean over all samples for the containing exon.
                        exon_average_expression = expression_dtfm.loc["_".join([str(x) for x in clipped_exon_position]), :].mean()
                        exon_expression_for_variant["_".join([str(x) for x in current_variant_position])] = exon_average_expression
            else:
                print("Comparing a variant {} and a exon {} from different chrom, this shouldn't happen".format(current_variant_position, clipped_exon_position))
        else:
            print("Unknown type of position. This shouldn't happen")
    # Series aligns on the "chrom_pos" string index built above.
    variant_dtfm["exon_exp"] = pd.Series(exon_expression_for_variant)
    variant_dtfm.to_csv(output_file, sep="\t", header=True, index=False)
main()
| StarcoderdataPython |
6424921 | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import tobjcreator as tobjcreator
import os
import sys
def create(*args):
    """Validate the form fields and run the TOBJ conversion.

    *args* absorbs the optional Tk event object so this one callback serves
    both the Run button and the <Return> key binding.  Validation failures
    and success are reported through modal message boxes.
    """
    if not os.path.exists(source_tobj.get()):
        messagebox.showerror("Error","Source TOBJ file doesn't exist.")
    elif len(dds_path.get()) > 255:
        # TOBJ path fields are limited to 255 characters.
        messagebox.showerror("Error","DDS path is too long.")
    elif len(dds_path.get()) == 0 or len(output_tobj.get()) == 0:
        messagebox.showerror("Error","Please fill in all input boxes.")
    else:
        tobjcreator.main(source_tobj.get(),dds_path.get(),output_tobj.get())
        messagebox.showinfo("Success!","TOBJ file successfully created!")
# --- GUI construction -------------------------------------------------------
root = Tk()
root.title("TOBJ Creator " + tobjcreator.version + " GUI")
# BUG FIX: the original tested `os.system == "win32"`, comparing the
# os.system *function* to a string (always False), so the icon was never set.
# The platform string lives in sys.platform.
if sys.platform == "win32":
    root.iconbitmap(sys.path[0] + "/icon.ico")
root.resizable(False, False)
mainframe = ttk.Frame(root, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
# Bound variables backing the three entry fields.
source_tobj = StringVar()
dds_path = StringVar()
output_tobj = StringVar()
ttk.Label(mainframe, text="Source TOBJ File:").grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="DDS Path:").grid(column=1, row=2, sticky=W)
ttk.Label(mainframe, text="Output TOBJ File:").grid(column=1, row=3, sticky=W)
source_tobj_entry = ttk.Entry(mainframe, width=30, textvariable=source_tobj).grid(column=2, row=1, sticky=(W, E))
dds_path_entry = ttk.Entry(mainframe, width=30, textvariable=dds_path).grid(column=2, row=2, sticky=(W, E))
output_tobj_entry = ttk.Entry(mainframe, width=30, textvariable=output_tobj).grid(column=2, row=3, sticky=(W, E))
ttk.Button(mainframe, text="Run", command=create).grid(column=2, row=4, sticky=W)
# Uniform padding on every widget in the frame.
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
# Pressing Enter anywhere triggers the same callback as the Run button.
root.bind('<Return>', create)
root.mainloop()
| StarcoderdataPython |
3580533 | <filename>11_python-data-science-toolbox-(part-2)/1-using-iterators-in-pythonland/06_using-zip.py
'''
06 - Using zip
Another interesting function that you've learned is zip(), which takes any number of
iterables and returns a zip object that is an iterator of tuples. If you wanted to print
the values of a zip object, you can convert it into a list and then print it. Printing
just a zip object will not return the values unless you unpack it first. In this exercise,
you will explore this for yourself.
Three lists of strings are pre-loaded: mutants, aliases, and powers. First, you will use
list() and zip() on these lists to generate a list of tuples. Then, you will create a zip
object using zip(). Finally, you will unpack this zip object in a for loop to print the
values in each tuple. Observe the different output generated by printing the list of tuples,
then the zip object, and finally, the tuple values in the for loop.
Instructions:
- Using zip() with list(), create a list of tuples from the three lists mutants, aliases,
and powers (in that order) and assign the result to mutant_data.
- Using zip(), create a zip object called mutant_zip from the three lists mutants, aliases,
and powers.
- Complete the for loop by unpacking the zip object you created and printing the tuple values.
- Use value1, value2, value3 for the values from each of mutants, aliases, and powers, in that order.
'''
# Provided lists
mutants = ['<NAME>', '<NAME>',
           '<NAME>', '<NAME>', 'kit<NAME>']
aliases = ['prof x', 'iceman', 'nightcrawler', 'magneto', 'shadowcat']
powers = ['telepathy', 'thermokinesis',
          'teleportation', 'magnetokinesis', 'intangibility']
# Create a list of tuples: mutant_data
mutant_data = list(zip(mutants, aliases, powers))
# Print the list of tuples
print(mutant_data)
# Create a zip object using the three lists: mutant_zip
mutant_zip = zip(mutants, aliases, powers)
# Print the zip object (shows only its repr, not the tuples)
print(mutant_zip)
# Unpack the zip object and print the tuple values.
# Note: a zip object is a single-pass iterator -- it is exhausted after this loop.
for value1, value2, value3 in mutant_zip:
    print(value1, value2, value3)
| StarcoderdataPython |
9717955 | <reponame>Jatin-Madaan/Raspberrypi3
# show the temperature of processor on thinkspeak pi3 B+
import http.client
import urllib
import sys
import time as t
# Seconds to wait between uploads (ThingSpeak rate-limits free channels).
sleep = 5
# ThingSpeak channel write API key (redacted placeholder).
key = '<KEY>'
def temperature():
    """Poll the SoC temperature and POST it to ThingSpeak every *sleep* seconds.

    Runs forever.  Reads millidegrees Celsius from the thermal sysfs node,
    converts to degrees, and posts it as 'field1' of the channel identified
    by the module-level *key*.  Network errors are logged and the loop
    continues.
    """
    while True:
        # `with` closes the sysfs handle (the original leaked it each pass).
        with open('/sys/class/thermal/thermal_zone0/temp') as sensor:
            temp = int(sensor.read())
        temp = temp / 1000  # millidegrees -> degrees Celsius
        # NOTE(review): top of file should `import urllib.parse` explicitly;
        # plain `import urllib` does not guarantee the submodule is loaded.
        params = urllib.parse.urlencode({'field1' : temp, 'key': key})
        # BUG FIX: header name was garbled ("Content-typeZZe"); the form body
        # needs a well-formed Content-type header.
        headers = {"Content-type" : "application/x-www-form-urlencoded","Accept":"text/plain"}
        conn = http.client.HTTPConnection("api.thingspeak.com:80")
        try:
            conn.request("POST","/update",params,headers)
            response = conn.getresponse()
            print(temp)
            print(response.status, response.reason)
            data = response.read()
            conn.close()
        except (OSError, http.client.HTTPException):
            # Narrowed from a bare `except:` so real bugs are not swallowed.
            print("connection failed")
        t.sleep(sleep)
# Guarded so the module can be imported without starting the infinite
# upload loop; behavior when run as a script is unchanged.
if __name__ == '__main__':
    temperature()
| StarcoderdataPython |
11336760 | import argparse
import random
import torch
from dqn.agent import DQNAgent
from dqn.replay_buffer import ReplayBuffer
from dqn.wrappers import *
def _parse_bool(text):
    """argparse-friendly boolean parser: 'False'/'0'/'no' and friends -> False."""
    return text.strip().lower() not in ("false", "0", "no", "n", "f")

def parse_args(argv=None):
    """Build and parse the CLI arguments for the Atari DQN experiments.

    *argv* defaults to None (i.e. sys.argv), so existing callers are
    unaffected; passing a list makes the function testable.

    BUG FIX: --use-double-dqn previously used ``type=bool`` -- argparse calls
    bool() on the raw string, and bool("False") is True, so the flag could
    never be disabled.  A proper string-to-bool parser is used instead.
    """
    parser = argparse.ArgumentParser("DQN experiments for Atari games")
    parser.add_argument("--seed", type=int, default=42, help="which seed to use")
    # Environment
    parser.add_argument("--env", type=str, default="PongNoFrameskip-v4", help="name of the game")
    # Core DQN parameters
    parser.add_argument("--replay-buffer-size", type=int, default=int(1e6), help="replay buffer size")
    parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for Adam optimizer")
    parser.add_argument("--gamma", type=float, default=0.99, help="discount factor")
    parser.add_argument("--num-steps", type=int, default=int(1e6),
                        help="total number of steps to run the environment for")
    parser.add_argument("--batch-size", type=int, default=32, help="number of transitions to optimize at the same time")
    parser.add_argument("--learning-starts", type=int, default=10000, help="number of steps before learning starts")
    parser.add_argument("--learning-freq", type=int, default=1,
                        help="number of iterations between every optimization step")
    parser.add_argument("--target-update-freq", type=int, default=1000,
                        help="number of iterations between every target network update")
    parser.add_argument("--use-double-dqn", type=_parse_bool, default=True, help="use double deep Q-learning")
    # e-greedy exploration parameters
    parser.add_argument("--eps-start", type=float, default=1.0, help="e-greedy start threshold")
    parser.add_argument("--eps-end", type=float, default=0.02, help="e-greedy end threshold")
    parser.add_argument("--eps-fraction", type=float, default=0.1, help="fraction of num-steps")
    # Reporting
    parser.add_argument("--print-freq", type=int, default=10, help="print frequency.")
    return parser.parse_args(argv)
# Training entry point: builds the wrapped Atari environment, fills the
# replay buffer while acting epsilon-greedily, and periodically optimizes
# the Q-network and syncs the target network.
if __name__ == '__main__':
    args = parse_args()
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    assert "NoFrameskip" in args.env, "Require environment with no frameskip"
    env = gym.make(args.env)
    env.seed(args.seed)
    # Standard Atari preprocessing stack (DeepMind-style wrappers).
    env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    env = EpisodicLifeEnv(env)
    env = FireResetEnv(env)
    env = WarpFrame(env)
    env = PyTorchFrame(env)
    env = ClipRewardEnv(env)
    env = FrameStack(env, 4)
    replay_buffer = ReplayBuffer(args.replay_buffer_size)
    agent = DQNAgent(
        env.observation_space,
        env.action_space,
        replay_buffer,
        use_double_dqn=args.use_double_dqn,
        lr=args.lr,
        batch_size=args.batch_size,
        gamma=args.gamma
    )
    # Epsilon is annealed linearly over the first eps_fraction of training.
    eps_timesteps = args.eps_fraction * float(args.num_steps)
    episode_rewards = [0.0]
    loss = [0.0]
    state = env.reset()
    for t in range(args.num_steps):
        fraction = min(1.0, float(t) / eps_timesteps)
        eps_threshold = args.eps_start + fraction * (args.eps_end - args.eps_start)
        sample = random.random()
        # Epsilon-greedy action selection.
        if sample > eps_threshold:
            action = agent.act(np.array(state))
        else:
            action = env.action_space.sample()
        next_state, reward, done, _ = env.step(action)
        agent.memory.add(state, action, reward, next_state, float(done))
        state = next_state
        episode_rewards[-1] += reward
        if done:
            state = env.reset()
            episode_rewards.append(0.0)
        # Optimize only after the warm-up period, at the configured cadence.
        if t > args.learning_starts and t % args.learning_freq == 0:
            agent.optimise_td_loss()
        if t > args.learning_starts and t % args.target_update_freq == 0:
            agent.update_target_network()
        num_episodes = len(episode_rewards)
        # Periodic progress report (mean over the last 100 finished episodes).
        if done and args.print_freq is not None and len(episode_rewards) % args.print_freq == 0:
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            print("********************************************************")
            print("steps: {}".format(t))
            print("episodes: {}".format(num_episodes))
            print("mean 100 episode reward: {}".format(mean_100ep_reward))
            print("% time spent exploring: {}".format(int(100 * eps_threshold)))
            print("********************************************************")
4810659 | <filename>cocoLRPapi-master/PythonAPI/evalDemoLRP.py
import os
from pycocotools.coco import COCO
from pycocotools.cocoevalLRP import COCOevalLRP
# from boxmapDist import calc_boxmap_FP_FN
def calc_LRP(algorithm,tau=0.50):
    """Compute and print the LRP metric for one algorithm's detections.

    Expects ``./LRP/output/<algorithm>/`` to contain COCO-format
    ``targets.json`` (ground truth) and ``results.json`` (detections).
    *tau* is the IoU threshold used for the LRP evaluation.  Results are
    printed by the summarize() call; nothing is returned.
    """
    #initialize COCO ground truth api
    annFile = os.path.join('./','LRP','output',algorithm,"targets.json")
    cocoGt=COCO(annFile)
    #initialize COCO detections api
    resFile = os.path.join('./','LRP','output',algorithm,"results.json")
    #resFile = os.path.join(root_path,algorithm,"results.json")
    cocoDt=cocoGt.loadRes(resFile)
    # running evaluation
    # 0 = print only the summary, not the per-class detailed LRP breakdown.
    DetailedLRPResultNeeded=0
    cocoEvalLRP = COCOevalLRP(cocoGt,cocoDt,tau)
    cocoEvalLRP.evaluate()
    cocoEvalLRP.accumulate()
    cocoEvalLRP.summarize(DetailedLRPResultNeeded)
# def calc_BoxmapDist():
# gt_paths = glob.glob(os.path.join(root_path,algorithm,"boxmap/gt","*"))
# pred_paths = glob.glob(os.path.join(root_path,algorithm,"boxmap/pred","*"))
# FP_mean, FN_mean = calc_boxmap_FP_FN(gt_paths,pred_paths)
# print("FP_mean: {0}, FN_mean: {1}".format(FP_mean,FN_mean))
# calc_LRP(.50)
# calc_BoxmapDist()
| StarcoderdataPython |
3325819 | import json
import logging
import aiohttp
from expiringdict import ExpiringDict
from datetime import datetime, timedelta
from .const import (
DEFAULT_DATA_CACHE_SECONDS,
DEFAULT_NAS_LANGUAGE,
DEFAULT_PROTOCOL,
LINKSTATION_API_ACTION_PARAM_NAME,
LINKSTATION_API_AUTH_REPONSE_PAGEMODE,
LINKSTATION_API_AUTH_REPONSE_SID,
LINKSTATION_API_GETALLDISK_FUNC_NAME,
LINKSTATION_API_GETSETTINGS_FUNC_NAME,
LINKSTATION_API_LOGIN_FUNC_NAME,
LINKSTATION_API_REBOOT_ACTION_NAME,
LINKSTATION_API_REBOOT_FUNC_NAME,
LINKSTATION_API_PARAM_PASSWORD,
LINKSTATION_API_PARAM_USERNAME,
LINKSTATION_API_ENDPOINT,
LINKSTATION_API_FUNCTION_PARAM_NAME,
LINKSTATION_API_REPONSE_DATA_DISK_AMOUNT_USED,
LINKSTATION_API_REPONSE_DATA_DISK_CAPACITY,
LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT,
LINKSTATION_API_REPONSE_DATA_DISK_PCT_USED,
LINKSTATION_API_REPONSE_DATA_DISK_STATUS,
LINKSTATION_API_REPONSE_DATA_DISK_STATUS_DISCONNECT,
LINKSTATION_API_REPONSE_DATA_DISK_STATUS_REMOVE,
LINKSTATION_API_REPONSE_DATA_DISK_UNITNAME,
LINKSTATION_API_REPONSE_DATA_GENERALINFO_ELEMENT,
LINKSTATION_API_REPONSE_DATA_ELEMENT,
LINKSTATION_API_REPONSE_SUCCESS_STATUS,
LINKSTATION_COOKIE_PREFIX,
LINKSTATION_COOKIE_SEPARATOR,
)
# Module-level logger following the standard library convention.
_LOGGER = logging.getLogger(__name__)
class LinkStation:
"""A class for manage LinkStation instance."""
    def __init__(
        self,
        username,
        password,
        address,
        session=None,
        language=DEFAULT_NAS_LANGUAGE,
        protocol=DEFAULT_PROTOCOL,
        cache_age=DEFAULT_DATA_CACHE_SECONDS,
    ) -> None:
        """Set up a LinkStation client (no network I/O happens here).

        username/password: NAS web-UI credentials.
        address: host name or IP of the NAS.
        session: optional shared aiohttp ClientSession; created lazily if None.
        language: UI language code embedded in the auth cookie.
        protocol: 'http' or 'https'.
        cache_age: seconds to keep fetched data in the in-memory cache.
        """
        self._username = username
        self._password = password
        self._address = address
        self._language = language
        self._protocol = protocol
        self._session = session
        self._api = None  # full API endpoint URL, built on connect
        # Small TTL cache for settings/disk payloads.
        self._cache = ExpiringDict(max_len=10, max_age_seconds=cache_age)
        # When the current auth session expires; None = never authenticated.
        self._authentication_expire_at = None
def _authentication_required(self) -> bool:
if (self._authentication_expire_at is None):
return True
if (self._authentication_expire_at < datetime.now):
return True
return False
    async def get_data_with_cache_async(self):
        """Return the cached disks payload, refetching on cache miss/expiry."""
        data = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
        if data is None:
            # Cache entry missing or expired; fetch fresh disk info.
            _LOGGER.debug("Data key{%s} missing from cache", LINKSTATION_API_REPONSE_DATA_ELEMENT)
            data = await self.get_disks_info_async()
        return data
    async def connect_async(self):
        """Authenticate against the NAS and prepare the session cookie.

        Builds the API URL, posts the login form, and on success stores the
        session id / page mode, builds the auth cookie, and sets a 10-minute
        re-authentication deadline.
        NOTE(review): on failure this only logs -- _sid/_cookies stay unset,
        so later requests would fail with AttributeError; confirm intended.
        """
        self._api = "{}://{}/{}".format(
            self._protocol, self._address, LINKSTATION_API_ENDPOINT
        )
        formdata = aiohttp.FormData()
        formdata.add_field(
            LINKSTATION_API_FUNCTION_PARAM_NAME, LINKSTATION_API_LOGIN_FUNC_NAME
        )
        formdata.add_field(LINKSTATION_API_PARAM_USERNAME, self._username)
        formdata.add_field(LINKSTATION_API_PARAM_PASSWORD, self._password)
        # Create (or recreate) the shared HTTP session lazily.
        if self._session is None or self._session.closed:
            self._session = aiohttp.ClientSession()
        async with self._session.post(self._api, data=formdata) as authresp:
            _LOGGER.debug(await authresp.text())
            authData = json.loads(await authresp.text())
            if self._is_success(authData):
                self._sid = self._get_user_sid(authData)
                self._pagemode = self._get_pagemode(authData)
                self._cookies = self._create_authentication_cookie()
                # NAS sessions are assumed valid for 10 minutes.
                self._authentication_expire_at = datetime.now() + timedelta(minutes=10)
            else:
                _LOGGER.error("Authentication failed")
def _is_success(self, authresponsejson):
return authresponsejson[LINKSTATION_API_REPONSE_SUCCESS_STATUS]
def _get_user_sid(self, authresponsejson):
return authresponsejson[LINKSTATION_API_REPONSE_DATA_ELEMENT][0][
LINKSTATION_API_AUTH_REPONSE_SID
]
def _get_pagemode(self, authresponsejson):
return authresponsejson[LINKSTATION_API_REPONSE_DATA_ELEMENT][0][
LINKSTATION_API_AUTH_REPONSE_PAGEMODE
]
def _create_authentication_cookie(self):
return {
LINKSTATION_COOKIE_PREFIX
+ self._username: self._sid
+ LINKSTATION_COOKIE_SEPARATOR
+ self._language
+ LINKSTATION_COOKIE_SEPARATOR
+ str(self._pagemode)
}
async def _get_settings_info(self):
if self._session == None or self._session.closed or self._authentication_required:
await self.connect_async()
params = {
LINKSTATION_API_FUNCTION_PARAM_NAME: LINKSTATION_API_GETSETTINGS_FUNC_NAME
}
try:
async with self._session.get(
self._api, params=params, cookies=self._cookies
) as settingresp:
settingInfo = json.loads(await settingresp.text())
if self._is_success(settingInfo):
_LOGGER.debug(await settingresp.text())
self._cache['settingInfo'] = settingInfo
return settingInfo
return None
# here, the async with context for the response ends, and the response is
# released.
except aiohttp.ClientConnectionError:
# something went wrong with the exception, decide on what to do next
_LOGGER.error("Oops, the connection was dropped before we finished", exc_info=True)
except aiohttp.ClientError as client_error:
# something went wrong in general. Not a connection error, that was handled
# above.
_LOGGER.error("Oops, something else went wrong with the request %s", client_error.with_traceback, exc_info=True)
async def get_spaces_info_desc_async(self):
return await self._get_settingsinfo_field_async("r_storage")
async def get_linkstation_name_async(self):
return await self._get_settingsinfo_field_async("r_hostname")
async def get_linkstation_ipaddress_async(self):
return await self._get_settingsinfo_field_async("r_ipAddr:1")
async def get_linkstation_firmware_version_async(self):
return await self._get_settingsinfo_field_async("r_version")
async def _get_settingsinfo_field_async(self, fieldname):
if self._cache.get("settingInfo") == None:
_LOGGER.debug("Setting Info {%s} missing from cache", fieldname)
settingInfo = await self._get_settings_info()
else:
settingInfo = self._cache.get("settingInfo")
for data in settingInfo[LINKSTATION_API_REPONSE_DATA_ELEMENT][0][
LINKSTATION_API_REPONSE_DATA_GENERALINFO_ELEMENT
]:
if data["name"] == fieldname:
_LOGGER.debug(fieldname + ": " + data["value"])
return data["value"]
async def restart_async(self):
if self._session == None or self._session.closed or self._authentication_required:
await self.connect_async()
formdata = aiohttp.FormData()
formdata.add_field(
LINKSTATION_API_FUNCTION_PARAM_NAME, LINKSTATION_API_REBOOT_FUNC_NAME
)
formdata.add_field(
LINKSTATION_API_ACTION_PARAM_NAME, LINKSTATION_API_REBOOT_ACTION_NAME
)
async with self._session.post(
self._api, data=formdata, cookies=self._cookies
) as rebootresp:
rebootInfo = json.loads(await rebootresp.text())
_LOGGER.debug(await rebootresp.text())
if self._is_success(rebootInfo):
_LOGGER.info("LinkStation restarting ... ")
async def get_disks_info_with_cache_async(self):
if self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT):
return self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
return await self.get_disks_info_async()
async def get_disks_info_async(self):
if self._session == None or self._session.closed or self._authentication_required:
await self.connect_async()
formdata = aiohttp.FormData()
formdata.add_field(
LINKSTATION_API_FUNCTION_PARAM_NAME, LINKSTATION_API_GETALLDISK_FUNC_NAME
)
try:
async with self._session.post(
self._api, data=formdata, cookies=self._cookies
) as getdisksresp:
response_str = await getdisksresp.text()
getdisksinfo = json.loads(response_str)
_LOGGER.debug(response_str)
if self._is_success(getdisksinfo):
self._cache[LINKSTATION_API_REPONSE_DATA_ELEMENT] = getdisksinfo[LINKSTATION_API_REPONSE_DATA_ELEMENT]
return getdisksinfo[LINKSTATION_API_REPONSE_DATA_ELEMENT]
else:
_LOGGER.error("Server return unsuccess response %s %s", getdisksresp.reason, response_str)
return None
# here, the async with context for the response ends, and the response is
# released.
except aiohttp.ClientConnectionError:
# something went wrong with the exception, decide on what to do next
_LOGGER.error("Oops, the connection was dropped before we finished", exc_info=True)
except aiohttp.ClientError as client_error:
# something went wrong in general. Not a connection error, that was handled
# above.
_LOGGER.error("Oops, something else went wrong with the request %s", client_error.with_traceback, exc_info=True)
return None
async def get_all_disks_async(self):
diskInfo = await self.get_data_with_cache_async()
disk_list = []
for dataelement in diskInfo:
disk_list.append(dataelement[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT])
return disk_list
async def get_active_disks_async(self):
disk_list = await self.get_all_disks_async()
active_list = []
for disk in disk_list:
if await self.get_disk_status_async(disk) not in (
LINKSTATION_API_REPONSE_DATA_DISK_STATUS_REMOVE,
LINKSTATION_API_REPONSE_DATA_DISK_STATUS_DISCONNECT,
):
active_list.append(disk)
return active_list
async def get_disk_status_async(self, diskName):
diskInfo = await self.get_data_with_cache_async()
if diskInfo is None :
return None
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data[LINKSTATION_API_REPONSE_DATA_DISK_STATUS]
return None
def get_disk_status(self, diskName):
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data[LINKSTATION_API_REPONSE_DATA_DISK_STATUS]
return None
async def get_disk_data_async(self, diskName):
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data
return None
def get_disk_data(self, diskName):
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data
return None
async def get_disk_capacity_async(self, diskName) -> int:
"""Get disk capacity, data return in GB"""
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_CAPACITY]
)
return None
def get_disk_capacity(self, diskName) -> int:
"""Get disk capacity, data return in GB"""
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
if diskInfo is None:
return None
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_CAPACITY]
)
return None
async def get_disk_amount_used_async(self, diskName) -> int:
"""Get disk spaces used, data return in GB"""
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_AMOUNT_USED]
)
return None
def get_disk_amount_used(self, diskName) -> int:
"""Get disk spaces used, data return in GB"""
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
if diskInfo is None:
return None
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_AMOUNT_USED]
)
return None
def _format_disk_space(self, diskSpaceStr: str) -> int:
number = diskSpaceStr.removesuffix(" KB").replace(",", "")
return round(int(number) / 1024 / 1024)
def _format_disk_pct(self, diskPct: str) -> float:
percentUsed = diskPct.removesuffix(" %")
return float(percentUsed)
async def get_disk_pct_used_async(self, diskName) -> float:
"""Get disk space used, data return in percentage"""
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_pct(
data[LINKSTATION_API_REPONSE_DATA_DISK_PCT_USED]
)
return None
def get_disk_pct_used(self, diskName) -> float:
"""Get disk space used, data return in percentage"""
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
if diskInfo is None:
return None
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_pct(
data[LINKSTATION_API_REPONSE_DATA_DISK_PCT_USED]
)
return None
async def get_disk_free_async(self, diskName) -> int:
"""Get disk space used, data return in percentage"""
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_CAPACITY]
) - self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_AMOUNT_USED]
)
return None
def get_disk_free(self, diskName) -> int:
"""Get disk space used, data return in percentage"""
diskInfo = diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
if diskInfo is None:
return None
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_CAPACITY]
) - self._format_disk_space(
data[LINKSTATION_API_REPONSE_DATA_DISK_AMOUNT_USED]
)
return None
async def get_disk_unit_name_async(self, diskName):
"""Get HDD manufacturing info."""
diskInfo = await self.get_data_with_cache_async()
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data[LINKSTATION_API_REPONSE_DATA_DISK_UNITNAME].strip()
return None
def get_disk_unit_name(self, diskName):
"""Get HDD manufacturing info."""
diskInfo = self._cache.get(LINKSTATION_API_REPONSE_DATA_ELEMENT)
for data in diskInfo:
if data[LINKSTATION_API_REPONSE_DATA_DISK_ELEMENT] == diskName:
return data[LINKSTATION_API_REPONSE_DATA_DISK_UNITNAME].strip()
return None
async def close(self):
if self._session:
await self._session.close()
| StarcoderdataPython |
1832961 | <filename>DB_api.py<gh_stars>0
# -*- coding: utf-8 -*-
#working for Python 2.7.17
"""
Comment: API for interaction with the Deutsch Bahn REST APIs
Special: You HAVE TO subscribe to the chosen "sub-APIs" for all methods to work
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__version__ = "0.0.1"
__status__ = "In development"
import requests
import json
import datetime
#Base URL and Authorization link
Base_url = "https://api.deutschebahn.com"
auth_link = "Authorization: Bearer "
#API url for Fahrplan_api
Fahrplan_api = Base_url + "/fahrplan-plus/v1"
#API url for Betriebsstellen API
Betriebsstellen = Base_url + "/betriebsstellen/v1"
#API url for Station_data API
Station_data = Base_url + "/stada/v2"
#API url for BahnhofsFotos API
BahnhofsFotos = Base_url + "/bahnhofsfotos/v1"
#API url for Bahn
class API():
    """Client for the Deutsche Bahn open-data REST APIs.

    Wraps the Fahrplan, Betriebsstellen, StaDa (station data) and
    BahnhofsFotos endpoints. You must subscribe to each sub-API on the
    DB developer portal before the corresponding methods will work.
    """

    def __init__(self, token):
        """Remember the bearer token used to authorize every request.

        Args:
            token: API secret token from the DB developer portal.
        """
        # BUGFIX: the value of the "Authorization" header must be
        # "Bearer <token>"; the original stored "Authorization: Bearer <token>"
        # and sent it as the header *value*, duplicating the header name
        # (malformed per RFC 6750).
        self.auth_token = "Bearer " + token

    def _get_json(self, link, params=None, accept_json=False):
        """Perform an authorized GET request and decode the JSON body.

        Args:
            link: Absolute URL to request.
            params: Optional query-string parameters.
            accept_json: When True, also send "Accept: application/json".
        """
        headers = {"Authorization": self.auth_token}
        if accept_json:
            headers["Accept"] = "application/json"
        response = requests.get(link, headers=headers, params=params)
        return json.loads(response.content)

    @staticmethod
    def _default_timestamp(date):
        """Return date unchanged, or the current time as yyyy-mm-ddTHH:MM:SS."""
        if date is None:
            return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
        return date

    def get_photographs(self, country):
        """Return all station photos for one country (BahnhofsFotos API)."""
        return self._get_json(BahnhofsFotos + "/{}/stations".format(country),
                              accept_json=True)

    def get_photographers(self):
        """Return all photographers and their stats (currently photo counts)."""
        return self._get_json(BahnhofsFotos + "/photographers")

    def get_country_photographers(self, country):
        """Return all photographers for one country (by country code)."""
        return self._get_json(BahnhofsFotos + "/{}/photographers".format(country))

    def get_photograph_country_stats(self, country):
        """Return BahnhofsFotos statistics for one specific country."""
        return self._get_json(BahnhofsFotos + "/{}/stats".format(country))

    def get_photograph_stats(self):
        """Return statistics for the whole BahnhofsFotos API."""
        return self._get_json(BahnhofsFotos + "/stats")

    def get_photograph_countries(self):
        """Return details for all covered countries (incl. their country codes)."""
        return self._get_json(BahnhofsFotos + "/countries")

    def get_betriebsstellen(self, query_string):
        """Return all operating points whose name matches query_string."""
        return self._get_json(Betriebsstellen + "/betriebsstellen",
                              params={"name": query_string})

    def get_betriebsstelle_abbrev(self, abbreveation):
        """Return details for one operating point by name or abbreviation."""
        return self._get_json(Betriebsstellen + "/betriebsstellen/{}".format(abbreveation))

    def get_station_data(self, query_string=None, limit=10000, federal_state=None):
        """Return station data (StaDa API), optionally filtered.

        Args:
            query_string: Exact name of the searched station.
            limit: Maximum number of results (the API maximum is 10000).
            federal_state: Restrict results to one federal state.
        """
        params = {"limit": limit, "searchstring": query_string,
                  "federalstate": federal_state}
        return self._get_json(Station_data + "/stations", params=params)

    def get_location(self, query_string):
        """Return all locations similar to query_string (Fahrplan API)."""
        return self._get_json(Fahrplan_api + "/location/{}".format(query_string))

    def get_arrivals(self, id, date=None):
        """Return the arrival board of a station at a given time.

        Args:
            id: Location id obtained from get_location().
            date: Optional timestamp yyyy-mm-ddTHH:MM:SS; defaults to now.
        """
        date = self._default_timestamp(date)
        return self._get_json(Fahrplan_api + "/arrivalBoard/{}".format(id),
                              params={"date": date})

    def get_departures(self, id, date=None):
        """Return the departure board of a station at a given time.

        Args:
            id: Location id obtained from get_location().
            date: Optional timestamp yyyy-mm-ddTHH:MM:SS; defaults to now.
        """
        date = self._default_timestamp(date)
        return self._get_json(Fahrplan_api + "/departureBoard/{}".format(id),
                              params={"date": date})

    def get_journey(self, id):
        """Return journey details for a detailsId from the arrival/departure boards."""
        # The detailsId contains '%' characters that must themselves be
        # percent-encoded ('%25') before being used in a URL path.
        link = Fahrplan_api + "/journeyDetails/{}".format(id).replace('%', '%25')
        return self._get_json(link)
| StarcoderdataPython |
1838030 | <gh_stars>1-10
import numpy as np
from timeit import default_timer as timer
import torch.nn as nn
import torch
from torch import optim
import pandas as pd
class trainer():
def __init__(self, model, params):
"""
"""
# Set model.
self.model = model
# Set training parameters.
self.num_epochs = params[0]
assert(self.num_epochs >= 1)
self.max_epochs_stop = params[1]
self.num_classes = params[2]
self.batch_size = params[3]
self.learning_rate = params[4]
self.print_every = params[5]
# Set validation stopping variables.
self.epochs_no_improve = 0
self.valid_loss_min = np.Inf
self.valid_max_acc = 0
self.history = []
    def test(self):
        """Evaluate the model on the test loader.

        Returns:
            (test_loss, test_acc): dataset-averaged loss and accuracy.
        Side effects: prints the metrics, appends the accuracy to
        self.model.history['acc'] and calls self.model.estimate_capacity().
        """
        test_loss, test_acc = 0.0, 0.0
        for data, target in self.model.test_loader:
            # Move the batch to the GPU when available.
            # NOTE(review): bare except silently falls back to CPU and would
            # also hide unrelated errors — consider except RuntimeError.
            try:
                data, target = data.cuda(), target.cuda()
            except:
                pass
            output = self.model.model(data)
            loss = self.model.criterion(output, target)
            # Weight per-batch metrics by batch size for a dataset average.
            test_loss += loss.item() * data.size(0)
            _, pred = torch.max(output, dim=1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
            test_acc += accuracy.item() * data.size(0)
        test_loss = test_loss / len(self.model.test_loader.dataset)
        test_acc = test_acc / len(self.model.test_loader.dataset)
        print(f'\nTest Loss: {test_loss} \tTest Accuracy: {test_acc:.4f}')
        self.model.history['acc'].append(test_acc)
        self.model.estimate_capacity()
        return test_loss, test_acc
    def train(self):
        """Train the model with per-epoch validation and best-checkpoint saving.

        NOTE(review): this method appears truncated — the early-stopping
        counter (epochs_no_improve / max_epochs_stop) is incremented but
        never triggers a stop, and history/best-epoch bookkeeping is not
        finalized. Confirm against the original source.
        """
        # Show number of epochs already trained if using loaded in model weights.
        try:
            print(f'Model has been trained for: {self.model.epochs} epochs.\n')
        except:
            self.model.model.epochs = 0
            print(f'Starting Training from Scratch.\n')
        overall_start = timer()
        best_epoch = None
        overall_best_epoch = None
        for epoch in range(self.num_epochs):
            train_loss, valid_loss = 0.0, 0.0
            train_acc, valid_acc = 0, 0
            self.model.model.train()
            start = timer()
            # ---- training pass over the train loader ----
            for ii, (data, target) in enumerate(self.model.train_loader):
                # NOTE(review): bare except hides non-CUDA errors.
                try:
                    data, target = data.cuda(), target.cuda()
                except:
                    print('Can\'t train on CUDA - not available!')
                self.model.optimizer.zero_grad()
                output = self.model.model(data)
                loss = self.model.criterion(output, target)
                loss.backward()
                self.model.optimizer.step()
                # Batch-size-weighted running metrics.
                train_loss += loss.item() * data.size(0)
                _, pred = torch.max(output, dim=1)
                correct_tensor = pred.eq(target.data.view_as(pred))
                accuracy = torch.mean(correct_tensor.type(torch.FloatTensor))
                train_acc += accuracy.item() * data.size(0)
                print(
                    f'Epoch: {epoch}\t{100 * (ii + 1) / len(self.model.train_loader):.2f}% complete. {timer() - start:.2f} seconds elapsed in epoch.',
                    end='\r')
            self.model.model.epochs += 1
            # ---- validation pass (no gradients) ----
            with torch.no_grad():
                self.model.model.eval()
                for data, target in self.model.valid_loader:
                    try:
                        data, target = data.cuda(), target.cuda()
                    except:
                        print('Tried CUDA - didn\'t work!')
                        pass
                    output = self.model.model(data)
                    loss = self.model.criterion(output, target)
                    valid_loss += loss.item() * data.size(0)
                    _, pred = torch.max(output, dim=1)
                    correct_tensor = pred.eq(target.data.view_as(pred))
                    accuracy = torch.mean(
                        correct_tensor.type(torch.FloatTensor))
                    valid_acc += accuracy.item() * data.size(0)
                # Convert running sums to dataset averages.
                train_loss = train_loss / len(self.model.train_loader.dataset)
                valid_loss = valid_loss / len(self.model.valid_loader.dataset)
                train_acc = train_acc / len(self.model.train_loader.dataset)
                valid_acc = valid_acc / len(self.model.valid_loader.dataset)
                if (epoch + 1) % self.print_every == 0:
                    print(
                        f'\nEpoch: {epoch} \tTraining Loss: {train_loss:.4f} \tValidation Loss: {valid_loss:.4f}'
                    )
                    print(
                        f'\t\tTraining Accuracy: {100 * train_acc:.2f}%\t Validation Accuracy: {100 * valid_acc:.2f}%'
                    )
                # Save a checkpoint whenever validation loss improves.
                if valid_loss < self.valid_loss_min:
                    torch.save(self.model.model.state_dict(), '_model.ckpt')
                    self.epochs_no_improve = 0
                    self.valid_loss_min = valid_loss
                    valid_best_acc = valid_acc
                    best_epoch = epoch
                    overall_best_epoch = best_epoch
                else:
                    self.epochs_no_improve += 1
                    self.model.model.optimizer = self.model.optimizer
3252854 | from django import forms
from .models import Comment, RacerProfile
class AddCommentForm(forms.ModelForm):
    """ModelForm for leaving a typed comment about a racer."""
    class Meta:
        model = Comment
        # Expose only the racer, the comment category and the text body.
        fields = ('racer', 'comment_type', 'text')
        labels = {
            'comment_type': ('Choose the type of your comment'),
            'text': ('Enter your comment'),
        }
class RacerProfileForm(forms.ModelForm):
    """ModelForm exposing every field of a racer's profile."""
    class Meta:
        model = RacerProfile
        fields = '__all__'
212987 | <reponame>brigitteunger/katas
import unittest
class Solution:
    """LeetCode 58: length of the last word in a string."""

    def lengthOfLastWord(self, s: str) -> int:
        """Return the length of the final space-separated word in *s*.

        Trailing whitespace is ignored; an empty/falsy input yields 0.
        """
        if not s:
            return 0
        # Only the last chunk matters, so strip trailing whitespace and
        # split once from the right.
        last_word = s.rstrip().rsplit(' ', 1)[-1]
        return len(last_word)
class TestLengthOfLastWord(unittest.TestCase):
    """Unit tests for Solution.lengthOfLastWord."""
    def setUp(self):
        # Fresh Solution instance for every test case.
        self.sol = Solution()
    def testLengthOfLastWord_1(self):
        # Typical sentence: last word is "World" (5 characters).
        s = "Hello World"
        length = self.sol.lengthOfLastWord(s)
        self.assertEqual(length, 5)
    def testLengthOfLastWord_2(self):
        # Trailing space must be ignored.
        s = "a "
        length = self.sol.lengthOfLastWord(s)
        self.assertEqual(length, 1)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3205336 | <gh_stars>0
from turtle import Turtle
MOVE_DISTANCE = 20
class Paddle(Turtle):
    """A pong paddle: a white, vertically stretched square turtle.

    The paddle sits on the left or right screen edge and moves up/down
    in fixed steps of MOVE_DISTANCE pixels.
    """

    def __init__(self, paddle_side):
        """Create a paddle on the given side ("left" or "right", any case).

        Raises:
            ValueError: if paddle_side is neither "left" nor "right".
        """
        super().__init__()
        self.penup()
        self.shape("square")
        self.shapesize(stretch_wid=5, stretch_len=1)
        self.color("white")
        self.paddle_side = paddle_side.lower()
        # BUGFIX: position using the normalized (lower-cased) side so
        # mixed-case input such as "Right" works; the original passed the
        # raw argument and raised ValueError despite storing "right".
        self.choose_paddle_side(side=self.paddle_side)

    def choose_paddle_side(self, side):
        """Move the paddle to the x-edge matching *side*."""
        if side == "right":
            self.goto(350, 0)
        elif side == "left":
            self.goto(-350, 0)
        else:
            raise ValueError("Argument 'paddle_side' can be 'right' or 'left'.")

    def move_up(self):
        """Shift the paddle up by one step."""
        y_pos = self.ycor() + MOVE_DISTANCE
        self.goto(self.xcor(), y_pos)

    def move_down(self):
        """Shift the paddle down by one step."""
        y_pos = self.ycor() - MOVE_DISTANCE
        self.goto(self.xcor(), y_pos)
3437650 | <reponame>factorlibre/web_action_request
from openerp import models, fields, api
class Setting(models.TransientModel):
    """Transient settings model used to test the web action request mechanism."""
    _name = 'web.action.request.setting'
    _description = 'test the request'
    _inherit = 'res.config.settings'
    # Window action to send, and the user whose session should receive it.
    action = fields.Many2one('ir.actions.act_window', required=True)
    user = fields.Many2one('res.users', default=lambda self: self.env.user,
                           required=True)
    @api.multi
    def button_check_action_request(self):
        """Send the selected action to the selected user as a notification."""
        action = self.action.read()[0]
        # Run as the target user so the notification reaches their session.
        self.sudo(self.user.id).env['action.request'].notify(action)
        return True
| StarcoderdataPython |
1707485 | <filename>tests/test_middleware.py
import logging
from io import BytesIO
from wsgiref.util import setup_testing_defaults
from pytest import fixture
from .context import WSGIListenerMiddleware, DEFAULT_LISTENER_LOG_NAME
@fixture
def environ_factory():
    """Pytest fixture: factory building a WSGI environ with test defaults."""
    def _environ_factory(**kwargs):
        # Start from the caller's overrides, then let wsgiref fill in
        # every key a conforming WSGI environ must contain.
        environ = dict(kwargs)
        setup_testing_defaults(environ)
        return environ
    return _environ_factory
@fixture
def environ_with_request_body_factory(environ_factory):
    """Pytest fixture: factory attaching a request body to a WSGI environ."""
    def _factory(request_body: BytesIO = None, environ: dict = None):
        if not environ:
            environ = environ_factory()
        if request_body:
            # WSGI reads the body from wsgi.input, bounded by CONTENT_LENGTH.
            environ['wsgi.input'] = request_body
            environ['CONTENT_LENGTH'] = request_body.getbuffer().nbytes
        return environ
    return _factory
def app(environ, start_fn):
    """Tiny WSGI application used as the wrapped target in these tests."""
    response_headers = [('Content-Type', 'text/plain')]
    start_fn('200 OK', response_headers)
    yield b'Hello World!\n'
def start_response(status_code, headers, exc_info=None):
    """Minimal WSGI start_response stub; echoes its arguments for inspection."""
    return status_code, headers, exc_info
def test_middleware_passthrough(environ_factory):
    """The middleware must forward the wrapped app's response body unchanged."""
    environ = environ_factory()
    wrapped_app = WSGIListenerMiddleware(app)
    rv = wrapped_app(environ, start_response)
    assert next(rv) == b'Hello World!\n'
def test_middleware_default_response_listener(caplog, environ_factory):
    """Without explicit listeners, the default listener logs each response."""
    environ = environ_factory()
    wrapped_app = WSGIListenerMiddleware(app)
    with caplog.at_level(logging.INFO, logger=DEFAULT_LISTENER_LOG_NAME):
        wrapped_app(environ, start_response)
    # A log record must have been emitted by the default listener.
    assert caplog.text
def test_listeners(environ_with_request_body_factory):
    """Custom request/response listeners receive the expected call arguments."""
    # noinspection PyAttributeOutsideInit,PyShadowingNames
    class EchoRequestListener:
        def handle(self, environ: dict, request_body: bytes, **kwargs):
            self.environ = environ
            self.request_body = request_body
    # noinspection PyAttributeOutsideInit,PyShadowingNames
    class EchoResponseListener:
        def handle(self, status_code: int, environ: dict, content_length: int, response_body: bytes,
                   processing_time: float, **kwargs):
            self.status_code = status_code
            self.environ = environ
            self.content_length = content_length
            self.response_body = response_body
            self.processing_time = processing_time
    request_listener = EchoRequestListener()
    response_listener = EchoResponseListener()
    body = BytesIO(b'Test')
    environ = environ_with_request_body_factory(body)
    wrapped_app = WSGIListenerMiddleware(app, request_listeners=[request_listener],
                                         response_listeners=[response_listener])
    wrapped_app(environ, start_response)
    # The request listener saw the environ and the raw request body.
    assert request_listener.environ is environ
    assert request_listener.request_body == b'Test'
    # The response listener saw the status, body, size and timing info.
    assert response_listener.status_code
    assert response_listener.environ is environ
    assert response_listener.response_body == b'Hello World!\n'
    assert response_listener.content_length == len(b'Hello World!\n')
    assert response_listener.processing_time
8189479 | <filename>api/authorization_endpoint.py
#
# Copyright (C) 2019-2021 Authlete, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the
# License.
import logging
import time
from django.contrib.auth import logout
from django.contrib.auth.models import User
from django.shortcuts import render
from authlete.django.handler.authorization_request_base_handler import AuthorizationRequestBaseHandler
from authlete.django.handler.authorization_request_error_handler import AuthorizationRequestErrorHandler
from authlete.django.handler.no_interaction_handler import NoInteractionHandler
from authlete.django.web.request_utility import RequestUtility
from authlete.dto.authorization_action import AuthorizationAction
from authlete.dto.authorization_fail_action import AuthorizationFailAction
from authlete.dto.authorization_fail_reason import AuthorizationFailReason
from authlete.dto.authorization_fail_request import AuthorizationFailRequest
from authlete.dto.authorization_request import AuthorizationRequest
from authlete.types.prompt import Prompt
from .authorization_page_model import AuthorizationPageModel
from .base_endpoint import BaseEndpoint
from .spi.no_interaction_handler_spi_impl import NoInteractionHandlerSpiImpl
logger = logging.getLogger(__name__)
class AuthorizationEndpoint(BaseEndpoint):
    def __init__(self, api):
        """Initialize with an Authlete API client (delegates to BaseEndpoint)."""
        super().__init__(api)
    def handle(self, request):
        """Process an OIDC authorization request and return an HTTP response."""
        # Query parameters or form parameters. OIDC Core 1.0 requires that
        # the authorization endpoint support both GET and POST methods.
        params = RequestUtility.extractParameters(request)
        # Call Authlete's /api/auth/authorization API.
        res = self.__callAuthorizationApi(params)
        # 'action' in the response denotes the next action which this
        # authorization endpoint implementation should take.
        action = res.action
        if action == AuthorizationAction.INTERACTION:
            # Process the authorization request with user interaction.
            return self.__handleInteraction(request, res)
        elif action == AuthorizationAction.NO_INTERACTION:
            # Process the authorization request without user interaction.
            # The flow reaches here only when the authorization request
            # contains 'prompt=none'.
            return self.__handleNoInteraction(request, res)
        else:
            # Handle other error cases.
            return self.__handleError(res)
    def __callAuthorizationApi(self, parameters):
        """Forward the raw request parameters to /api/auth/authorization."""
        # Create a request for /api/auth/authorization API.
        req = AuthorizationRequest()
        req.parameters = parameters
        # Call /api/auth/authorization API.
        return self.api.authorization(req)
    def __handleNoInteraction(self, request, response):
        """Handle 'prompt=none' requests, where no user interaction is allowed."""
        logger.debug("authorization_endpoint: Processing the request without user interaction.")
        # Make NoInteractionHandler handle the case of 'prompt=none'.
        # An implementation of the NoInteractionHandlerSpi interface
        # needs to be given to the constructor of NoInteractionHandler.
        return NoInteractionHandler(
            self.api, NoInteractionHandlerSpiImpl(request)).handle(response)
    def __handleError(self, response):
        """Build an error response from the Authlete result message."""
        logger.debug("authorization_endpoint: The request caused an error: {}".format(response.resultMessage))
        # Make AuthorizationRequestErrorHandler handle the error case.
        return AuthorizationRequestErrorHandler().handle(response)
    def __handleInteraction(self, request, response):
        """Render the authorization (consent) page for the interactive flow."""
        logger.debug("authorization_endpoint: Processing the request with user interaction.")
        # Prepare a model object which is needed to render the authorization page.
        model = self.__prepareModel(request, response)
        # In the current implementation, model is None only when there is no user
        # who has the required subject.
        if model is None:
            return self.__authorizationFail(
                response.ticket, AuthorizationFailReason.NOT_AUTHENTICATED)
        # Store some variables into the session so that they can be
        # referred to later in authorization_decision_endpoint.py.
        session = request.session
        session['ticket'] = response.ticket
        session['claimNames'] = response.claims
        session['claimLocales'] = response.claimsLocales
        # Render the authorization page.
        return render(request, 'api/authorization.html', {'model':model})
def __prepareModel(self, request, response):
# Model object used to render the authorization page.
model = AuthorizationPageModel(response)
# Check if login is required.
model.loginRequired = self.__isLoginRequired(request, response)
if model.loginRequired == False:
# The user's name that will be referred to in the authorization page.
model.userName = request.user.first_name or request.user.username
return model
# Logout the user (if a user has logged in).
logout(request)
# If the authorization request does not require a specific 'subject'.
if response.subject is None:
# This simple implementation uses 'login_hint' as the initial
# value of the login ID.
if response.loginHint is not None:
model.loginId = response.loginHint
return model
# The authorization request requires a specific 'subject' be used.
try:
# Find the user whose subject is the required subject.
user = User.objects.get(id=response.subject)
except Exception:
# There is no user who has the required subject.
logger.debug("authorization_endpoint: The request fails because there is no user who has the required subject.")
return None
# The user who is identified by the subject exists.
model.loginId = user.username
model.loginIdReadOnly = 'readonly'
return model
def __isLoginRequired(self, request, response):
# If no user has logged in.
if request.user.is_authenticated == False:
return True
# Check if the 'prompt' parameter includes 'login'.
included = self.__isLoginIncludedInPrompt(response)
if included:
# Login is explicitly required by the client.
# The user has to re-login.
logger.debug("authorization_endpoint: Login is required because 'prompt' includes 'login'.")
return True
# If the authorization request requires a subject.
if response.subject is not None:
# If the current user's subject does not match the required one.
if request.user.id != response.subject:
# The user needs to login with another user account.
logger.debug("authorization_endpoint: Login is required because the current user's subject does not match the required one.")
return True
# Check if the max age has passed since the last time the user logged in.
exceeded = self.__isMaxAgeExceeded(request, response)
if exceeded:
# The user has to re-login.
logger.debug("authorization_endpoint: Login is required because the max age has passed since the last login.")
return True
# Login is not required.
return False
def __isLoginIncludedInPrompt(self, response):
# If the authorization request does not include a 'prompt' parameter.
if response.prompts is None:
return False
# For each value in the 'prompt' parameter.
for prompt in response.prompts:
if prompt == Prompt.LOGIN:
# 'login' is included in the 'prompt' parameter.
return True
# The 'prompt' parameter does not include 'login'.
return False
def __isMaxAgeExceeded(self, request, response):
# If the authorization request does not include a 'max_age' parameter
# and the 'default_max_age' metadata of the client is not set.
if response.maxAge <= 0:
# Don't have to care about the maximum authentication age.
return False
# Calculate the number of seconds that have elapsed since the last login.
age = int(time.time() - request.user.last_login)
if age <= response.maxAge:
# The max age is not exceeded yet.
return False
# The max age has been exceeded.
return True
def __authorizationFail(self, ticket, reason):
# Call /api/auth/authorization/fail API.
handler = AuthorizationRequestBaseHandler(self.api)
return handler.authorizationFail(ticket, reason)
| StarcoderdataPython |
1680583 | from download_file_if_not_exists import download_files_if_not_exist
# Disclaimer: This script is called by qmake or CMake. It is not necessary to call it manually.
# Download all the assets as .zip files from meshes.mailbase.info, unzips them, moves them in assets/*, and finally removes the .zip files.
# Already downloaded assets are skipped.
# The target folder corresponds to the specified ones, e.g.
# FEMFX/car-body-tets
# goes in assets/FEMFX/*
# Backslash line-continuations are unnecessary inside brackets; Python
# continues the statement implicitly until the list literal closes.
download_files_if_not_exist([
    "FEMFX/car-body-tets",
    "FEMFX/car-body-tets-convex",

    "FEMFX/car-hood-tets",
    "FEMFX/car-wheel0-tets",
    "FEMFX/car-wheel1-tets",
    "FEMFX/car-wheel2-tets",
    "FEMFX/car-wheel3-tets",

    "nasa/advanced_crew_escape_suit",
    "nasa/advanced_crew_escape_suit_convex",

    "primitives/cube_12k",
    "primitives/cube_big_50k",
    "primitives/cylinder_triagulated",
    "primitives/floor_big_50k",
    "primitives/floor_big_120k",
    "fractal_terrain",

    "animals/Armadillo40k",
    "animals/Bunny35k",
    "animals/Frog19k",

    "textures/stonetiles_002_diff"])
| StarcoderdataPython |
4874653 | <filename>habitat/tasks/rearrange/sub_tasks/reach_sensors.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from habitat.core.embodied_task import Measure
from habitat.core.registry import registry
from habitat.tasks.rearrange.rearrange_sensors import EndEffectorToRestDistance
@registry.register_measure
class RearrangeReachReward(Measure):
    """Reward for moving the end-effector toward its resting position."""
    cls_uuid: str = "rearrange_reach_reward"
    @staticmethod
    def _get_uuid(*args, **kwargs):
        return RearrangeReachReward.cls_uuid
    def __init__(self, *args, sim, config, task, **kwargs):
        self._config = config
        super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
    def reset_metric(self, *args, episode, task, observations, **kwargs):
        # Distance from the previous step; consumed by the DIFF_REWARD branch.
        self._prev = None
        task.measurements.check_measure_dependencies(
            self.uuid,
            [
                EndEffectorToRestDistance.cls_uuid,
                RearrangeReachSuccess.cls_uuid,
            ],
        )
        self.update_metric(
            *args,
            episode=episode,
            task=task,
            observations=observations,
            **kwargs
        )
    def update_metric(self, *args, episode, task, observations, **kwargs):
        cur_dist = task.measurements.measures[
            EndEffectorToRestDistance.cls_uuid
        ].get_metric()
        if self._config.SPARSE_REWARD:
            # Binary reward: SCALE on success, 0 otherwise.
            is_succ = task.measurements.measures[
                RearrangeReachSuccess.cls_uuid
            ].get_metric()
            self._metric = self._config.SCALE * float(is_succ)
        else:
            if self._config.DIFF_REWARD:
                # Reward the *decrease* in distance since the previous step
                # (0.0 on the first step after a reset, when _prev is None).
                if self._prev is None:
                    self._metric = 0.0
                else:
                    self._metric = self._prev - cur_dist
            else:
                # Dense reward: negative scaled distance to the rest position.
                self._metric = -1.0 * self._config.SCALE * cur_dist
        self._prev = cur_dist
@registry.register_measure
class RearrangeReachSuccess(Measure):
    """Success measure: True while the end-effector is within SUCC_THRESH of rest."""
    cls_uuid: str = "rearrange_reach_success"
    @staticmethod
    def _get_uuid(*args, **kwargs):
        return RearrangeReachSuccess.cls_uuid
    def __init__(self, *args, sim, config, task, **kwargs):
        super().__init__(*args, sim=sim, config=config, task=task, **kwargs)
        self._config = config
    def reset_metric(self, *args, episode, task, observations, **kwargs):
        task.measurements.check_measure_dependencies(
            self.uuid,
            [
                EndEffectorToRestDistance.cls_uuid,
            ],
        )
        self.update_metric(
            *args,
            episode=episode,
            task=task,
            observations=observations,
            **kwargs
        )
    def update_metric(self, *args, episode, task, observations, **kwargs):
        # Success iff the current end-effector-to-rest distance is below the
        # configured threshold. Re-evaluated every step (not sticky).
        self._metric = (
            task.measurements.measures[
                EndEffectorToRestDistance.cls_uuid
            ].get_metric()
            < self._config.SUCC_THRESH
        )
@registry.register_measure
class AnyReachSuccess(Measure):
    """Latching success: True once RearrangeReachSuccess has been True at any step."""
    cls_uuid: str = "any_reach_success"
    @staticmethod
    def _get_uuid(*args, **kwargs):
        return AnyReachSuccess.cls_uuid
    def reset_metric(self, *args, episode, task, observations, **kwargs):
        task.measurements.check_measure_dependencies(
            self.uuid,
            [
                RearrangeReachSuccess.cls_uuid,
            ],
        )
        # Sticky flag: once True it stays True for the rest of the episode.
        self._did_succ = False
        self.update_metric(
            *args,
            episode=episode,
            task=task,
            observations=observations,
            **kwargs
        )
    def update_metric(self, *args, episode, task, observations, **kwargs):
        self._did_succ = (
            self._did_succ
            or task.measurements.measures[
                RearrangeReachSuccess.cls_uuid
            ].get_metric()
        )
        self._metric = self._did_succ
| StarcoderdataPython |
6511935 | from ExcelToSQL import ExcelToSQL as esql
# Instantiating ExcelToSQL (aliased as esql) runs the conversion; the work
# appears to happen in the class's __init__ — TODO confirm against ExcelToSQL.
esql()
# excToSQL.TakeUserRequirements()
# excToSQL.CheckWhichOperationToRun()
11237742 | <filename>boards/ZCU111/rfsoc_qpsk/drivers/qpsk_overlay.py
from pynq import Overlay
import xrfclk
import xrfdc
import os
import numpy as np
import ipywidgets as ipw
from rfsoc_qpsk import dma_timer, sdr_plots, qpsk_rx, qpsk_tx
class TimerRegistry():
    """Track groups of active DMA timer threads by name.

    Re-running a notebook cell while its DMA timer is active would orphan
    the old timer. Registering each cell's timers under a key lets the
    previously registered group be stopped safely before being replaced.
    """
    def __init__(self):
        # Maps a group name to the list of timers registered under it.
        self.registry = {}

    def register_timers(self, key, timers):
        """Register *timers* under *key*, stopping any group previously held there.

        key: String name for this timer group
        timers: List of DmaTimer objects
        """
        previous = self.registry.get(key)
        if previous is not None:
            for timer in previous:
                timer.stop()
        self.registry[key] = timers
class QpskOverlay(Overlay):
    """Overlay subclass for rfsoc-qpsk.
    Performs initialisation (including RF components) and exposes them with
    more friendly names in a flatter hierarchy. Less typing for everyone.
    """
    def __init__(self, bitfile_name=None, init_rf_clks=True, dark_theme=False, presentation_mode=False, **kwargs):
        """Construct a new QpskOverlay
        bitfile_name: Optional. If left None, the 'rfsoc_qpsk.bit' bundled with this
                      rfsoc-qpsk package will be used.
        init_rf_clks: If true (default), the reference clocks are configured
                      for all tiles. If the clocks are already configured, set
                      to false for faster execution.
        dark_theme: Flag to enable a dark theme for plots
        presentation_mode: Flag to enable a dark theme with thick lines and
                           bigger font
        """
        # Generate default bitfile name
        if bitfile_name is None:
            this_dir = os.path.dirname(__file__)
            bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_qpsk.bit')
        # Set optional theming for dark mode
        if dark_theme:
            from IPython.display import display, HTML
            import plotly.io as pio
            # Apply plotly theming
            dark_template = pio.templates['plotly_dark']
            dark_template.layout.paper_bgcolor = 'rgb(0,0,0,0)'
            dark_template.layout.plot_bgcolor = 'rgb(0,0,0,0)'
            dark_template.layout.legend.bgcolor = 'rgb(0,0,0,0)'
            pio.templates['dark_plot'] = dark_template
            pio.templates.default = 'dark_plot'
        # Set optional theming for presentation mode
        if presentation_mode:
            from IPython.display import display, HTML
            import plotly.io as pio
            # Apply plotly theming
            pio.templates.default = 'plotly_dark+presentation'
            # Force dark style for ipywidget tab background
            display(HTML("""
            <style>
            .jupyter-widgets.widget-tab > .widget-tab-contents {
                background: inherit !important;
            }
            </style>
            """))
        # Create Overlay (loads the bitstream onto the PL)
        super().__init__(bitfile_name, **kwargs)
        # Extract in-use dataconverter objects with friendly names
        self.rf = self.usp_rf_data_converter_0
        self.adc_tile = self.rf.adc_tiles[0]
        self.adc_block = self.adc_tile.blocks[0]
        self.dac_tile = self.rf.dac_tiles[1]
        self.dac_block = self.dac_tile.blocks[2]
        # Start up LMX clock
        if init_rf_clks:
            xrfclk.set_all_ref_clks(409.6)
        # Set sane DAC defaults (PLL, Nyquist zone, fine mixer at 1000 MHz)
        self.dac_tile.DynamicPLLConfig(1, 409.6, 1228.8)
        self.dac_block.NyquistZone = 2
        self.dac_block.MixerSettings = {
            'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
            'EventSource': xrfdc.EVNT_SRC_IMMEDIATE,
            'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
            'Freq': 1000,
            'MixerMode': xrfdc.MIXER_MODE_C2R,
            'MixerType': xrfdc.MIXER_TYPE_FINE,
            'PhaseOffset': 0.0
        }
        self.dac_block.UpdateEvent(xrfdc.EVENT_MIXER)
        self.dac_tile.SetupFIFO(True)
        # Set sane ADC defaults (mirrors the DAC setup, R2C direction)
        self.adc_tile.DynamicPLLConfig(1, 409.6, 1228.8)
        self.adc_block.NyquistZone = 2
        self.adc_block.MixerSettings = {
            'CoarseMixFreq': xrfdc.COARSE_MIX_BYPASS,
            'EventSource': xrfdc.EVNT_SRC_TILE,
            'FineMixerScale': xrfdc.MIXER_SCALE_1P0,
            'Freq': 1000,
            'MixerMode': xrfdc.MIXER_MODE_R2C,
            'MixerType': xrfdc.MIXER_TYPE_FINE,
            'PhaseOffset': 0.0
        }
        self.adc_block.UpdateEvent(xrfdc.EVENT_MIXER)
        self.adc_tile.SetupFIFO(True)
        # Touch RX and TX drivers for strict evaluation
        self.qpsk_tx.qpsk_tx.enable=1
        self.qpsk_rx.qpsk_rx_dec.enable=1
        self.qpsk_rx.qpsk_rx_csync.enable=1
        self.qpsk_rx.qpsk_rx_rrc.enable=1
        self.qpsk_rx.qpsk_rx_tsync.enable=1
        self.timers = TimerRegistry()
    def init_i2c(self):
        """Initialize the I2C control drivers on RFSoC2x2.
        This should happen after a bitstream is loaded since I2C reset
        is connected to PL pins. The I2C-related drivers are made loadable
        modules so they can be removed or inserted.
        """
        module_list = ['i2c_dev', 'i2c_mux_pca954x', 'i2c_mux']
        # Remove all modules (if present), then re-insert them in reverse order.
        for module in module_list:
            cmd = "if lsmod | grep {0}; then rmmod {0}; fi".format(module)
            ret = os.system(cmd)
            if ret:
                raise RuntimeError(
                    'Removing kernel module {} failed.'.format(module))
        module_list.reverse()
        for module in module_list:
            cmd = "modprobe {}".format(module)
            ret = os.system(cmd)
            if ret:
                raise RuntimeError(
                    'Inserting kernel module {} failed.'.format(module))
    def plot_group(self, group_name, domains, get_time_data, fs, get_freq_data=None, get_const_data=None):
        """Create a group of plots for a given set of data generators.
        group_name: String name for plot group (used to register timers with
                    the TimerRegistry)
        domains: List of plot types to generate. Select from:
                 ['time','time-binary','frequency','constellation'].
        fs: Sampling frequency. Used for time axis scaling
        get_time_data: Callback function that returns a buffer of time domain
                       samples
        get_freq_data: Optional callback that returns a buffer of frequency
                       domain samples. When not specified, a software FFT will
                       be performed on the get_time_data callback instead.
        get_const_data: Optional callback that returns a buffer of time-domain
                        data for any constellation plots. When not specified,
                        the get_time_data callback will be used.
        """
        plots = []
        # Concatenate n buffers from a data callback into one array.
        def many(f, n=4):
            return np.concatenate([f() for _ in range(n)])
        for domain in domains:
            if domain=='frequency':
                # HW accelerated FFT
                if get_freq_data != None:
                    f_plot = sdr_plots.HWFreqPlot(
                        [get_freq_data() for _ in range(4)],
                        fs, animation_period=100, w=700)
                    f_dt = dma_timer.DmaTimer(f_plot.add_frame, get_freq_data, 0.3)
                # SW FFT
                else:
                    f_plot = sdr_plots.IQFreqPlot(
                        [many(get_time_data) for _ in range(4)],
                        fs, x_range=(-2000,2000), animation_period=100, w=700)
                    f_dt = dma_timer.DmaTimer(f_plot.add_frame, lambda:many(get_time_data), 0.3)
                plots.append(dict(title='Frequency domain', plot=f_plot, control=f_dt))
            elif domain=='time' or domain=='time-binary':
                if domain=='time-binary':
                    # Staircase rendering for discrete symbol values.
                    iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700, scaling=1, ylabel='Symbol value')
                    iq_plot.set_line_mode(lines=True, markers=True, shape='hvh')
                    iq_plot.get_widget().layout.yaxis.dtick=1
                else:
                    iq_plot = sdr_plots.IQTimePlot(many(get_time_data), fs, w=700)
                    iq_plot.set_line_mode(markers=False)
                iq_dt = dma_timer.DmaTimer(iq_plot.add_data, get_time_data, 0.05)
                plots.append(dict(title='Time domain', plot=iq_plot, control=iq_dt))
            elif domain=='constellation':
                c_plot = sdr_plots.IQConstellationPlot(many(get_const_data or get_time_data, n=10), h=550, fade=True)
                c_dt = dma_timer.DmaTimer(c_plot.add_data, get_const_data or get_time_data, 0.05)
                plots.append(dict(title='Constellation', plot=c_plot, control=c_dt,
                                  layout=ipw.Layout(width='550px', margin='auto')))
        # Registering under group_name stops any timers a previous run left behind.
        self.timers.register_timers(group_name, list(map(lambda tab: tab['control'], plots)))
        return QpskOverlay.tab_plots(plots)
    @staticmethod
    def tab_plots(tabs):
        """Helper function to generate a Tab widget given a list of definitions.
        tabs: A list of dicts describing a single tab. Each element needs three
              keys: 'plot' with a SdrPlot object, 'control' with a DmaTimer
              object, and 'title' with a string.
        """
        widgets = []
        titles = []
        for tab in tabs:
            widgets.append(ipw.VBox([
                tab['plot'].get_widget(),tab['control'].get_widget()
            ],layout=tab.get('layout',ipw.Layout())))
            titles.append(tab['title'])
        tab_widget = ipw.Tab(widgets)
        for i, title in enumerate(titles):
            tab_widget.set_title(i, title)
        QpskOverlay._tab_load_resizer_callback(tab_widget)
        return tab_widget
    @staticmethod
    def _tab_load_resizer_callback(tabs):
        """Helper function to handle relative widths for plots in hidden tabs"""
        # NOTE(review): `display` is only imported inside __init__'s theming
        # branches — presumably relies on the notebook-global `display`; confirm.
        out = ipw.Output()
        display(out)
        @out.capture()
        def callback(change):
            # Toggling autosize forces the newly revealed plot to re-layout.
            plot = tabs.children[change['new']].children[0]
            plot.layout.autosize = False
            plot.layout.autosize = True
        tabs.observe(callback, names='selected_index')
    def _tx_display_generator(self):
        # Accordion with the TX symbol plot and the pulse-shaped TX output.
        tx_plot_names = ['Symbols', 'Post TX RRC']
        plot_tx_symbol = self.plot_group(
            'tx_symbol', ['time-binary'], self.qpsk_tx.get_symbols, fs=500
        )
        plot_tx_shaped = self.plot_group(
            'tx_shaped', ['time', 'frequency'], self.qpsk_tx.get_shaped_time, fs=4000,
            get_freq_data=self.qpsk_tx.get_shaped_fft
        )
        tx_display_widgets = ipw.Accordion(children=[plot_tx_symbol,
                                                     plot_tx_shaped])
        for i in range(0, 2):
            tx_display_widgets.set_title(i, tx_plot_names[i])
        return tx_display_widgets
    def _rx_display_generator(self):
        # Quantise received IQ samples to the ideal constellation points
        # (1 bit per rail) for the binary time plot.
        def classify_bits(frame):
            bit_quantise = lambda b: 1 if b>0 else 0
            symbol_quantise = lambda i, q: bit_quantise(i) + 1j*bit_quantise(q)
            return np.fromiter(
                map(symbol_quantise, np.real(frame), np.imag(frame)),
                # NOTE(review): np.complex was removed in NumPy 1.24+;
                # newer NumPy needs `complex` / np.complex128 here.
                dtype=np.complex
            )
        rx_domains = ['time', 'frequency', 'constellation']
        rx_plot_names = ['Decimation', 'Coarse Sync', 'Post RX RRC', 'Time Sync']
        plot_rx_decimated = self.plot_group(
            'rx_decimated', rx_domains, self.qpsk_rx.get_decimated, fs=4000
        )
        plot_rx_coarse_sync = self.plot_group(
            'rx_coarse_sync', rx_domains, self.qpsk_rx.get_coarse_synced, fs=4000
        )
        plot_rx_rrced = self.plot_group(
            'rx_rrced', rx_domains, self.qpsk_rx.get_rrced, fs=16000
        )
        plot_rx_constellation = self.plot_group(
            'rx_data', ['constellation', 'time-binary'],
            lambda : classify_bits(self.qpsk_rx.get_data()), fs=500,
            get_const_data=self.qpsk_rx.get_data
        )
        rx_display_widgets = ipw.Accordion(children=[plot_rx_decimated,
                                                     plot_rx_coarse_sync,
                                                     plot_rx_rrced,
                                                     plot_rx_constellation])
        for i in range(0, 4):
            rx_display_widgets.set_title(i, rx_plot_names[i])
        return rx_display_widgets
    def _rx_simple_display_generator(self):
        # Constellation-only RX view for the simplified demonstrator.
        plot_rx_constellation = self.plot_group(
            'rx_data', ['constellation'], self.qpsk_rx.get_data, fs=500,
            get_const_data=self.qpsk_rx.get_data
        )
        return plot_rx_constellation
    def _tx_simple_display_generator(self):
        # Time + frequency view of the shaped TX signal for the demonstrator.
        plot_tx_shaped = self.plot_group(
            'tx_shaped', ['time', 'frequency'], self.qpsk_tx.get_shaped_time, fs=4000,
            get_freq_data=self.qpsk_tx.get_shaped_fft
        )
        return plot_tx_shaped
    def _common_control_generator(self):
        # Extract the new value from an ipywidgets change dict before
        # forwarding it to a single-argument callback.
        def unwrap_slider_val(callback):
            return lambda slider_val : callback(slider_val['new'])
        # Re-program an RF block's fine mixer NCO frequency.
        def update_nco(rf_block, nco_freq):
            mixer_cfg = rf_block.MixerSettings
            mixer_cfg['Freq'] = nco_freq
            rf_block.MixerSettings = mixer_cfg
            rf_block.UpdateEvent(xrfdc.EVENT_MIXER)
        def new_nco_slider(title):
            return ipw.FloatSlider(
                value=1000,
                min=620,
                max=1220,
                step=20,
                description=title,
                disabled=False,
                continuous_update=False,
                orientation='horizontal',
                readout=True,
                readout_format='.1f',
                style = {'description_width': 'initial'}
            )
        pow_slider = ipw.SelectionSlider(
            options=[0.1, 0.3, 0.6, 1],
            value=1,
            description='Transmit Power:',
            style = {'description_width': 'initial'}
        )
        pow_slider.observe(unwrap_slider_val(self.qpsk_tx.set_gain), names='value')
        tx_nco_slider = new_nco_slider('TX Centre Frequency (MHz)')
        rx_nco_slider = new_nco_slider('RX Centre Frequency (MHz)')
        # Keep the TX and RX centre frequencies locked together.
        ipw.link((rx_nco_slider, 'value'), (tx_nco_slider, 'value'))
        tx_nco_slider.observe(
            unwrap_slider_val(lambda v: update_nco(self.dac_block, v)),
            names='value'
        )
        rx_nco_slider.observe(
            unwrap_slider_val(lambda v: update_nco(self.adc_block, v)),
            names='value'
        )
        control_widgets = ipw.Accordion(children=[ipw.VBox([
            pow_slider,
            tx_nco_slider,
            rx_nco_slider])])
        control_widgets.set_title(0, 'System Control')
        return control_widgets
    def _qpsk_generator(self):
        # Assemble the full dashboard: controls + TX view in a sidebar,
        # RX view as the main panel.
        tx_display_widget = self._tx_simple_display_generator()
        rx_display_widget = self._rx_simple_display_generator()
        common_control_widget = self._common_control_generator()
        control_accordion = ipw.Accordion(children=[common_control_widget])
        tx_display_accordion = ipw.Accordion(children=[tx_display_widget])
        control_accordion.set_title(0, 'System Control')
        tx_display_accordion.set_title(0, 'Transmitter Visualisation')
        side_bar = ipw.VBox([control_accordion, tx_display_accordion])
        main_app = ipw.Accordion(children=[rx_display_widget])
        main_app.set_title(0, 'Receiver Visualisation')
        return ipw.HBox([side_bar, main_app])
    def qpsk_demonstrator_application(self):
        """Return the assembled QPSK demonstrator dashboard widget."""
        app = self._qpsk_generator()
        return app
| StarcoderdataPython |
3234590 | #
# Verified
#
from manimlib.imports import *
# Helpers
def get_shadow(mobject, opacity=0.5):
    """Return a flattened copy of *mobject* styled as its shadow on the xy plane."""
    shadow = mobject.deepcopy()
    # Project every point straight down onto z = 0.
    shadow.apply_function(lambda point: [point[0], point[1], 0])
    # Darken the fill in proportion to the original's fill opacity.
    shade = interpolate_color(
        mobject.get_fill_color(), BLACK, mobject.get_fill_opacity()
    )
    shadow.set_fill(shade, opacity=opacity)
    shadow.set_stroke(BLACK, 0.5, opacity=opacity)
    shadow.set_shade_in_3d(False)
    return shadow
def get_boundary_points(shadow, n_points=20):
    """Sample *n_points* extreme points of *shadow*'s boundary, one per compass direction."""
    boundary = shadow.get_points_defining_boundary()
    extremes = []
    for direction in compass_directions(n_points):
        # Support point: the boundary point furthest along this direction.
        extremes.append(boundary[np.argmax(np.dot(boundary, direction.T))])
    return np.array(extremes)
def get_area(planar_mobject):
    """Approximate the area of a planar mobject via the shoelace formula."""
    boundary = get_boundary_points(planar_mobject, 100)
    xs, ys = boundary[:, 0], boundary[:, 1]
    # Difference from each point to its predecessor, wrapping around the polygon.
    dxs = np.roll(xs, 1) - xs
    dys = np.roll(ys, 1) - ys
    # Shoelace formula: half the absolute sum of signed cross products.
    return abs(0.5 * np.sum(xs * dys - ys * dxs))
def get_xy_plane_projection_point(p1, p2):
    """Return where the line through p1 and p2 pierces the z = 0 plane.

    Walks from p1 along the direction (p2 - p1) by exactly the parameter
    needed to cancel p1's z coordinate.
    """
    direction = p2 - p1
    t = p1[2] / direction[2]
    return p1 - t * direction
# Scenes
class ShowShadows(ThreeDScene):
    """Scene: reorient a 3D object above a plane and track its shadow's area."""
    CONFIG = {
        "object_center": [0, 0, 3],
        "area_label_center": [0, -1.5, 0],
        "surface_area": 6.0,
        "num_reorientations": 10,
        "camera_config": {
            "light_source_start_point": 10 * OUT,
            "frame_center": [0, 0, 0.5],
        },
        "initial_orientation_config": {
            "phi": 60 * DEGREES,
            "theta": -120 * DEGREES,
        }
    }
    def setup(self):
        # Build the static scene elements before construct() runs.
        self.add_plane()
        self.setup_orientation_trackers()
        self.setup_object_and_shadow()
        self.add_shadow_area_label()
        self.add_surface_area_label()
    def add_plane(self):
        # Ground plane with a grid overlay; receives the shadow.
        plane = self.plane = Rectangle(
            width=FRAME_WIDTH,
            height=24.2,
            stroke_width=0,
            fill_color=WHITE,
            fill_opacity=0.35,
        )
        plane.set_sheen(0.2, DR)
        grid = NumberPlane(
            color=LIGHT_GREY,
            secondary_color=DARK_GREY,
            y_radius=int(plane.get_height() / 2),
            stroke_width=1,
            secondary_line_ratio=0,
        )
        plane.add(grid)
        # Extra point far below keeps the plane behind 3D content when sorting.
        plane.add(VectorizedPoint(10 * IN))
        plane.set_shade_in_3d(True, z_index_as_group=True)
        self.add(plane)
    def setup_orientation_trackers(self):
        # Euler angles
        self.alpha_tracker = ValueTracker(0)
        self.beta_tracker = ValueTracker(0)
        self.gamma_tracker = ValueTracker(0)
    def setup_object_and_shadow(self):
        # Both redraw every frame so the shadow follows the reoriented object.
        self.obj3d = always_redraw(self.get_reoriented_object)
        self.shadow = always_redraw(lambda: get_shadow(self.obj3d))
    def add_shadow_area_label(self):
        text = TextMobject("Shadow area: ")
        decimal = DecimalNumber(0)
        label = VGroup(text, decimal)
        label.arrange(RIGHT)
        label.scale(1.5)
        label.move_to(self.area_label_center - decimal.get_center())
        self.shadow_area_label = label
        self.shadow_area_decimal = decimal
        # def update_decimal(decimal):
        #     # decimal.set_value(get_area(self.shadow))
        #     self.add_fixed_in_frame_mobjects(decimal)
        # decimal.add_updater(update_decimal)
        # Continuously recompute the shadow's area and pin the number to the frame.
        decimal.add_updater(
            lambda d: d.set_value(get_area(self.shadow))
        )
        decimal.add_updater(
            lambda d: self.add_fixed_in_frame_mobjects(d)
        )
        # self.add_fixed_orientation_mobjects(label)
        self.add_fixed_in_frame_mobjects(label)
        self.add(label)
        self.add(decimal)
    def add_surface_area_label(self):
        # Static label showing the object's (configured) total surface area.
        text = TextMobject("Surface area: ")
        decimal = DecimalNumber(self.surface_area)
        label = VGroup(text, decimal)
        label.arrange(RIGHT)
        label.scale(1.25)
        label.set_fill(YELLOW)
        label.set_background_stroke(width=3)
        label.next_to(self.obj3d, RIGHT, LARGE_BUFF)
        label.shift(MED_LARGE_BUFF * IN)
        self.surface_area_label = label
        self.add_fixed_orientation_mobjects(label)
    def construct(self):
        # Show creation
        # Use a static copy so the draw-in animation isn't fighting the updaters.
        obj3d = self.obj3d.copy()
        obj3d.clear_updaters()
        temp_shadow = always_redraw(lambda: get_shadow(obj3d))
        self.add(temp_shadow)
        self.move_camera(
            **self.initial_orientation_config,
            added_anims=[
                LaggedStartMap(DrawBorderThenFill, obj3d)
            ],
            run_time=2
        )
        self.begin_ambient_camera_rotation(0.01)
        self.remove(obj3d, temp_shadow)
        average_label = self.get_average_label()
        # Reorient
        self.add(self.obj3d, self.shadow)
        for n in range(self.num_reorientations):
            self.randomly_reorient()
            # Reveal the "Average = Surface area / 4" claim mid-sequence.
            if n == 3:
                self.add_fixed_in_frame_mobjects(average_label)
                self.play(Write(average_label, run_time=2))
            else:
                self.wait()
    def randomly_reorient(self, run_time=3):
        # Animate all three Euler-angle trackers to fresh random values.
        a, b, c = TAU * np.random.random(3)
        self.play(
            self.alpha_tracker.set_value, a,
            self.beta_tracker.set_value, b,
            self.gamma_tracker.set_value, c,
            run_time=run_time
        )
    #
    def get_object(self):
        # Subclasses override this to show shadows of other solids.
        cube = Cube()
        cube.set_height(1)
        # cube.set_width(2, stretch=True)
        cube.set_stroke(WHITE, 0.5)
        return cube
    def get_reoriented_object(self):
        # Rebuild the object and apply the tracked Euler rotations about its center.
        obj3d = self.get_object()
        angles = [
            self.alpha_tracker.get_value(),
            self.beta_tracker.get_value(),
            self.gamma_tracker.get_value(),
        ]
        vects = [OUT, RIGHT, OUT]
        center = self.object_center
        obj3d.move_to(center)
        for angle, vect in zip(angles, vects):
            obj3d.rotate(angle, vect, about_point=center)
        return obj3d
    def get_average_label(self):
        # Boxed annotation: average shadow area equals surface area over 4.
        rect = SurroundingRectangle(
            self.shadow_area_decimal,
            buff=SMALL_BUFF,
            color=RED,
        )
        words = TextMobject(
            "Average", "=",
            "$\\frac{\\text{Surface area}}{4}$"
        )
        words.scale(1.5)
        words[0].match_color(rect)
        words[2].set_color(self.surface_area_label[0].get_fill_color())
        words.set_background_stroke(width=3)
        words.next_to(
            rect, DOWN,
            index_of_submobject_to_align=0,
        )
        # words.shift(MED_LARGE_BUFF * LEFT)
        return VGroup(rect, words)
class ShowInfinitelyFarLightSource(ShowShadows):
    """Variant scene: move a visible point light away until its rays are parallel."""
    CONFIG = {
        "num_reorientations": 1,
        "camera_center": [0, 0, 1],
    }
    def construct(self):
        # Replay the parent scene invisibly to reach its final state.
        self.force_skipping()
        ShowShadows.construct(self)
        self.revert_to_original_skipping_status()
        self.add_light_source_based_shadow_updater()
        self.add_light()
        self.move_light_around()
        self.show_vertical_lines()
    def add_light(self):
        light = self.light = self.get_light()
        light_source = self.camera.light_source
        light.move_to(light_source)
        # Keep the camera's light source glued to the visible light mobject.
        light_source.add_updater(lambda m: m.move_to(light))
        self.add(light_source)
        self.add_fixed_orientation_mobjects(light)
    def move_light_around(self):
        # Bring the light close, circle it, then retreat to (near) infinity.
        light = self.light
        self.add(self.shadow_area_label)
        self.play(
            light.move_to, 5 * OUT + DOWN,
            run_time=3
        )
        self.play(Rotating(
            light, angle=TAU, about_point=5 * OUT,
            rate_func=smooth, run_time=3
        ))
        self.play(
            light.move_to, 30 * OUT,
            run_time=3,
        )
        self.remove(light)
    def show_vertical_lines(self):
        # Visualize the (now parallel) light rays from object to shadow.
        lines = self.get_vertical_lines()
        obj3d = self.obj3d
        shadow = self.shadow
        # A styled copy of the object morphed into the shadow's outline.
        target_obj3d = obj3d.copy()
        target_obj3d.become(shadow)
        target_obj3d.match_style(obj3d)
        target_obj3d.set_shade_in_3d(False)
        source_obj3d = obj3d.copy()
        source_obj3d.set_shade_in_3d(False)
        source_obj3d.fade(1)
        self.play(LaggedStartMap(ShowCreation, lines))
        self.wait()
        self.add(source_obj3d, lines)
        self.play(
            ReplacementTransform(source_obj3d, target_obj3d),
            run_time=2
        )
        self.add(target_obj3d, lines)
        self.play(FadeOut(target_obj3d),)
        self.wait()
        # Keep the rays tracking the shadow as the object reorients.
        lines.add_updater(lambda m: m.become(self.get_vertical_lines()))
        for x in range(5):
            self.randomly_reorient()
    def add_light_source_based_shadow_updater(self):
        shadow = self.shadow
        light_source = self.camera.light_source
        obj3d = self.obj3d
        center = obj3d.get_center()
        def update(shadow):
            # Recompute the shadow for the current light position: rotate the
            # object as seen from the light, project, rescale with distance,
            # and recenter at the light-to-center projection point.
            lsp = light_source.get_center()
            proj_center = get_xy_plane_projection_point(lsp, center)
            c_to_lsp = lsp - center
            unit_c_to_lsp = normalize(c_to_lsp)
            rotation = rotation_matrix(
                angle=np.arccos(np.dot(unit_c_to_lsp, OUT)),
                axis=normalize(np.cross(unit_c_to_lsp, OUT))
            )
            new_shadow = get_shadow(
                self.obj3d.copy().apply_matrix(rotation)
            )
            shadow.become(new_shadow)
            shadow.scale(get_norm(lsp) / get_norm(c_to_lsp))
            shadow.move_to(proj_center)
            return shadow
        shadow.add_updater(update)
    def get_light(self):
        # Glowing disc: concentric annuli fading out toward the rim.
        n_rings = 40
        radii = np.linspace(0, 2, n_rings)
        rings = VGroup(*[
            Annulus(inner_radius=r1, outer_radius=r2)
            for r1, r2 in zip(radii, radii[1:])
        ])
        opacities = np.linspace(1, 0, n_rings)**1.5
        for opacity, ring in zip(opacities, rings):
            ring.set_fill(YELLOW, opacity)
            ring.set_stroke(YELLOW, width=0.1, opacity=opacity)
        return rings
    def get_vertical_lines(self):
        # Dashed rays from the light to sampled points on the shadow boundary.
        shadow = self.shadow
        points = get_boundary_points(shadow, 10)
        # half_points = [(p1 + p2) / 2 for p1, p2 in adjacent_pairs(points)]
        # points = np.append(points, half_points, axis=0)
        light_source = self.light.get_center()
        lines = VGroup(*[
            DashedLine(light_source, point)
            for point in points
        ])
        lines.set_shade_in_3d(True)
        for line in lines:
            # Keep only the last 20% of each ray, near the shadow.
            line.remove(*line[:int(0.8 * len(line))])
            line[-10:].set_shade_in_3d(False)
            line.set_stroke(YELLOW, 1)
        return lines
class CylinderShadows(ShowShadows):
    """Shadow scene for a unit-radius, height-2 cylinder (caps included)."""
    CONFIG = {
        # 2*pi*r^2 (two caps, r=1) + 2*pi*r*h (side, h=2).
        "surface_area": 2 * PI + 2 * PI * 2,
        "area_label_center": [0, -2, 0],
    }
    def get_object(self):
        height = 2
        # Side wall parameterised by angle (v) and height fraction (u).
        cylinder = ParametricSurface(
            lambda u, v: np.array([
                np.cos(TAU * v),
                np.sin(TAU * v),
                height * (1 - u)
            ]),
            resolution=(6, 32)
        )
        # circle = Circle(radius=1)
        # Cap disc; the +0.01 avoids a degenerate point at the center.
        circle = ParametricSurface(
            lambda u, v: np.array([
                (v + 0.01) * np.cos(TAU * u),
                (v + 0.01) * np.sin(TAU * u),
                0,
            ]),
            resolution=(16, 8)
        )
        # circle.set_fill(GREEN, opacity=0.5)
        for surface in cylinder, circle:
            surface.set_fill_by_checkerboard(GREEN, GREEN_E, opacity=1.0)
            # surface.set_fill(GREEN, opacity=0.5)
        # Bottom cap, plus a flipped copy for the top.
        cylinder.add(circle)
        cylinder.add(circle.copy().flip().move_to(height * OUT))
        cylinder.set_shade_in_3d(True)
        cylinder.set_stroke(width=0)
        # Slight oversize hides seam artifacts between caps and wall.
        cylinder.scale(1.003)
        return cylinder
class PrismShadows(ShowShadows):
    """Shadow scene for a triangular prism of height 2."""
    CONFIG = {
        # Two triangular faces plus three rectangular sides.
        "surface_area": 3 * np.sqrt(3) / 2 + 3 * (np.sqrt(3) * 2),
        "object_center": [0, 0, 3],
        "area_label_center": [0, -2.25, 0],
    }
    def get_object(self):
        height = 2
        prism = VGroup()
        triangle = RegularPolygon(3)
        verts = triangle.get_anchors()[:3]
        # One rectangle per triangle edge, extruded upward by `height`.
        rects = [
            Polygon(v1, v2, v2 + height * OUT, v1 + height * OUT)
            for v1, v2 in adjacent_pairs(verts)
        ]
        prism.add(triangle, *rects)
        prism.add(triangle.copy().shift(height * OUT))
        # Reverse orientation so the bottom face's normal points outward.
        triangle.reverse_points()
        prism.set_shade_in_3d(True)
        prism.set_fill(PINK, 0.8)
        prism.set_stroke(WHITE, 1)
        return prism
class TheseFourPiAreSquare(PiCreatureScene):
    """Placeholder scene; both hooks are intentionally left unimplemented."""
    def construct(self):
        pass
    def create_pi_creatures(self):
        pass
| StarcoderdataPython |
4884751 | <reponame>jenningsm42/mini-mmo-server
from server.server import register_handler
from server.message_type import MessageType
from server.message import Message
from server.proto.PlayerJoin_pb2 import (
PlayersResponse, JoinRequest, PlayerJoin)
from server.service.player import PlayerService
from server.service.character import CharacterService
@register_handler(MessageType.join_request)
async def player_join(message, client, server):
    """Handle a join request: persist the character, send the roster to the
    joining client, and broadcast the new player to everyone else."""
    info = JoinRequest()
    info.ParseFromString(message.serialized_message)
    # Load the chosen character and register it as an active player.
    with CharacterService() as service:
        character = service.get(info.character_id)
    with PlayerService() as service:
        # merge() re-attaches the character to this service's session.
        character = service.session.merge(character)
        service.create(character)
    server.players.update_all_positions()
    # Snapshot of all currently active characters for the joining client.
    players_response = PlayersResponse()
    for other_character in server.players.characters.values():
        player_info = players_response.players.add()
        player_info.player_id = other_character.id
        player_info.character.x = other_character.last_x
        player_info.character.y = other_character.last_y
        player_info.velocity_x = other_character.velocity_x
        player_info.velocity_y = other_character.velocity_y
        player_info.character.body_color = other_character.body_color
        player_info.character.shirt_color = other_character.shirt_color
        player_info.character.legs_color = other_character.legs_color
        player_info.character.name = other_character.name
    client.player_id = info.character_id
    server.players.add(client, character)
    await client.send(Message(
        message_type=MessageType.players_response,
        message=players_response))
    # NOTE: this local deliberately-or-not shadows the handler's own name;
    # harmless here since the function is not called recursively.
    player_join = PlayerJoin()
    player_join.player_id = client.player_id
    player_join.character.x = character.last_x
    player_join.character.y = character.last_y
    player_join.character.body_color = character.body_color
    player_join.character.shirt_color = character.shirt_color
    player_join.character.legs_color = character.legs_color
    player_join.character.name = character.name
    # Tell every other connected client about the new arrival.
    await server.broadcast(Message(
        message_type=MessageType.player_join,
        message=player_join),
        exclude=client)
@register_handler(MessageType.players_request)
async def players_state(message, client, server):
    """Handle a roster refresh request: send all other players' positions."""
    if not client.player_id:
        raise Exception('Received players_request event for invalid player!')
    server.players.update_all_positions()
    players_response = PlayersResponse()
    # NOTE(review): this iterates server.players.players and writes flat
    # player_info.x/.y, whereas player_join iterates .characters and writes
    # nested player_info.character.x/.y — confirm both match the proto schema.
    for character in server.players.players.values():
        # Skip the requesting client's own character.
        if character.id == client.player_id:
            continue
        player_info = players_response.players.add()
        player_info.player_id = character.id
        player_info.x = character.last_x
        player_info.y = character.last_y
        player_info.velocity_x = character.velocity_x
        player_info.velocity_y = character.velocity_y
    await client.send(Message(
        message_type=MessageType.players_response,
        message=players_response))
| StarcoderdataPython |
1980502 | <filename>src/reader/test_cases/test_perseus_import_lexicon.py
from xml.dom.minidom import parseString
from . import TestReader
from reader.importer.Perseus import PerseusTextImporter
from reader.models import Division, Verse
from reader.importer.batch_import import ImportTransforms
from reader.importer.Lexicon import LexiconImporter
class TestPerseusImportLexicon(TestReader):
    """Import tests for a Perseus lexicon resource (ml.xml).

    See #2322, https://lukemurphey.net/issues/2322
    """

    def setUp(self):
        # Both lexicon "entry" nodes and "div0" letter sections become divisions.
        self.importer = PerseusTextImporter(division_tags=["entry", "div0"])

    def test_load_lexicon(self):
        """Import the lexicon and verify division counts and beta-code conversion."""
        book_xml = self.load_test_resource('ml.xml')
        book_doc = parseString(book_xml)
        self.importer.import_xml_document(book_doc)
        divisions = Division.objects.filter(work=self.importer.work)
        # NOTE: assertEquals is a deprecated unittest alias (removed in
        # Python 3.12); assertEqual is used throughout instead.
        self.assertEqual(len(Verse.objects.filter(division=divisions[1])), 1)
        self.assertEqual(divisions.count(), 15)  # Two divisions for the letters and 13 for the entries
        # Make sure that the division description got converted from beta-code
        self.assertEqual(divisions[0].title, '\u0391')  # Should be Α
        self.assertEqual(str(divisions[0]), "Α")  # Should be Α
        #self.assertEqual(divisions[0].title_slug, "a") # Should be Α
        self.assertEqual(divisions[0].descriptor, "*a")
        self.assertEqual(divisions[1].descriptor, "ἀάατος")
        #self.assertEqual(str(divisions[1]), "ἀάατος")
        # Update the descriptors from beta-code to Unicode
        ImportTransforms.convert_descriptors_from_beta_code(self.importer.work)
        self.assertEqual(divisions[0].descriptor, '\u0391')
        # Ensure that the division has a valid readable string
        # See https://lukemurphey.net/issues/2355
        self.assertEqual(str(divisions[1]), "main ἈΆΑΤΟΣ")
        self.assertEqual(divisions[1].get_division_description(use_titles=False), 'Α ἈΆΑΤΟΣ')

    def test_find_entries(self):
        """Verify that Perseus entry keys can be recovered from an imported verse."""
        book_xml = self.load_test_resource('ml.xml')
        book_doc = parseString(book_xml)
        self.importer.import_xml_document(book_doc)
        divisions = Division.objects.filter(work=self.importer.work)
        verses = Verse.objects.filter(division=divisions[1])
        verse = verses[:1][0]
        entries = LexiconImporter.find_perseus_entries(verse)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], "a)a/atos")
| StarcoderdataPython |
6626874 | import os
import numpy as np
import argparse
import json
import torch
import cv2
import scipy.io as sio
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
import PnP
import models
def str2bool(value):
    """Parse a command-line boolean flag value.

    Accepts 'true'/'1'/'yes'/'y' (case-insensitive) as True; everything
    else (including 'False' and '0') is False.  Passes booleans through.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


def parse_arguments():
    """Build and parse the command-line arguments for the PnP-MRI runner.

    Returns:
        argparse.Namespace with model/data paths, algorithm selection, the
        mu sweep range, and PnP solver options.
    """
    parser = argparse.ArgumentParser(description='PyTorch Training')
    parser.add_argument('--config', default='configs/config.json', type=str, help='Path to the config file')
    parser.add_argument('--model', default=None, type=str, help='Path to the trained .pth model')
    parser.add_argument('--img', default='CS_MRI/file1002252_2_bottomright.pt', type=str, help='Path to the original image')
    parser.add_argument('--mask', default='CS_MRI/Q_Random30.pt', type=str, help='Path to the k-space mask file')
    # BUG FIX: the original used type=bool, but argparse's bool() treats any
    # non-empty string as True, so "--jpg False" silently parsed as True.
    parser.add_argument('--jpg', default=True, type=str2bool, help='file type either jpg or pt')
    parser.add_argument('--noise', default='CS_MRI/noises.mat', type=str, help='Path to the k-space noise file')
    parser.add_argument('--device', default="cpu", type=str, help='device location')
    parser.add_argument('--experiment', default=None, type=str, help='name of the experiment')
    parser.add_argument('--algo', default="admm", type=str, help='admm/fbs')
    parser.add_argument('--mu_upper', default=3.0, type=float, help='highest value of mu')
    parser.add_argument('--mu_lower', default=0.1, type=float, help='lowest value of mu')
    parser.add_argument('--mu_step', default=30, type=int, help='step')
    parser.add_argument("--sigma", type=float, default=0.05, help="Noise level for the denoising model")
    parser.add_argument("--alpha", type=float, default=2.0, help="Step size in Plug-and Play")
    parser.add_argument("--maxitr", type=int, default=100, help="Number of iterations")
    parser.add_argument("--verbose", type=int, default=1, help="Whether printing the info out")
    args = parser.parse_args()
    return args
def check_directory(experiment, algo):
    """Create (if needed) and return the output directory Experiments/<algo>/<experiment>.

    Args:
        experiment: name of the experiment (leaf directory).
        algo: algorithm name ('admm' or 'fbs'), the middle directory.

    Returns:
        The relative path to the created directory.
    """
    path = os.path.join("Experiments", algo, experiment)
    # makedirs creates all intermediate directories; exist_ok avoids the
    # check-then-create race of the original exists()/makedirs() pattern.
    os.makedirs(path, exist_ok=True)
    return path
def scale(img):
    """Min-max normalise *img* and map it onto the 0-255 intensity range.

    Note: assumes the image is not constant (max > min), otherwise the
    normalisation divides by zero — same contract as before.
    """
    lo = np.amin(img)
    hi = np.amax(img)
    normalised = (img - lo) / (hi - lo)
    return 255 * normalised
def psnr(x, im_orig):
    """Return the SNR (in dB) of reconstruction *x* against *im_orig*.

    Computed as 10*log10(||im_orig||^2 / ||x - im_orig||^2).  NOTE(review):
    despite the name this is a signal-to-noise ratio, not a true peak-SNR;
    the formula is kept as-is to preserve reported numbers.

    The original also computed a min-max-normalised copy of x (`xout`) that
    was never used; that dead computation has been removed.
    """
    norm1 = np.sum((np.absolute(im_orig)) ** 2)
    norm2 = np.sum((np.absolute(x - im_orig)) ** 2)
    return 10 * np.log10(norm1 / norm2)
if __name__ == '__main__':
    # ---- input arguments ----
    args = parse_arguments()
    # CONFIG -> assert if config is here
    assert args.config
    # NOTE(review): the config file handle is never closed; harmless for a
    # one-shot script, but a `with open(...)` would be cleaner.
    config = json.load(open(args.config))
    # ---- load the model ----
    model = models.DnCNN(config, depth=config["model"]["depth"], n_channels=config["model"]["n_channels"],
                         image_channels=config["model"]["image_channels"], kernel_size=config["model"]["kernel_size"],
                         padding=config["model"]["padding"], architecture=config["model"]["architecture"],
                         spectral_norm=config["model"]["spectral_norm"],
                         shared_activation=config["model"]["shared_activation"],
                         shared_channels=config["model"]["shared_channels"], device=args.device)
    device = args.device
    checkpoint = torch.load(args.model, device)
    if device == 'cpu':
        # Strip the 'module.' prefix left by DataParallel training so the
        # state-dict keys match a single-device model.
        for key in list(checkpoint['state_dict'].keys()):
            if 'module.' in key:
                checkpoint['state_dict'][key.replace('module.', '')] = checkpoint['state_dict'][key]
                del checkpoint['state_dict'][key]
    try:
        model.load_state_dict(checkpoint['state_dict'], strict=True)
    except Exception as e:
        # Fall back to a partial load when the checkpoint layout differs.
        print(f'Some modules are missing: {e}')
        model.load_state_dict(checkpoint['state_dict'], strict=False)
    model.float()
    model.eval()
    if args.device != 'cpu':
        model.to(device)
    # create the output directory and return the path to it
    path = check_directory(args.experiment, args.algo)
    with torch.no_grad():
        # ---- load the ground truth ----
        if args.jpg is True:
            im_orig = cv2.imread(f'{args.img}', 0) / 255.0
            cv2.imwrite(f'{path}/GroundTruth.png', 255 * im_orig)
        else:
            im_orig = torch.load(f'{args.img}').numpy()
            cv2.imwrite(f'{path}/GroundTruth.png', 255*im_orig)
        # ---- load mask matrix ----
        if args.jpg is True:
            mat = sio.loadmat(f'{args.mask}')
            mask = mat.get('Q1').astype(np.float64)
        else:
            mask = torch.load(f'{args.mask}').numpy()
        # ---- load noises -----
        if args.jpg is True:
            noises = sio.loadmat(f'{args.noise}')
            noises = noises.get('noises').astype(np.complex128) * 3.0
        else:
            # .pt inputs run noiseless; downstream solvers must accept None.
            noises = None
        # ---- set options -----
        opts = dict(sigma=args.sigma, alpha=args.alpha, maxitr=args.maxitr, verbose=args.verbose)
        mu_snr = []
        # Sweep the scaling parameter mu over the requested range.
        mu_vec = np.linspace(args.mu_lower, args.mu_upper, args.mu_step)
        for mu in mu_vec:
            # ---- plug and play !!! -----
            # Verbose solvers additionally return the per-iteration SNR curve.
            if args.algo == "admm":
                if args.verbose:
                    x_out, inc, x_init, zero_fill_snr, snr = PnP.pnp_admm_csmri.pnp_admm_csmri_(model, im_orig, mask, noises, mu, device, **opts)
                else:
                    x_out, inc, x_init, zero_fill_snr = PnP.pnp_admm_csmri.pnp_admm_csmri_(model, im_orig, mask, noises, mu, device, **opts)
            elif args.algo == "fbs":
                if args.verbose:
                    x_out, inc, x_init, zero_fill_snr, snr = PnP.pnp_fbs_csmri.pnp_fbs_csmri_(model, im_orig, mask, noises, mu, device, **opts)
                else:
                    x_out, inc, x_init, zero_fill_snr = PnP.pnp_fbs_csmri.pnp_fbs_csmri_(model, im_orig, mask, noises, mu, device, **opts)
            # directory: one sub-folder per mu value
            path_mu = os.path.join(path, f"{mu}")
            if not os.path.exists(path_mu):
                os.makedirs(path_mu)
            # ---- print result -----
            out_snr = psnr(x_out, im_orig)
            mu_snr.append(out_snr)
            print('Plug-and-Play PNSR: ', out_snr)
            # NOTE(review): `snr` is only bound when --verbose is truthy; with
            # --verbose 0 this next line raises NameError — confirm intent.
            metrics = {"PSNR": np.round(snr, 8), "Zero fill PSNR": np.round(zero_fill_snr, 8), }
            with open(f'{path_mu}/snr.txt', 'w') as f:
                for k, v in list(metrics.items()):
                    f.write("%s\n" % (k + ':' + f'{v}'))
            # ---- save result -----
            fig, ax1 = plt.subplots()
            ax1.plot(inc, 'b-', linewidth=1)
            ax1.set_xlabel('iteration')
            ax1.set_ylabel('Increment', color='b')
            ax1.set_title("Increment curve")
            fig.savefig(f'{path_mu}/inc.png')
            # NOTE(review): plt.show() blocks per iteration in interactive
            # backends; consider plt.close(fig) for headless runs.
            plt.show()
            if args.verbose:
                fig, ax1 = plt.subplots()
                ax1.plot(snr, 'b-', linewidth=1)
                ax1.set_xlabel('iteration')
                ax1.set_ylabel('PSNR', color='b')
                ax1.set_title("PSNR curve")
                fig.savefig(f'{path_mu}/snr.png')
                plt.show()
            # Persist raw tensors, then 0-255 PNG renderings.
            torch.save(torch.from_numpy(x_out), f'{path_mu}/{args.algo}.pt')
            torch.save(torch.from_numpy(x_init), f'{path_mu}/ifft.pt')
            x_out = scale(x_out)
            x_init = scale(x_init)
            cv2.imwrite(f'{path_mu}/{args.algo}.png', x_out)
            cv2.imwrite(f'{path_mu}/ifft.png', x_init)
        # ---- summary over the whole mu sweep ----
        fig, ax1 = plt.subplots()
        ax1.plot(mu_vec, np.asarray(mu_snr), 'b-', linewidth=1)
        ax1.set_xlabel('mu')
        ax1.set_ylabel('SNR', color='b')
        ax1.set_title("SNR for different scaling mu")
        fig.savefig(f'{path}/mu.png')
        plt.show()
        # Record the mu value that achieved the best SNR.
        idx_max = np.argmax(np.asarray(mu_snr))
        mu_max = mu_vec[idx_max]
        param = {"mu": mu_max}
        with open(f'{path}/mu.txt', 'w') as f:
            for k, v in list(param.items()):
                f.write("%s\n" % (k + ':' + f'{v}'))
| StarcoderdataPython |
9641715 | """This module implements a PDT distribution sub-class using a Gaussian mixture model
"""
import os
import sys
import numpy as np
from scipy.stats import rv_continuous
from scipy import integrate as sciint
from scipy import interpolate as sciinterp
from qp import sparse_rep
from qp.factory import add_class
from qp.interp_pdf import interp_gen
from qp.conversion_funcs import extract_sparse_from_xy
from qp.test_data import TEST_XVALS, NPDF
class sparse_gen(interp_gen):
    """Sparse based distribution. The final behavior is similar to interp_gen, but the
    constructor takes a sparse representation (Gaussian-dictionary metadata plus
    per-PDF sparse coefficient indices) and rebuilds the interpolation grid from it.

    Notes
    -----
    This implements a qp interface to the original code SparsePz from <NAME>.
    """
    # pylint: disable=protected-access
    name = 'sparse'
    version = 0
    _support_mask = rv_continuous._support_mask

    def __init__(self, xvals, mu, sig, dims, sparse_indices, *args, **kwargs):
        # Sparse-representation metadata: evaluation grid, Gaussian means and
        # widths of the dictionary, and the shape descriptor `dims`.
        self.sparse_indices = sparse_indices
        self._xvals = xvals
        self.mu = mu
        self.sig = sig
        self.dims = dims
        # `cut` truncates negligible basis values when rebuilding the dictionary.
        cut = kwargs.pop('cut', 1.e-5)
        # recreate the basis array from the metadata
        sparse_meta = dict(xvals=xvals, mu=mu, sig=sig, dims=dims)
        A = sparse_rep.create_basis(sparse_meta, cut=cut)
        # decode the sparse indices into basis indices and weights
        basis_indices, weights = sparse_rep.decode_sparse_indices(sparse_indices)
        # retrieve the weighted array of basis functions for each object
        pdf_y = A[:, basis_indices] * weights
        # normalize and sum the weighted pdfs
        x = sparse_meta['xvals']
        y = pdf_y.sum(axis=-1)
        norms = sciint.trapz(y.T, x)
        y /= norms
        # Hand the dense reconstruction to interp_gen.
        kwargs.setdefault('xvals', x)
        kwargs.setdefault('yvals', y.T)
        super(sparse_gen, self).__init__(*args, **kwargs)
        # Store the sparse form (not the dense one) as this class's payload.
        self._clearobjdata()
        self._addmetadata('xvals', self._xvals)
        self._addmetadata('mu', self.mu)
        self._addmetadata('sig', self.sig)
        self._addmetadata('dims', self.dims)
        self._addobjdata('sparse_indices', self.sparse_indices)

    def _updated_ctor_param(self):
        """
        Add the constructor's arguments for the Factory so the distribution
        can be re-created from its stored state.
        """
        dct = super(sparse_gen, self)._updated_ctor_param()
        dct['sparse_indices'] = self.sparse_indices
        dct['xvals'] = self._xvals
        dct['mu'] = self.mu
        dct['sig'] = self.sig
        dct['dims'] = self.dims
        return dct

    @classmethod
    def get_allocation_kwds(cls, npdf, **kwargs):
        """Return the per-object array shapes/dtypes needed to store npdf PDFs."""
        if 'dims' not in kwargs:
            raise ValueError("required argument dims not in kwargs")  # pragma: no cover
        # dims[4] holds the number of sparse coefficients per PDF.
        nsp = np.array(kwargs['dims']).flatten()[4]
        # NOTE(review): nmu is computed but unused — confirm before removing.
        nmu = np.array(kwargs['dims']).flatten()[0]
        return dict(sparse_indices=((npdf, nsp), 'i8'))

    @classmethod
    def add_mappings(cls):
        """
        Add this classes mappings to the conversion dictionary
        """
        cls._add_creation_method(cls.create, None)
        cls._add_extraction_method(extract_sparse_from_xy, None)

    @staticmethod
    def build_test_data():
        """build a test case out of real pdfs"""
        qproot = sys.modules['qp'].__path__[0]
        filein = os.path.join(qproot, '../data/CFHTLens_sample.P.npy')
        # FORMAT FILE, EACH ROW IS THE PDF FOR EACH GALAXY, LAST ROW IS THE REDSHIFT POSITION
        P = np.load(filein)
        z = P[-1]
        P = P[:NPDF]
        P = P / sciint.trapz(P, z).reshape(-1, 1)
        # Re-grid onto a finer axis trimmed to where the PDFs have support.
        minz = np.min(z)
        nz = 301
        _, j = np.where(P > 0)
        maxz = np.max(z[j+1])
        newz = np.linspace(minz, maxz, nz)
        interp = sciinterp.interp1d(z, P, assume_sorted=True)
        newpdf = interp(newz)
        newpdf = newpdf / sciint.trapz(newpdf, newz).reshape(-1, 1)
        sparse_idx, meta, _ = sparse_rep.build_sparse_representation(newz, newpdf, verbose=False)
        return sparse_idx, meta

    @classmethod
    def make_test_data(cls):
        """Populate cls.test_data with a sparse test case for the qp test suite."""
        SPARSE_IDX, META = cls.build_test_data()
        cls.test_data = dict(sparse=dict(gen_func=sparse, \
                                         ctor_data=dict(xvals=META['xvals'], mu=META['mu'], sig=META['sig'],\
                                                        dims=META['dims'], sparse_indices=SPARSE_IDX),\
                                         test_xvals=TEST_XVALS), )
# Convenience constructor, mirroring scipy's frozen-distribution pattern.
sparse = sparse_gen.create
# Register the class with qp's factory so it can be instantiated by name.
add_class(sparse_gen)
| StarcoderdataPython |
1806294 | from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin as OrigLoginRequiredMixin
from django.contrib.auth.views import LoginView as OrigLoginView
from django.contrib.auth.views import LogoutView # noqa F401
from django.db.models import fields as model_fields
from django.shortcuts import redirect
from django.urls import (
reverse,
reverse_lazy,
)
from django.views.generic import View
from django.views.generic.base import ContextMixin
from .. import app_name, __version__
from ..forms import AuthenticationForm
# Template context shared by every view in this module (vendor branding,
# webmail link, and the app version unless hidden by settings).
extra_context = {
    'webmail_url': settings.WEBMAIL_URL,
    'vendor_name': settings.VENDOR_NAME,
    'vendor_url': settings.VENDOR_URL,
    'version': '' if settings.HIDE_VERSION else __version__,
}
class CommonContextMixin(ContextMixin):
    """Inject the module-level vendor/webmail context into any view."""
    extra_context = extra_context
class FieldsContextMixin():
    """Add a ``fields`` mapping (name -> (value, simple type tag)) to the context.

    When the view declares ``fields``, only those model fields are exposed;
    otherwise every non-relational field of the model is used.
    """

    # Maps Django model field classes to the short type tags templates expect.
    field_types = {
        model_fields.IntegerField: 'num',
        model_fields.BooleanField: 'bool',
        model_fields.TextField: 'textarea',
        model_fields.CharField: 'str',
    }

    def _get_simple_type(self, field):
        """Return the template type tag for *field*; unknown kinds fall back to 'str'."""
        return next(
            (tag for cls, tag in self.field_types.items() if isinstance(field, cls)),
            'str',
        )

    def get_context_data(self, *args, **kwargs):
        ctx = super().get_context_data(*args, **kwargs)
        if hasattr(self, 'fields'):
            model_field_list = [self.model._meta.get_field(name) for name in self.fields]
        else:
            model_field_list = [f for f in self.model._meta.get_fields() if not f.is_relation]
        obj = ctx['object']
        field_map = {
            f.name: (getattr(obj, f.name), self._get_simple_type(f))
            for f in model_field_list
        }
        return ctx | {
            'fields': field_map,
        }
class SortMixin():
    """
    default_sort will define the form field that will be sorted as default
    sort_mapping will map any form field name to one or more db fields
    sort.value, sort.name and sort.asc will be accessible from the template
    """

    def get_default_sort(self):
        """Return the subclass's `default_sort`, or '' for no default sorting."""
        if hasattr(self, 'default_sort'):
            return self.default_sort
        else:
            return ''

    def get_query_order_by(self):
        """Translate the parsed sort state into ORM order_by() argument(s)."""
        sort_name = self.sort.get('name', '')
        if sort_name:
            # A form field may map to one DB field (str) or several (iterable).
            sort_mapping = getattr(self, 'sort_mapping', {})
            fields = sort_mapping.get(sort_name, sort_name)
            prefix = '' if self.sort.get('asc', True) else '-'
            if isinstance(fields, str):
                return f'{prefix}{fields}'
            else:
                return [f'{prefix}{field}' for field in fields]
        else:
            return []

    def get_ordering(self):
        """Parse ?sort=... from the request, populate self.sort, and return order_by."""
        sort_value = self.request.GET.get('sort', self.get_default_sort())
        self.sort = {
            'value': sort_value,
            'name': '',
            'asc': False,
        }
        if sort_value:
            # A leading '-' means descending, mirroring Django's convention.
            if sort_value.startswith('-'):
                self.sort['name'] = sort_value[1:]
            else:
                self.sort['name'] = sort_value
                self.sort['asc'] = True
        order_by = self.get_query_order_by()
        return order_by

    def get_context_data(self, *args, **kwargs):
        # Expose the sort state to templates.
        return super().get_context_data(*args, **kwargs) | {
            'sort': self.sort,
        }
class LoginView(CommonContextMixin, OrigLoginView):
    """Login view using this app's auth form, template, and common context."""
    authentication_form = AuthenticationForm
    template_name = f'{app_name}/login.html'
class LoginRequiredMixin(OrigLoginRequiredMixin):
    """LoginRequiredMixin that redirects to this app's own login URL."""
    login_url = reverse_lazy(f'{app_name}:login')
class IndexView(View):
    """Landing page: redirect straight to the domain list."""

    def get(self, request):
        return redirect(reverse(f'{app_name}:domain-list'))
| StarcoderdataPython |
3585038 | <reponame>mahidharc/iudx-auth-server<filename>test/test-auth.py
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
from init import consumer
from init import provider
from init import resource_server
from init import expect_failure
# Resource-server hostname; kept (even though unused here) for parity with
# the sibling test scripts that import/copy this preamble.
RS = "iisc.iudx.org.in"


def assert_forbidden(r):
    """Assert that an API response was rejected with HTTP 403."""
    assert r['success'] is False
    assert r['status_code'] == 403


# Every call below is made with invalid credentials/state and must fail.
expect_failure(True)

assert_forbidden(consumer.get_policy())

policy = "x can access *"  # dummy policy
assert_forbidden(consumer.set_policy(policy))
assert_forbidden(consumer.append_policy(policy))

assert_forbidden(resource_server.get_policy())
assert_forbidden(resource_server.set_policy(policy))
assert_forbidden(resource_server.append_policy(policy))

tokens = ["dummy"]
token_hashes = ["dummy"]

assert_forbidden(resource_server.revoke_tokens(tokens))
# BUG FIX: the original passed `tokens` here, leaving `token_hashes` unused;
# both hold the same dummy value, so observable behavior is unchanged.
assert_forbidden(resource_server.revoke_token_hashes(token_hashes))
assert_forbidden(resource_server.revoke_all("invalid", "invalid"))
assert_forbidden(resource_server.audit_tokens(10))
assert_forbidden(resource_server.add_consumer_to_group("arun", "confidential", 20))
assert_forbidden(resource_server.delete_consumer_from_group("arun", "confidential"))
assert_forbidden(resource_server.list_group("confidential"))

token = {}
server_token = {}
assert_forbidden(provider.introspect_token(token, server_token))

# consumers
# NOTE(review): introspect_token is deliberately kept twice, matching the
# original script (possibly testing idempotence of the rejection).
assert_forbidden(consumer.introspect_token(token, server_token))
assert_forbidden(consumer.introspect_token(token, server_token))
assert_forbidden(consumer.add_consumer_to_group("arun", "confidential", 20))
assert_forbidden(consumer.delete_consumer_from_group("arun", "confidential"))
assert_forbidden(consumer.list_group("confidential"))

expect_failure(False)
| StarcoderdataPython |
3229368 | import datetime
from uuid import uuid4
from typing import Set
from fastapi import FastAPI, Security
from fastapi.testclient import TestClient
from pytest_mock import MockerFixture
from fastapi_jwt import JwtAccessBearer, JwtAuthorizationCredentials, JwtRefreshBearer
# Application under test: a minimal FastAPI app wired with JWT bearer security.
app = FastAPI()
access_security = JwtAccessBearer(secret_key="secret_key")
# The refresh scheme shares the secret/configuration of the access scheme.
refresh_security = JwtRefreshBearer.from_other(access_security)
# In-memory record of every issued JWT ID (jti); exercised by the jti test.
unique_identifiers_database: Set[str] = set()
@app.post("/auth")
def auth():
    """Issue a fresh access/refresh token pair for a fixed demo subject."""
    subject = {"username": "username", "role": "user"}
    # Each access token gets a unique jti, recorded for later verification.
    unique_identifier = str(uuid4())
    unique_identifiers_database.add(unique_identifier)
    access_token = access_security.create_access_token(
        subject=subject, unique_identifier=unique_identifier
    )
    refresh_token = access_security.create_refresh_token(subject=subject)
    return {"access_token": access_token, "refresh_token": refresh_token}
@app.post("/refresh")
def refresh(credentials: JwtAuthorizationCredentials = Security(refresh_security)):
    """Exchange a valid refresh token for a brand-new token pair."""
    # New tokens carry the same subject but a fresh jti.
    unique_identifier = str(uuid4())
    unique_identifiers_database.add(unique_identifier)
    access_token = refresh_security.create_access_token(
        subject=credentials.subject, unique_identifier=unique_identifier,
    )
    refresh_token = refresh_security.create_refresh_token(subject=credentials.subject)
    return {"access_token": access_token, "refresh_token": refresh_token}
@app.get("/users/me")
def read_current_user(
    credentials: JwtAuthorizationCredentials = Security(access_security),
):
    """Protected route: echo the subject claims from a valid access token."""
    return {"username": credentials["username"], "role": credentials["role"]}
@app.get("/auth/meta")
def get_token_meta(
    credentials: JwtAuthorizationCredentials = Security(access_security),
):
    """Protected route: expose the token's jti so tests can verify it."""
    return {"jti": credentials.jti}
class _FakeDateTimeShort(datetime.datetime):  # pragma: no cover
    """datetime stand-in whose clock runs 3 minutes ahead (token still valid)."""

    @staticmethod
    def now(**kwargs):
        return datetime.datetime.now() + datetime.timedelta(minutes=3)

    @staticmethod
    def utcnow(**kwargs):
        return datetime.datetime.utcnow() + datetime.timedelta(minutes=3)


class _FakeDateTimeLong(datetime.datetime):  # pragma: no cover
    """datetime stand-in whose clock runs 42 days ahead (token expired)."""

    @staticmethod
    def now(**kwargs):
        return datetime.datetime.now() + datetime.timedelta(days=42)

    @staticmethod
    def utcnow(**kwargs):
        return datetime.datetime.utcnow() + datetime.timedelta(days=42)
client = TestClient(app)

# Expected OpenAPI document for the app above; test_openapi_schema compares
# the live /openapi.json against this verbatim.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/auth": {
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Auth",
                "operationId": "auth_auth_post",
            }
        },
        "/refresh": {
            "post": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Refresh",
                "operationId": "refresh_refresh_post",
                "security": [{"JwtRefreshBearer": []}],
            }
        },
        "/users/me": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Read Current User",
                "operationId": "read_current_user_users_me_get",
                "security": [{"JwtAccessBearer": []}],
            }
        },
        "/auth/meta": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Get Token Meta",
                "operationId": "get_token_meta_auth_meta_get",
                "security": [{"JwtAccessBearer": []}],
            }
        },
    },
    "components": {
        "securitySchemes": {
            "JwtAccessBearer": {"type": "http", "scheme": "bearer"},
            "JwtRefreshBearer": {"type": "http", "scheme": "bearer"},
        }
    },
}
def test_openapi_schema():
    # The generated schema must match the expected document verbatim.
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == openapi_schema


def test_security_jwt_access_token():
    # A freshly issued access token grants access to the protected route.
    access_token = client.post("/auth").json()["access_token"]
    response = client.get(
        "/users/me", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 200, response.text
    assert response.json() == {"username": "username", "role": "user"}


def test_security_jwt_access_token_wrong():
    # Both a non-JWT string and a JWT-shaped but invalid token are rejected.
    response = client.get(
        "/users/me", headers={"Authorization": "Bearer wrong_access_token"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")
    response = client.get(
        "/users/me", headers={"Authorization": "Bearer wrong.access.token"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")


def test_security_jwt_access_token_changed():
    # Tampering with the payload segment invalidates the signature.
    access_token = client.post("/auth").json()["access_token"]
    access_token = access_token.split(".")[0] + ".wrong." + access_token.split(".")[-1]
    response = client.get(
        "/users/me", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")


def test_security_jwt_access_token_expiration(mocker: MockerFixture):
    # Patch jose's clock: +3 minutes is still valid, +42 days is expired.
    access_token = client.post("/auth").json()["access_token"]
    mocker.patch("jose.jwt.datetime", _FakeDateTimeShort)  # 3 min left
    response = client.get(
        "/users/me", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 200, response.text
    mocker.patch("jose.jwt.datetime", _FakeDateTimeLong)  # 42 days left
    response = client.get(
        "/users/me", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith(
        "Token time expired: Signature has expired"
    )
def test_security_jwt_refresh_token():
    # A valid refresh token yields a new token pair.
    refresh_token = client.post("/auth").json()["refresh_token"]
    response = client.post(
        "/refresh", headers={"Authorization": f"Bearer {refresh_token}"}
    )
    assert response.status_code == 200, response.text


def test_security_jwt_refresh_token_wrong():
    # Both a non-JWT string and a JWT-shaped but invalid token are rejected.
    response = client.post(
        "/refresh", headers={"Authorization": "Bearer wrong_refresh_token"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")
    response = client.post(
        "/refresh", headers={"Authorization": "Bearer wrong.refresh.token"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")


def test_security_jwt_refresh_token_using_access_token():
    # An access token must NOT be accepted by the refresh endpoint.
    tokens = client.post("/auth").json()
    access_token, refresh_token = tokens["access_token"], tokens["refresh_token"]
    assert access_token != refresh_token
    response = client.post(
        "/refresh", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token: 'type' is not 'refresh'")


def test_security_jwt_refresh_token_changed():
    # Tampering with the payload segment invalidates the signature.
    refresh_token = client.post("/auth").json()["refresh_token"]
    refresh_token = (
        refresh_token.split(".")[0] + ".wrong." + refresh_token.split(".")[-1]
    )
    response = client.post(
        "/refresh", headers={"Authorization": f"Bearer {refresh_token}"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith("Wrong token:")


def test_security_jwt_refresh_token_expired(mocker: MockerFixture):
    # Advancing the clock 42 days expires the refresh token.
    refresh_token = client.post("/auth").json()["refresh_token"]
    mocker.patch("jose.jwt.datetime", _FakeDateTimeLong)  # 42 days left
    response = client.post(
        "/refresh", headers={"Authorization": f"Bearer {refresh_token}"}
    )
    assert response.status_code == 401, response.text
    assert response.json()["detail"].startswith(
        "Token time expired: Signature has expired"
    )
def test_security_jwt_custom_jti():
    # The jti reported by /auth/meta must be one the server actually issued.
    access_token = client.post("/auth").json()["access_token"]
    response = client.get(
        "/auth/meta", headers={"Authorization": f"Bearer {access_token}"}
    )
    assert response.status_code == 200, response.text
    assert response.json()["jti"] in unique_identifiers_database
9780056 | <reponame>fernandezc/spectrochempy_gui
import os
from ..Qt import QtCore, QtGui
from ..python2_3 import asUnicode
from .Parameter import Parameter, registerParameterType
from .ParameterItem import ParameterItem
from ..widgets.SpinBox import SpinBox
from ..widgets.ColorButton import ColorButton
from ..colormap import ColorMap
from .. import icons as icons
from .. import functions as fn
from collections import OrderedDict
class WidgetParameterItem(ParameterItem):
    """
    ParameterTree item with:

    * label in second column for displaying value
    * simple widget for editing value (displayed instead of label when item is selected)
    * button that resets value to default

    ========================== =============================================================
    **Registered Types:**
    int                        Displays a :class:`SpinBox <pyqtgraph.SpinBox>` in integer
                               mode.
    float                      Displays a :class:`SpinBox <pyqtgraph.SpinBox>`.
    bool                       Displays a QCheckBox
    str                        Displays a QLineEdit
    color                      Displays a :class:`ColorButton <pyqtgraph.ColorButton>`
    colormap                   Displays a :class:`GradientWidget <pyqtgraph.GradientWidget>`
    ========================== =============================================================

    This class can be subclassed by overriding makeWidget() to provide a custom widget.
    """
def __init__(self, param, depth):
    """Build the editor widget, display label, and default button for *param*."""
    ParameterItem.__init__(self, param, depth)
    self.asSubItem = False  # place in a child item's column 0 instead of column 1
    self.hideWidget = True  ## hide edit widget, replace with label when not selected
    ## set this to False to keep the editor widget always visible
    # build widget with a display label and default button
    w = self.makeWidget()
    self.widget = w
    # EventProxy routes the widget's Qt events through widgetEventFilter.
    self.eventProxy = EventProxy(w, self.widgetEventFilter)
    if self.asSubItem:
        # Large widgets (e.g. colormap) live on their own child row.
        self.subItem = QtGui.QTreeWidgetItem()
        self.subItem.depth = self.depth + 1
        self.subItem.setFlags(QtCore.Qt.NoItemFlags)
        self.addChild(self.subItem)
    self.defaultBtn = QtGui.QPushButton()
    self.defaultBtn.setAutoDefault(False)
    self.defaultBtn.setFixedWidth(20)
    self.defaultBtn.setFixedHeight(20)
    # NOTE(review): modDir is unused here — likely a leftover from when the
    # icon was loaded from disk; confirm before removing.
    modDir = os.path.dirname(__file__)
    self.defaultBtn.setIcon(icons.getGraphIcon('default'))
    self.defaultBtn.clicked.connect(self.defaultClicked)
    self.displayLabel = QtGui.QLabel()
    layout = QtGui.QHBoxLayout()
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(2)
    if not self.asSubItem:
        layout.addWidget(w, 1)
    layout.addWidget(self.displayLabel, 1)
    layout.addStretch(0)
    layout.addWidget(self.defaultBtn)
    self.layoutWidget = QtGui.QWidget()
    self.layoutWidget.setLayout(layout)
    if w.sigChanged is not None:
        w.sigChanged.connect(self.widgetValueChanged)
    if hasattr(w, 'sigChanging'):
        w.sigChanging.connect(self.widgetValueChanging)
    ## update value shown in widget.
    opts = self.param.opts
    if opts.get('value', None) is not None:
        self.valueChanged(self, opts['value'], force=True)
    else:
        ## no starting value was given; use whatever the widget has
        self.widgetValueChanged()
    self.updateDefaultBtn()
    self.optsChanged(self.param, self.param.opts)
    # set size hints
    sw = self.widget.sizeHint()
    sb = self.defaultBtn.sizeHint()
    # shrink row heights a bit for more compact look
    sw.setHeight(int(sw.height() * 0.9))
    sb.setHeight(int(sb.height() * 0.9))
    if self.asSubItem:
        self.setSizeHint(1, sb)
        self.subItem.setSizeHint(0, sw)
    else:
        w = sw.width() + sb.width()
        h = max(sw.height(), sb.height())
        self.setSizeHint(1, QtCore.QSize(w, h))
def makeWidget(self):
    """
    Return a single widget whose position in the tree is determined by the
    value of self.asSubItem. If True, it will be placed in the second tree
    column, and if False, the first tree column of a child item.

    The widget must be given three attributes:

    ========== ============================================================
    sigChanged a signal that is emitted when the widget's value is changed
    value      a function that returns the value
    setValue   a function that sets the value
    ========== ============================================================

    This is a good function to override in subclasses.
    """
    opts = self.param.opts
    t = opts['type']
    if t in ('int', 'float'):
        # SpinBox defaults; any matching key in opts overrides them.
        defs = {
            'value': 0, 'min': None, 'max': None,
            'step': 1.0, 'dec': False,
            'siPrefix': False, 'suffix': '', 'decimals': 3,
        }
        if t == 'int':
            defs['int'] = True
            defs['minStep'] = 1.0
        for k in defs:
            if k in opts:
                defs[k] = opts[k]
        if 'limits' in opts:
            defs['min'], defs['max'] = opts['limits']
        w = SpinBox()
        w.setOpts(**defs)
        w.sigChanged = w.sigValueChanged
        w.sigChanging = w.sigValueChanging
    elif t == 'bool':
        w = QtGui.QCheckBox()
        w.sigChanged = w.toggled
        w.value = w.isChecked
        w.setValue = w.setChecked
        self.hideWidget = False
    elif t == 'str':
        w = QtGui.QLineEdit()
        w.setStyleSheet('border: 0px')
        w.sigChanged = w.editingFinished
        w.value = lambda: asUnicode(w.text())
        w.setValue = lambda v: w.setText(asUnicode(v))
        w.sigChanging = w.textChanged
    elif t == 'color':
        w = ColorButton()
        w.sigChanged = w.sigColorChanged
        w.sigChanging = w.sigColorChanging
        w.value = w.color
        w.setValue = w.setColor
        self.hideWidget = False
        w.setFlat(True)
    elif t == 'colormap':
        from ..widgets.GradientWidget import GradientWidget  ## need this here to avoid import loop
        w = GradientWidget(orientation='bottom')
        w.sizeHint = lambda: QtCore.QSize(300, 35)
        w.sigChanged = w.sigGradientChangeFinished
        w.sigChanging = w.sigGradientChanged
        w.value = w.colorMap
        w.setValue = w.setColorMap
        self.hideWidget = False
        self.asSubItem = True
    else:
        raise Exception("Unknown type '%s'" % asUnicode(t))
    return w
def widgetEventFilter(self, obj, ev):
    """Event filter installed on the value widget.

    Intercepts Tab / Backtab key presses to move keyboard focus to the
    next / previous editable item; every other event passes through.
    Returns True when the event has been consumed.
    """
    if ev.type() != ev.KeyPress:
        return False
    pressed = ev.key()
    if pressed == QtCore.Qt.Key_Tab:
        self.focusNext(forward=True)
        return True  ## don't let anyone else see this event
    if pressed == QtCore.Qt.Key_Backtab:
        self.focusNext(forward=False)
        return True  ## don't let anyone else see this event
    return False
def setFocus(self):
    ## give this item keyboard focus by revealing its value editor
    self.showEditor()
def isFocusable(self):
    """Return whether this item may currently receive keyboard focus:
    it must be visible, enabled, and backed by a writable parameter."""
    opts = self.param.opts
    return opts['visible'] and opts['enabled'] and self.param.writable()
def valueChanged(self, param, val, force=False):
    ## called when the parameter's value has changed
    ParameterItem.valueChanged(self, param, val)
    if force or not fn.eq(val, self.widget.value()):
        try:
            # temporarily disconnect both directions of the sync to avoid
            # recursive valueChanged <-> widgetValueChanged signal loops
            self.widget.sigChanged.disconnect(self.widgetValueChanged)
            self.param.sigValueChanged.disconnect(self.valueChanged)
            self.widget.setValue(val)
            # push the (possibly coerced) widget value back into the parameter
            self.param.setValue(self.widget.value())
        finally:
            self.widget.sigChanged.connect(self.widgetValueChanged)
            self.param.sigValueChanged.connect(self.valueChanged)
    self.updateDisplayLabel()  ## always make sure label is updated, even if values match!
    self.updateDefaultBtn()
def updateDefaultBtn(self):
    """Sync the 'restore default' button with the parameter's state."""
    param = self.param
    # enabled only when the value differs from the default and is editable
    can_reset = not param.valueIsDefault() and param.opts['enabled'] and param.writable()
    self.defaultBtn.setEnabled(can_reset)
    # shown only when a default exists and the parameter is not read-only
    self.defaultBtn.setVisible(param.hasDefault() and not param.readonly())
def updateDisplayLabel(self, value=None):
    """Update the display label to reflect the value of the parameter.

    If *value* is None, the parameter's current value is displayed.
    """
    if value is None:
        value = self.param.value()
    # (removed an unused local: opts = self.param.opts)
    # Prefer the widget's own textual rendering when it has one, since it
    # already reflects formatting such as suffix/decimals or the selected
    # combo entry.
    if isinstance(self.widget, QtGui.QAbstractSpinBox):
        text = asUnicode(self.widget.lineEdit().text())
    elif isinstance(self.widget, QtGui.QComboBox):
        text = self.widget.currentText()
    else:
        text = asUnicode(value)
    self.displayLabel.setText(text)
def widgetValueChanged(self):
    ## called when the widget's value has been changed by the user
    # (previously bound the result to an unused local `newVal`;
    # Parameter.setValue is called purely for its side effect here)
    self.param.setValue(self.widget.value())
def widgetValueChanging(self, *args):
    """
    Called when the widget's value is changing, but not finalized.
    For example: editing text before pressing enter or changing focus.
    """
    # forward the in-progress value; *args carried by the widget signal are ignored
    self.param.sigValueChanging.emit(self.param, self.widget.value())
def selected(self, sel):
    """Swap between the editor widget and the static label as this item
    is selected (``sel=True``) or deselected (``sel=False``)."""
    ParameterItem.selected(self, sel)
    if self.widget is None:
        return
    wants_editor = sel and self.param.writable()
    if wants_editor:
        self.showEditor()
    elif self.hideWidget:
        self.hideEditor()
def showEditor(self):
    ## reveal the editor widget, hide the static text label, and focus the editor
    self.widget.show()
    self.displayLabel.hide()
    self.widget.setFocus(QtCore.Qt.OtherFocusReason)
    if isinstance(self.widget, SpinBox):
        self.widget.selectNumber()  # select the numerical portion of the text for quick editing
def hideEditor(self):
    ## swap the editor widget back out for the static display label
    self.widget.hide()
    self.displayLabel.show()
def limitsChanged(self, param, limits):
    """Push new limits into the editor widget (numeric editors only)."""
    ParameterItem.limitsChanged(self, param, limits)
    # only int/float editors understand bounds; other types ignore limits here
    if self.param.opts['type'] in ('int', 'float'):
        self.widget.setOpts(bounds=limits)
def defaultChanged(self, param, value):
    ## the parameter's default changed; refresh the default-button state
    self.updateDefaultBtn()
def treeWidgetChanged(self):
    """Called when this item is added or removed from a tree."""
    ParameterItem.treeWidgetChanged(self)
    ## add all widgets for this item into the tree
    if self.widget is not None:
        tree = self.treeWidget()
        if tree is None:
            return
        if self.asSubItem:
            # the editor lives in a full-width child row beneath this item
            self.subItem.setFirstColumnSpanned(True)
            tree.setItemWidget(self.subItem, 0, self.widget)
        tree.setItemWidget(self, 1, self.layoutWidget)
        self.displayLabel.hide()
        self.selected(False)  # start in the deselected (label-only) presentation
def defaultClicked(self):
    ## 'restore default' button pressed; reset the parameter to its default
    self.param.setToDefault()
def optsChanged(self, param, opts):
    """Called when any options are changed that are not
    name, value, default, or limits"""
    ParameterItem.optsChanged(self, param, opts)
    if 'enabled' in opts:
        self.updateDefaultBtn()
        self.widget.setEnabled(opts['enabled'])
    if 'readonly' in opts:
        self.updateDefaultBtn()
        if hasattr(self.widget, 'setReadOnly'):
            self.widget.setReadOnly(opts['readonly'])
        else:
            # widgets without a read-only mode are simply disabled instead
            self.widget.setEnabled(self.param.opts['enabled'] and not opts['readonly'])
    if 'tip' in opts:
        self.widget.setToolTip(opts['tip'])
    ## If widget is a SpinBox, pass options straight through
    if isinstance(self.widget, SpinBox):
        # send only options supported by spinbox
        sbOpts = {}
        # 'units' is an alias for 'suffix' unless a suffix was given explicitly
        if 'units' in opts and 'suffix' not in opts:
            sbOpts['suffix'] = opts['units']
        for k,v in opts.items():
            if k in self.widget.opts:
                sbOpts[k] = v
        self.widget.setOpts(**sbOpts)
        self.updateDisplayLabel()
class EventProxy(QtCore.QObject):
    """Forward every event of a QObject to an arbitrary callback.

    Installing an instance as an event filter routes each event of *qobj*
    through ``callback(obj, ev)``; the callback's boolean return value
    decides whether the event is filtered out.
    """

    def __init__(self, qobj, callback):
        QtCore.QObject.__init__(self)
        self.callback = callback
        qobj.installEventFilter(self)

    def eventFilter(self, watched, event):
        # delegate the filtering decision entirely to the stored callback
        return self.callback(watched, event)
class SimpleParameter(Parameter):
    """Parameter representing a single value.

    This parameter is backed by :class:`WidgetParameterItem` to represent the
    following parameter names:

      - 'int'
      - 'float'
      - 'bool'
      - 'str'
      - 'color'
      - 'colormap'
    """
    itemClass = WidgetParameterItem

    def __init__(self, *args, **kargs):
        """Initialize the parameter.

        This is normally called implicitly through :meth:`Parameter.create`.
        The keyword arguments available to :meth:`Parameter.__init__` are
        applicable.
        """
        Parameter.__init__(self, *args, **kargs)
        ## override a few methods for color parameters
        if self.opts['type'] == 'color':
            self.value = self.colorValue
            self.saveState = self.saveColorState

    def colorValue(self):
        # value() replacement for 'color' parameters: always return a color object
        return fn.mkColor(Parameter.value(self))

    def saveColorState(self, *args, **kwds):
        # saveState() replacement for 'color' parameters: store the color as an
        # (r, g, b, a) tuple instead of a color object
        state = Parameter.saveState(self, *args, **kwds)
        state['value'] = fn.colorTuple(self.value())
        return state

    def _interpretValue(self, v):
        """Coerce *v* to this parameter's declared type before storing it."""
        # NOTE: previously this local dict was named 'fn', shadowing the
        # module-level 'fn' helper module used by the color methods above.
        interpreter = {
            'int': int,
            'float': float,
            'bool': bool,
            'str': asUnicode,
            'color': self._interpColor,
            'colormap': self._interpColormap,
        }[self.opts['type']]
        return interpreter(v)

    def _interpColor(self, v):
        return fn.mkColor(v)

    def _interpColormap(self, v):
        if not isinstance(v, ColorMap):
            raise TypeError("Cannot set colormap parameter from object %r" % v)
        return v
# Register SimpleParameter as the handler for each of the basic value types.
registerParameterType('int', SimpleParameter, override=True)
registerParameterType('float', SimpleParameter, override=True)
registerParameterType('bool', SimpleParameter, override=True)
registerParameterType('str', SimpleParameter, override=True)
registerParameterType('color', SimpleParameter, override=True)
registerParameterType('colormap', SimpleParameter, override=True)
class GroupParameterItem(ParameterItem):
    """
    Group parameters are used mainly as a generic parent item that holds (and groups!) a set
    of child parameters. It also provides a simple mechanism for displaying a button or combo
    that can be used to add new parameters to the group.
    """
    def __init__(self, param, depth):
        ParameterItem.__init__(self, param, depth)
        self.updateDepth(depth)
        self.addItem = None
        if 'addText' in param.opts:
            addText = param.opts['addText']
            if 'addList' in param.opts:
                # combo box listing the addable entries
                self.addWidget = QtGui.QComboBox()
                self.addWidget.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)
                self.updateAddList()
                self.addWidget.currentIndexChanged.connect(self.addChanged)
            else:
                # plain "add new" push button
                self.addWidget = QtGui.QPushButton(addText)
                self.addWidget.clicked.connect(self.addClicked)
            w = QtGui.QWidget()
            l = QtGui.QHBoxLayout()
            l.setContentsMargins(0,0,0,0)
            w.setLayout(l)
            l.addWidget(self.addWidget)
            l.addStretch()
            self.addWidgetBox = w
            # the add widget occupies an extra child row kept at the end of the group
            self.addItem = QtGui.QTreeWidgetItem([])
            self.addItem.setFlags(QtCore.Qt.ItemIsEnabled)
            self.addItem.depth = self.depth + 1
            ParameterItem.addChild(self, self.addItem)
            self.addItem.setSizeHint(0, self.addWidgetBox.sizeHint())
        self.optsChanged(self.param, self.param.opts)

    def updateDepth(self, depth):
        ## Change item's appearance based on its depth in the tree
        ## This allows highest-level groups to be displayed more prominently.
        if depth == 0:
            for c in [0,1]:
                self.setBackground(c, QtGui.QBrush(QtGui.QColor(100,100,100)))
                self.setForeground(c, QtGui.QBrush(QtGui.QColor(220,220,255)))
                font = self.font(c)
                font.setBold(True)
                font.setPointSize(font.pointSize()+1)
                self.setFont(c, font)
        else:
            for c in [0,1]:
                self.setBackground(c, QtGui.QBrush(QtGui.QColor(220,220,220)))
                self.setForeground(c, QtGui.QBrush(QtGui.QColor(50,50,50)))
                font = self.font(c)
                font.setBold(True)
                #font.setPointSize(font.pointSize()+1)
                self.setFont(c, font)
        self.titleChanged()  # sets the size hint for column 0 which is based on the new font

    def addClicked(self):
        """Called when "add new" button is clicked
        The parameter MUST have an 'addNew' method defined.
        """
        self.param.addNew()

    def addChanged(self):
        """Called when "add new" combo is changed
        The parameter MUST have an 'addNew' method defined.
        """
        if self.addWidget.currentIndex() == 0:
            # index 0 is the placeholder 'addText' entry; nothing to add
            return
        typ = asUnicode(self.addWidget.currentText())
        self.param.addNew(typ)
        # reset the combo back to the placeholder entry
        self.addWidget.setCurrentIndex(0)

    def treeWidgetChanged(self):
        ParameterItem.treeWidgetChanged(self)
        tw = self.treeWidget()
        if tw is None:
            return
        self.setFirstColumnSpanned(True)
        if self.addItem is not None:
            tw.setItemWidget(self.addItem, 0, self.addWidgetBox)
            self.addItem.setFirstColumnSpanned(True)

    def addChild(self, child):  ## make sure added childs are actually inserted before add btn
        if self.addItem is not None:
            ParameterItem.insertChild(self, self.childCount()-1, child)
        else:
            ParameterItem.addChild(self, child)

    def optsChanged(self, param, opts):
        ParameterItem.optsChanged(self, param, opts)
        if 'addList' in opts:
            self.updateAddList()
        if hasattr(self, 'addWidget'):
            if 'enabled' in opts:
                self.addWidget.setEnabled(opts['enabled'])
            if 'tip' in opts:
                self.addWidget.setToolTip(opts['tip'])

    def updateAddList(self):
        ## rebuild the combo entries; block signals so this does not fire addChanged
        self.addWidget.blockSignals(True)
        try:
            self.addWidget.clear()
            self.addWidget.addItem(self.param.opts['addText'])
            for t in self.param.opts['addList']:
                self.addWidget.addItem(t)
        finally:
            self.addWidget.blockSignals(False)
class GroupParameter(Parameter):
    """
    Group parameters are used mainly as a generic parent item that holds (and groups!) a set
    of child parameters.
    It also provides a simple mechanism for displaying a button or combo
    that can be used to add new parameters to the group. To enable this, the group
    must be initialized with the 'addText' option (the text will be displayed on
    a button which, when clicked, will cause addNew() to be called). If the 'addList'
    option is specified as well, then a dropdown-list of addable items will be displayed
    instead of a button.
    """
    itemClass = GroupParameterItem
    # emitted by addNew(); carries (self, type)
    sigAddNew = QtCore.Signal(object, object)  # self, type

    def addNew(self, typ=None):
        """
        This method is called when the user has requested to add a new item to the group.
        By default, it emits ``sigAddNew(self, typ)``.
        """
        self.sigAddNew.emit(self, typ)

    def setAddList(self, vals):
        """Change the list of options available for the user to add to the group."""
        self.setOpts(addList=vals)
registerParameterType('group', GroupParameter, override=True)
class ListParameterItem(WidgetParameterItem):
    """
    WidgetParameterItem subclass providing comboBox that lets the user select from a list of options.
    """
    def __init__(self, param, depth):
        # targetValue remembers the most recently requested value so it can be
        # re-selected after the limits (and thus the combo entries) change
        self.targetValue = None
        WidgetParameterItem.__init__(self, param, depth)

    def makeWidget(self):
        """Build the combo-box editor and wire it into the value/sigChanged protocol."""
        # (removed two unused locals: opts = self.param.opts; t = opts['type'])
        w = QtGui.QComboBox()
        w.setMaximumHeight(20)  ## set to match height of spin box and line edit
        w.sigChanged = w.currentIndexChanged
        w.value = self.value
        w.setValue = self.setValue
        self.widget = w  ## needs to be set before limits are changed
        self.limitsChanged(self.param, self.param.opts['limits'])
        if len(self.forward) > 0:
            self.setValue(self.param.value())
        return w

    def value(self):
        # map the displayed text back to its underlying value
        key = asUnicode(self.widget.currentText())
        return self.forward.get(key, None)

    def setValue(self, val):
        self.targetValue = val
        if val not in self.reverse[0]:
            # unknown value: fall back to the first entry
            self.widget.setCurrentIndex(0)
        else:
            key = self.reverse[1][self.reverse[0].index(val)]
            ind = self.widget.findText(key)
            self.widget.setCurrentIndex(ind)

    def limitsChanged(self, param, limits):
        # set up forward / reverse mappings for name:value
        if len(limits) == 0:
            limits = ['']  ## Can never have an empty list--there is always at least a single blank item.
        self.forward, self.reverse = ListParameter.mapping(limits)
        try:
            self.widget.blockSignals(True)
            val = self.targetValue  #asUnicode(self.widget.currentText())
            self.widget.clear()
            for k in self.forward:
                self.widget.addItem(k)
                if k == val:
                    self.widget.setCurrentIndex(self.widget.count()-1)
            self.updateDisplayLabel()
        finally:
            self.widget.blockSignals(False)
class ListParameter(Parameter):
    """Parameter with a list of acceptable values.

    By default, this parameter is represented by a :class:`ListParameterItem`,
    displaying a combo box to select a value from the list.
    In addition to the generic :class:`~pyqtgraph.parametertree.Parameter`
    options, this parameter type accepts a ``limits`` argument specifying the
    list of allowed values. ``values`` is an alias and may be used instead.
    The values may generally be of any data type, as long as they can be
    represented as a string. If the string representation provided is
    undesirable, the values may be given as a dictionary mapping the desired
    string representation to the value.
    """
    itemClass = ListParameterItem

    def __init__(self, **opts):
        self.forward = OrderedDict()  ## {name: value, ...}
        self.reverse = ([], [])  ## ([value, ...], [name, ...])
        # Parameter uses 'limits' option to define the set of allowed values
        if 'values' in opts:
            # 'values' is accepted as an alias for 'limits'
            opts['limits'] = opts['values']
        if opts.get('limits', None) is None:
            opts['limits'] = []
        Parameter.__init__(self, **opts)
        self.setLimits(opts['limits'])

    def setLimits(self, limits):
        """Change the list of allowed values."""
        self.forward, self.reverse = self.mapping(limits)
        Parameter.setLimits(self, limits)
        # if the current value is no longer allowed, snap to the first allowed value
        if len(self.reverse[0]) > 0 and self.value() not in self.reverse[0]:
            self.setValue(self.reverse[0][0])

    @staticmethod
    def mapping(limits):
        # Return forward and reverse mapping objects given a limit specification
        forward = OrderedDict()  ## {name: value, ...}
        reverse = ([], [])  ## ([value, ...], [name, ...])
        if isinstance(limits, dict):
            # dict maps display name -> value
            for k, v in limits.items():
                forward[k] = v
                reverse[0].append(v)
                reverse[1].append(k)
        else:
            # sequence: display name is the stringified value
            for v in limits:
                n = asUnicode(v)
                forward[n] = v
                reverse[0].append(v)
                reverse[1].append(n)
        return forward, reverse
registerParameterType('list', ListParameter, override=True)
class ActionParameterItem(ParameterItem):
    """ParameterItem displaying a clickable button."""
    def __init__(self, param, depth):
        ParameterItem.__init__(self, param, depth)
        # the button is wrapped in a layout widget so it stays left-aligned
        self.layoutWidget = QtGui.QWidget()
        self.layout = QtGui.QHBoxLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layoutWidget.setLayout(self.layout)
        self.button = QtGui.QPushButton()
        #self.layout.addSpacing(100)
        self.layout.addWidget(self.button)
        self.layout.addStretch()
        self.button.clicked.connect(self.buttonClicked)
        self.titleChanged()
        self.optsChanged(self.param, self.param.opts)

    def treeWidgetChanged(self):
        ParameterItem.treeWidgetChanged(self)
        tree = self.treeWidget()
        if tree is None:
            return
        self.setFirstColumnSpanned(True)
        tree.setItemWidget(self, 0, self.layoutWidget)

    def titleChanged(self):
        self.setText(0, self.param.title())
        # paint the item's own text in the background color so only the
        # button label is effectively visible
        color = self.layoutWidget.palette().color(self.layoutWidget.backgroundRole())
        self.setForeground(0, color)
        self.button.setText(self.param.title())
        self.setSizeHint(0, self.button.sizeHint())

    def optsChanged(self, param, opts):
        ParameterItem.optsChanged(self, param, opts)
        if 'enabled' in opts:
            self.button.setEnabled(opts['enabled'])
        if 'tip' in opts:
            self.button.setToolTip(opts['tip'])

    def buttonClicked(self):
        # forward the click to the parameter (ActionParameter.activate)
        self.param.activate()
class ActionParameter(Parameter):
    """Used for displaying a button within the tree.

    ``sigActivated(self)`` is emitted when the button is clicked.
    """
    itemClass = ActionParameterItem
    sigActivated = QtCore.Signal(object)

    def activate(self):
        self.sigActivated.emit(self)
        # also record the activation in the parameter's state-change stream
        self.emitStateChanged('activated', None)
registerParameterType('action', ActionParameter, override=True)
class TextParameterItem(WidgetParameterItem):
    """ParameterItem displaying a QTextEdit widget."""

    def makeWidget(self):
        """Return a multi-line text editor placed in a full-width sub-item row."""
        self.hideWidget = False
        self.asSubItem = True
        self.textBox = w = QtGui.QTextEdit()
        w.sizeHint = lambda: QtCore.QSize(300, 100)
        # use asUnicode (not str) for consistency with the other editors in
        # this module and to avoid encoding errors on Python 2 with
        # non-ASCII text
        w.value = lambda: asUnicode(w.toPlainText())
        w.setValue = w.setPlainText
        w.sigChanged = w.textChanged
        return w
class TextParameter(Parameter):
    """Editable string, displayed as large text box in the tree."""
    # rendered by TextParameterItem (a QTextEdit in a full-width sub-item row)
    itemClass = TextParameterItem
registerParameterType('text', TextParameter, override=True)
| StarcoderdataPython |
5079291 | <reponame>fortyninemaps/karta
import unittest
import os.path
import numpy as np
from test_helper import TMPDATA
import karta
from karta.raster import _gdal
class GdalTests(unittest.TestCase):
    """Round-trip tests for karta's GDAL-backed GeoTIFF I/O (files go to TMPDATA)."""

    def test_numpy_type_coercion(self):
        # GDAL band data-type codes (2..11) must map to the expected numpy dtypes
        self.assertEqual(_gdal.numpy_dtype(2), np.uint16)
        self.assertEqual(_gdal.numpy_dtype(3), np.int16)
        self.assertEqual(_gdal.numpy_dtype(4), np.uint32)
        self.assertEqual(_gdal.numpy_dtype(5), np.int32)
        self.assertEqual(_gdal.numpy_dtype(6), np.float32)
        self.assertEqual(_gdal.numpy_dtype(7), np.float64)
        self.assertEqual(_gdal.numpy_dtype(8), np.complex64)
        self.assertEqual(_gdal.numpy_dtype(9), np.complex64)
        self.assertEqual(_gdal.numpy_dtype(10), np.complex64)
        self.assertEqual(_gdal.numpy_dtype(11), np.complex64)
        return

    def test_write_read(self):
        # try writing a file, then read it back in and verify that it matches
        v = peaks(500)[:100,:]
        utm7 = karta.crs.ProjectedCRS("+proj=utm +zone=7 +north +datum=WGS84",
                                      "UTM 7N (WGS 84)")
        g = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v, crs=utm7)
        fpath = os.path.join(TMPDATA, "test.tif")
        ret = g.to_geotiff(fpath, compress=None)
        # to_geotiff should hand back a grid equal to the original
        self.assertEqual(ret, g)
        gnew = karta.read_geotiff(fpath)
        # CRS, transform, dtype, and cell values must all survive the round trip
        self.assertTrue("+proj=utm" in gnew.crs.get_proj4())
        self.assertTrue("+zone=7" in gnew.crs.get_proj4())
        self.assertEqual(g.transform, gnew.transform)
        self.assertEqual(g.values.dtype, gnew.values.dtype)
        self.assertTrue(np.all(g[:,:] == gnew[:,:]))
        return

    def test_write_read_disk(self):
        # try writing a file, then open it without loading into memory and verify
        v = peaks(500)[:100,:]
        utm7 = karta.crs.ProjectedCRS("+proj=utm +zone=7 +north +datum=WGS84",
                                      "UTM 7N (WGS 84)")
        g = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v, crs=utm7)
        fpath = os.path.join(TMPDATA, "test.tif")
        g.to_geotiff(fpath, compress=None)
        gnew = karta.read_geotiff(fpath, in_memory=False)
        self.assertEqual(g.transform, gnew.transform)
        self.assertEqual(g.values.dtype, gnew.values.dtype)
        self.assertEqual(g.size, gnew.size)
        # strided and reversed slices should match the in-memory grid
        self.assertTrue(np.all(g[10:50:3, 15:45:2] == gnew[10:50:3, 15:45:2]))
        self.assertTrue(np.all(g[10:50:3, 45:15:-2] == gnew[10:50:3, 45:15:-2]))
        self.assertTrue(np.all(g[:,:] == gnew[:,:]))
        return

    def test_write_compress(self):
        # writing should succeed with both LZW and PACKBITS compression
        v = peaks(500)[:100,:]
        utm7 = karta.crs.ProjectedCRS("+proj=utm +zone=7 +north +datum=WGS84",
                                      "UTM 7N (WGS 84)")
        g = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v, crs=utm7)
        fpath = os.path.join(TMPDATA, "test.tif")
        g.to_geotiff(fpath, compress="LZW")
        self.assertTrue(os.path.isfile(fpath))
        os.remove(fpath)
        g.to_geotiff(fpath, compress="PACKBITS")
        self.assertTrue(os.path.isfile(fpath))
        return

    def test_read_as_bands(self):
        # write several files and then read as a single multiband grid
        v = peaks(500)[:100,:]
        utm7 = karta.crs.ProjectedCRS("+proj=utm +zone=7 +north +datum=WGS84",
                                      "UTM 7N (WGS 84)")
        g1 = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v, crs=utm7)
        g2 = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v**2, crs=utm7)
        g3 = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v+2, crs=utm7)
        g4 = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v*2, crs=utm7)
        paths = []
        for i, g in enumerate((g1, g2, g3, g4)):
            fpath = os.path.join(TMPDATA, "test{0}.tif".format(i))
            g.to_geotiff(fpath, compress=None)
            paths.append(fpath)
        gnew = karta.from_geotiffs(*paths)
        self.assertTrue("+proj=utm" in gnew.crs.get_proj4())
        self.assertTrue("+zone=7" in gnew.crs.get_proj4())
        self.assertEqual(g.transform, gnew.transform)
        self.assertEqual(g.values.dtype, gnew.values.dtype)
        self.assertEqual(gnew.size, (100, 500))
        self.assertEqual(gnew.nbands, 4)
        return
class GdalVirtualArrayTests(unittest.TestCase):
    """Tests for grids opened with in_memory=False (values stay disk-backed)."""

    def setUp(self):
        # write a small grid to disk, then re-open it as a virtual array
        v = peaks(500)[:100,:]
        utm7 = karta.crs.ProjectedCRS("+proj=utm +zone=7 +north +datum=WGS84",
                                      "UTM 7N (WGS 84)")
        g = karta.RegularGrid([15.0, 15.0, 30.0, 30.0, 0.0, 0.0], v, crs=utm7)
        fpath = os.path.join(TMPDATA, "test.tif")
        g.to_geotiff(fpath, compress=None)
        self.grid = karta.read_geotiff(fpath, in_memory=False)

    def test_slicing_virtual(self):
        """ make sure that slicing a disk-based array works """
        self.grid[5:10, 7:15]
        self.grid[5:10:2, 7:15]
        self.grid[10:5:-1, 7:15]
        # mixed index/slice access should yield 1-D results of the right length
        a1 = self.grid[12, 7:15, 0]
        self.assertEqual(a1.shape, (8,))
        a2 = self.grid[5:10, 9, 0]
        self.assertEqual(a2.shape, (5,))
        # full scalar indexing should yield a numpy scalar
        b = self.grid[12, 13, 0]
        self.assertEqual(type(b), np.float64)
        return

    def test_iteration_virtual(self):
        # row iteration over the virtual array must cover all 100 rows
        i = 0
        for row in self.grid.values:
            i += 1
        self.assertEqual(i, 100)
        return
def peaks(n=49):
    """Evaluate MATLAB's classic 'peaks' surface on an n x n grid over [-3, 3]^2."""
    axis = np.linspace(-3, 3, n)
    X, Y = np.meshgrid(axis, axis)
    term1 = 3.0 * (1 - X) ** 2 * np.exp(-X ** 2 - (Y + 1) ** 2)
    term2 = -10.0 * (X / 5.0 - X ** 3 - Y ** 5) * np.exp(-X ** 2 - Y ** 2)
    term3 = -(1.0 / 3.0) * np.exp(-(X + 1) ** 2 - Y ** 2)
    return term1 + term2 + term3
if __name__ == "__main__":
    # run the test suites above when executed as a script
    unittest.main()
| StarcoderdataPython |
6457419 | from pydantic import Field, BaseModel
from typing import Optional
from datetime import datetime
class WeekSave(BaseModel):
    """Weekly energy-saving record for a device.

    Values are the difference between the last cumulative reading of the
    previous week and the last cumulative reading of the current week.
    """
    # device serial number
    serial_number: str = Field('', title="设备序列号")
    # last day covered by this weekly record
    log_date: str = Field('', title="结束日期")
    # previous sample time
    prev_time: Optional[datetime] = Field(None, title="前一时刻")
    # current sample time
    curr_time: Optional[datetime] = Field(None, title="当前时刻")
    # cumulative heating time
    cumulative_heat_time: int = Field(0, title="累积加热时间")
    # cumulative electricity used
    cumulative_use_electricity: int = Field(0, title="累积用电量")
    # cumulative electricity saved
    cumulative_electricity_saving: int = Field(0, title="累计省电量")
    # cumulative hot water used
    cumulative_heat_water: int = Field(0, title="累计使用热水量")
    # cumulative machine usage time
    cumulative_duration_machine: int = Field(0, title="累计使用时间")
    # energy-saving ratio
    save_ratio: float = Field(0, title="节能率")
    # data-anomaly flag; 0 means normal
    is_valid: int = Field(0, title='数据异常状态,0表示正常')
    # UTC timestamp of the record
    utctime: Optional[datetime]
| StarcoderdataPython |
6494269 | <filename>test/test_preprocessor.py
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import pytest
from dataproperty import LineBreakHandling, Preprocessor
class Test_Preprocessor_update:
    """Exercise Preprocessor defaults and the return contract of update()."""

    def test_normal(self):
        preprocessor = Preprocessor()
        # factory defaults
        assert preprocessor.strip_str is None
        assert preprocessor.replace_tabs_with_spaces is True
        assert preprocessor.tab_length == 2
        assert preprocessor.line_break_handling is LineBreakHandling.NOP
        assert preprocessor.line_break_repl == " "
        assert preprocessor.dequote is False
        assert preprocessor.is_escape_html_tag is False
        assert preprocessor.is_escape_formula_injection is False
        # update() is truthy when at least one option actually changes
        assert preprocessor.update(
            strip_str='"',
            replace_tabs_with_spaces=False,
            tab_length=4,
            line_break_handling=LineBreakHandling.REPLACE,
            line_break_repl="<br>",
            dequote=True,
            is_escape_html_tag=True,
            is_escape_formula_injection=True,
        )
        assert preprocessor.strip_str == '"'
        assert preprocessor.replace_tabs_with_spaces is False
        assert preprocessor.tab_length == 4
        assert preprocessor.line_break_handling is LineBreakHandling.REPLACE
        assert preprocessor.line_break_repl == "<br>"
        assert preprocessor.dequote is True
        assert preprocessor.is_escape_html_tag is True
        assert preprocessor.is_escape_formula_injection is True
        # re-applying the same value is falsy; a genuinely new value is truthy
        assert not preprocessor.update(strip_str='"')
        assert preprocessor.update(strip_str="")
class Test_Preprocessor_preprocess:
    """Behavior of preprocess() with dequote enabled."""

    @pytest.mark.parametrize(
        ["value", "expected"],
        [
            # quotes are stripped only when they wrap the entire string
            ['abc "efg"', 'abc "efg"'],
            ['"abc efg"', "abc efg"],
            ["'abc efg'", "abc efg"],
            ['"abc" "efg"', '"abc" "efg"'],
            ["'abc' 'efg'", "'abc' 'efg'"],
            ["\"abc 'efg'\"", "abc 'efg'"],
        ],
    )
    def test_normal_dequote(self, value, expected):
        preprocessor = Preprocessor(
            dequote=True,
        )
        # preprocess returns (processed value, value with ANSI escapes removed)
        data, no_ansi_escape_data = preprocessor.preprocess(value)
        assert data == expected
class Test_Preprocessor_preprocess_string:
    """Non-string inputs must pass through the string preprocessor unchanged."""

    @pytest.mark.parametrize(
        ["value", "expected"],
        [
            [{"1": 1}, {"1": 1}],
            [{"1"}, {"1"}],
        ],
    )
    def test_not_str(self, value, expected):
        preprocessor = Preprocessor(dequote=True)
        # name-mangled access to the private __preprocess_string method
        data, _ = preprocessor._Preprocessor__preprocess_string(value)
        assert data == expected
| StarcoderdataPython |
11354383 | # -*- coding: utf-8 -*-
# Author: XuMing <<EMAIL>>
# Data: 17/10/16
# Brief:
# Hyper-parameters and data paths for the QA model.
config = {
    "qa_examples_file": "../data/qa.examples.train.e2e.top10.filter.tsv",  # training examples (TSV)
    "word_embeddings_file": "../data/word_embedding/glove.6B.100d.txt",  # pretrained GloVe vectors
    "vocabulary_size": 400000,
    "embedding_size": 100,  # must match the dimension of the embedding file
    "num_classes": 6,
    "filter_sizes": [3, 4],
    "num_filters": 4,
    "dropout_keep_prob": 0.85,
    "embeddings_trainable": True,  # fine-tune embeddings during training
    "total_iter": 100000,
    "batch_size": 400,
    "val_size": 400,
    "l2_reg_lambda": 0.1
}
| StarcoderdataPython |
11304612 | <gh_stars>0
from __future__ import print_function
from functools import partial
from urllib.request import urlretrieve
import pandas as pd
import torch
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
# hyper-opt
from hyperopt import hp, Trials, STATUS_OK, tpe, fmin
from hyperopt import space_eval
from hyperopt.pyll import scope
from tensorflow.keras.layers import Dense, Dropout, Activation
from tensorflow.keras.models import Sequential
# utils
import pickle
import argparse
import os
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import load_model
from metrics import ConfusionMatrix, Metric
from collections import Counter
# dataset key -> (preprocessed folder/file prefix, decision/label column name)
data_dict = {
    'adult_income' : ('adult_income', 'income'),
    'compas' : ('compas', 'two_year_recid'),
    'default_credit' : ('default_credit', 'DEFAULT_PAYEMENT'),
    'marketing' : ('marketing', 'subscribed')
}

# dataset key -> (minority-group one-hot column, majority-group one-hot column)
subgroup_dict = {
    'adult_income' : ('gender_Female', 'gender_Male'),
    'compas' : ('race_African-American', 'race_Caucasian'),
    'default_credit' : ('SEX_Female', 'SEX_Male'),
    'marketing' : ('age_age:not30-60', 'age_age:30-60')
}
def prepare_data(data, rseed):
    """Load the train/test split for *data* (seed *rseed*) as scaled float32 arrays.

    Returns (X_train, y_train, X_test, y_test).
    """
    dataset, decision = data_dict[data]
    datadir = './preprocessed/{}/'.format(dataset)
    #filenames
    suffix = 'OneHot'
    train_file = '{}{}_train{}_{}.csv'.format(datadir, dataset, suffix, rseed)
    test_file = '{}{}_test{}_{}.csv'.format(datadir, dataset, suffix, rseed)
    # load dataframe
    df_train = pd.read_csv(train_file)
    df_test = pd.read_csv(test_file)
    # prepare the data
    scaler = StandardScaler()
    ## training set
    y_train = df_train[decision]
    X_train = df_train.drop(labels=[decision], axis = 1)
    X_train = scaler.fit_transform(X_train)
    ### cast
    X_train = np.asarray(X_train).astype(np.float32)
    y_train = np.asarray(y_train).astype(np.float32)
    ## test set
    y_test = df_test[decision]
    X_test = df_test.drop(labels=[decision], axis = 1)
    # NOTE(review): fit_transform here re-fits the scaler on the test set;
    # standard practice is scaler.transform(X_test) using the train-set
    # statistics. Confirm whether per-split scaling is intentional.
    X_test = scaler.fit_transform(X_test)
    ### cast
    X_test = np.asarray(X_test).astype(np.float32)
    y_test = np.asarray(y_test).astype(np.float32)
    return X_train, y_train, X_test, y_test
def prepare_data_as_dataframe(data, rseed):
    """Load the one-hot-encoded train/test CSVs for *data* (seed *rseed*) as dataframes."""
    dataset, _ = data_dict[data]
    # './preprocessed/<ds>/<ds>_{split}OneHot_<seed>.csv'
    path_template = './preprocessed/{0}/{0}_{{}}OneHot_{1}.csv'.format(dataset, rseed)
    df_train = pd.read_csv(path_template.format('train'))
    df_test = pd.read_csv(path_template.format('test'))
    return df_train, df_test
def get_metrics(dataset, model_class, rseed):
    """Evaluate a pretrained model's per-metric unfairness on one dataset/seed.

    Returns (output_train, output_test): lists of dicts with keys
    'model_class', 'unfairness', and 'metric'.
    """
    # load data as np array
    X_train, y_train, X_test, y_test = prepare_data(dataset, rseed)
    # load data as dataframe
    df_train, df_test = prepare_data_as_dataframe( dataset, rseed)
    # load meta data for fairness metrics
    # _, decision = data_dict[dataset]
    decision = y_train
    min_feature, maj_feature = subgroup_dict[dataset]
    print("---------------------------->>> dataset = {}".format(dataset))
    print("---------------------------->>> model = {}".format(model_class))
    # model path
    # NOTE(review): the '.h5' suffix is used for every model class, including
    # the pickled RF/AdaBoost/XgBoost models -- confirm against the files on disk.
    outdir = './pretrained/{}/'.format(dataset)
    model_path = '{}{}_{}.h5'.format(outdir, model_class, rseed)

    def get_predictions(model_class, X_train, y_train, X_test, y_test):
        # load the model and compute 0/1 predictions + accuracy on both splits
        predictions_train, predictions_test = None, None
        acc_train, acc_test = None, None
        prediction_metrics = {}
        if model_class == 'DNN':
            # load model
            mdl = load_model(model_path)
            print('model loaded')
            # get prediction
            #---train
            # threshold the sigmoid output at 0.5 and flatten to a list of ints
            predictions_train = (mdl.predict(X_train) > 0.5).astype('int32')
            predictions_train = [x[0] for x in predictions_train]
            print(Counter(predictions_train))
            #---test
            predictions_test = (mdl.predict(X_test) > 0.5).astype('int32')
            predictions_test = [x[0] for x in predictions_test]
            # get accuracy
            acc_train = mdl.evaluate(X_train, y_train)[1]
            acc_test = mdl.evaluate(X_test, y_test)[1]
            print(acc_train, acc_test)
        if model_class in ['RF', 'AdaBoost', 'XgBoost']:
            # load model
            mdl = pickle.load(open(model_path,"rb"))
            # get prediction
            #---train
            predictions_train = mdl.predict(X_train)
            predictions_train = [int(x) for x in predictions_train]
            # print('predictions_train',predictions_train)
            #---test
            predictions_test = mdl.predict(X_test)
            predictions_test = [int(x) for x in predictions_test]
            # get accuracy
            acc_train = accuracy_score(y_train, mdl.predict(X_train))
            acc_test = accuracy_score(y_test, mdl.predict(X_test))
        #----train
        prediction_metrics['predictions_train'] = predictions_train
        prediction_metrics['acc_train'] = acc_train
        #----test
        prediction_metrics['predictions_test'] = predictions_test
        prediction_metrics['acc_test'] = acc_test
        return prediction_metrics

    def get_fairness_metrics(df_train, df_test, prediction_metrics):
        # build subgroup confusion matrices and derive a fairness Metric per split
        # output object
        fairness_metrics = {}
        #----train
        df_train['predictions'] = prediction_metrics['predictions_train']
        cm_train = ConfusionMatrix(df_train[min_feature], df_train[maj_feature], df_train['predictions'], decision)
        cm_minority_train, cm_majority_train = cm_train.get_matrix()
        fm_train = Metric(cm_minority_train, cm_majority_train)
        #----test
        df_test['predictions'] = prediction_metrics['predictions_test']
        cm_test = ConfusionMatrix(df_test[min_feature], df_test[maj_feature], df_test['predictions'], y_test)
        cm_minority_test, cm_majority_test = cm_test.get_matrix()
        fm_test = Metric(cm_minority_test, cm_majority_test)
        fairness_metrics['train'] = fm_train
        fairness_metrics['test'] = fm_test
        return fairness_metrics

    def get_output(dataset, model_class, output_type, prediction_metrics, fairness_metrics):
        # evaluate the selected fairness metrics (codes 1, 3, 4, 5 of metrics_map)
        metrics = [1, 3, 4, 5]
        metrics_map = {
            1: 'SP',
            2: 'PP',
            3: 'PE',
            4: 'EOpp',
            5: 'EOdds',
            6: 'CUAE'
        }
        res = []
        for metric in metrics:
            dd = {}
            # model class
            dd['model_class'] = model_class
            # unfairness
            dd['unfairness'] = np.round(fairness_metrics['{}'.format(output_type)].fairness_metric(metric), 3)
            # metric
            dd['metric'] = metrics_map[metric]
            res.append(dd)
        return res

    prediction_metrics = get_predictions(model_class, X_train, y_train, X_test, y_test)
    fairness_metrics = get_fairness_metrics(df_train, df_test, prediction_metrics)
    output_train = get_output(dataset, model_class, 'train', prediction_metrics, fairness_metrics)
    output_test = get_output(dataset, model_class, 'test', prediction_metrics, fairness_metrics)
    return output_train, output_test
def process_model_class(dataset, model_class):
    """Average the four test-set unfairness rows for *model_class* over seeds.

    Note: only seed 0 is processed (range(1)), so each 'mean' is a single run.
    """
    # one bucket per fairness metric (SP, PE, EOpp, EOdds), in get_metrics order
    test_list = [ [], [], [], [] ]
    for rseed in range(1):
        _, output_test = get_metrics(dataset, model_class, rseed)
        test_list[0].append(output_test[0])
        test_list[1].append(output_test[1])
        test_list[2].append(output_test[2])
        test_list[3].append(output_test[3])
    output_test = []
    for ll in test_list:
        dd = {
            'model_class' : ll[0]['model_class'],
            'unfairness' : np.round(np.mean([dd['unfairness'] for dd in ll]), 3),
            'metric' : ll[0]['metric'],
        }
        output_test.append(dd)
    return output_test
if __name__ == '__main__':
    # datasets and model classes to evaluate
    datasets = ['adult_income', 'compas', 'default_credit', 'marketing']
    model_classes = ['AdaBoost','DNN', 'RF', 'XgBoost']
    for dataset in datasets:
        row_list = []
        for model_class in model_classes:
            output_test = process_model_class(dataset, model_class)
            for dd in output_test:
                print(dd)
                dd['group'] = 'Non-Members'
                print(dd)
                row_list.append(dd)
        # write one CSV of unfairness rows per dataset
        save_dir = ('./results/unfairness_bbox')
        os.makedirs(save_dir, exist_ok=True)
        filename = '{}/{}.csv'.format(save_dir, dataset)
        df = pd.DataFrame(row_list)
        df.to_csv(filename, encoding='utf-8', index=False)
| StarcoderdataPython |
4952878 | # Generated by Django 3.1.3 on 2020-12-02 21:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``core`` app.

    Adds integer id columns to the two unemployment tables and retypes a
    number of rate/price columns to DecimalField(max_digits=6,
    decimal_places=3).
    """

    dependencies = [
        ('core', '0008_auto_20201130_2256'),
    ]

    operations = [
        # NOTE(review): 'indutry_id' looks like a typo for 'industry_id',
        # but it must match the model field name exactly — verify against
        # the UnemploymentByIndustry model before renaming.
        migrations.AddField(
            model_name='unemploymentbyindustry',
            name='indutry_id',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='unemploymentbyoccupation',
            name='occupation_id',
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='age',
            field=models.IntegerField(unique=True),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='associates',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='attorney',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='bachelors',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='diploma',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='doctorate',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='dropout',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='license',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='masters',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='mba',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='professional',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='growthratebyageeducation',
            name='some_college',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='interest_rate',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='max_cagr',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='min_cagr',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='payment_cap_factor',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='prepayment_fv',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='prepayment_growth',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='targeted_cagr',
            field=models.DecimalField(decimal_places=3, max_digits=6),
        ),
        migrations.AlterField(
            model_name='pricing',
            name='term',
            field=models.IntegerField(unique=True),
        ),
    ]
| StarcoderdataPython |
1636458 | # Generated by Django 3.1.13 on 2021-12-10 11:04
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: daily SA coronavirus counter snapshots.

    Creates the SACoronavirusCounter table (one row of cumulative totals
    per day) and enforces at most one row per date.
    """

    dependencies = [
        ("covid_cases", "0001_initial"),
    ]

    operations = [
        migrations.CreateModel(
            name="SACoronavirusCounter",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "tests",
                    models.PositiveIntegerField(help_text="Total tests completed"),
                ),
                (
                    "positive",
                    models.PositiveIntegerField(
                        help_text="Total positive cases identified"
                    ),
                ),
                (
                    "recoveries",
                    models.PositiveIntegerField(help_text="Total recoveries"),
                ),
                ("deaths", models.PositiveIntegerField(help_text="Total deaths")),
                (
                    "vaccines",
                    models.PositiveIntegerField(
                        help_text="Total vaccines administered"
                    ),
                ),
                (
                    "date",
                    models.DateField(
                        default=datetime.date.today, help_text="The day the data is for"
                    ),
                ),
                (
                    "created_at",
                    models.DateTimeField(
                        auto_now_add=True,
                        help_text="When this was added to the database",
                    ),
                ),
                (
                    "updated_at",
                    models.DateTimeField(
                        auto_now=True, help_text="When this was last updated"
                    ),
                ),
            ],
        ),
        # One counter row per calendar date.
        migrations.AddConstraint(
            model_name="sacoronaviruscounter",
            constraint=models.UniqueConstraint(
                fields=("date",), name="unique_counters"
            ),
        ),
    ]
| StarcoderdataPython |
9693142 | <filename>Machine Learning Projects/Speech Controlled LED/speech_led.py
import os
import time
import playsound
import speech_recognition as sr
from gtts import gTTS
import serial

# pyserial opens the port as part of Serial(); calling ser.open() again
# raises SerialException, and open() returns None, so the original
# ``print(ser.open().value())`` crashed with AttributeError.  Report the
# port state via the ``is_open`` attribute instead.
ser = serial.Serial('COM4', 9600)
print(ser.is_open)

# NOTE(review): this prompt's result is shadowed by the local ``text`` in
# led_on_off() below — presumably kept only as an on-screen hint.
text = input("Enter which led should be turned on GREEN /RED /YELLOW:")
def speak(text):
    """Synthesize *text* with Google TTS and play it through the speakers."""
    speech = gTTS(text=text, lang="en")
    output_path = "voice.mp3"
    speech.save(output_path)
    playsound.playsound(output_path)
def get_audio():
    """Record one utterance from the default microphone and return the
    Google speech-to-text transcription (empty string on failure)."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        audio = recognizer.listen(source)
        said = ""
        try:
            said = recognizer.recognize_google(audio)
            print(said)
        except Exception as e:
            print("Exception" + str(e))
    return said
def led_on_off():
    """Listen for voice commands forever and drive the matching LED.

    Sends 'G'/'R'/'Y' over the serial port for "green"/"red"/"yellow".
    The original recursed into itself after every command, growing the
    call stack without bound until RecursionError; an explicit loop
    preserves the behaviour without the stack growth.
    """
    while True:
        text = get_audio()
        if text == "green":
            print('LED GREEN IS ON')
            time.sleep(0.1)
            ser.write(b'G')
        elif text == "red":
            print('LED RED IS ON')
            time.sleep(0.1)
            ser.write(b'R')
        elif text == "yellow":
            print('LED YELLOW IS ON')
            time.sleep(0.1)
            ser.write(b'Y')
        else:
            print('ALL LEDS ARE OFF')
# Give the Arduino time to reset after the port was opened, then start
# the voice-command loop (runs until interrupted).
time.sleep(2) # wait for the serial connection to initialize
led_on_off()
| StarcoderdataPython |
9653177 | import torch as th
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import ffmpeg
class VideoLoader(Dataset):
    """Pytorch video loader.

    Reads (video_path, feature_path) pairs from a csv and decodes each
    video with ffmpeg into a float32 tensor of shape (frames, 3, H, W).
    """
    def __init__(
            self,
            csv,
            framerate=1,
            size=112,
            centercrop=False,
    ):
        """
        Args:
            csv: path to a csv with 'video_path' and 'feature_path' columns.
            framerate: frames per second to sample from each video.
            size: shorter-side target size, or an explicit (h, w) tuple.
            centercrop: crop a size x size square from the frame center.
        """
        self.csv = pd.read_csv(csv)
        self.centercrop = centercrop
        self.size = size
        self.framerate = framerate

    def __len__(self):
        return len(self.csv)

    def _get_video_dim(self, video_path):
        # Probe the container for the first video stream's dimensions.
        probe = ffmpeg.probe(video_path)
        video_stream = next((stream for stream in probe['streams']
                             if stream['codec_type'] == 'video'), None)
        width = int(video_stream['width'])
        height = int(video_stream['height'])
        return height, width

    def _get_output_dim(self, h, w):
        # An explicit (h, w) request wins; otherwise scale so the shorter
        # side becomes self.size while preserving the aspect ratio.
        if isinstance(self.size, tuple) and len(self.size) == 2:
            return self.size
        elif h >= w:
            return int(h * self.size / w), self.size
        else:
            return self.size, int(w * self.size / h)

    def __getitem__(self, idx):
        video_path = self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        # Only decode when features are missing and the source video exists.
        if not os.path.isfile(output_file) and os.path.isfile(video_path):
            print('Decoding video: {}'.format(video_path))
            try:
                h, w = self._get_video_dim(video_path)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit still propagate; a failed probe yields a dummy
                # tensor instead of aborting the whole epoch.
                print('ffprobe failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path,
                        'output': output_file}
            height, width = self._get_output_dim(h, w)
            cmd = (
                ffmpeg
                .input(video_path)
                .filter('fps', fps=self.framerate)
                .filter('scale', width, height)
            )
            if self.centercrop:
                x = int((width - self.size) / 2.0)
                y = int((height - self.size) / 2.0)
                cmd = cmd.crop(x, y, self.size, self.size)
            out, _ = (
                cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run(capture_stdout=True, quiet=True)
            )
            if self.centercrop and isinstance(self.size, int):
                height, width = self.size, self.size
            # Raw rgb24 bytes -> (frames, H, W, 3) -> (frames, 3, H, W).
            video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3])
            video = th.from_numpy(video.astype('float32'))
            video = video.permute(0, 3, 1, 2)
        else:
            video = th.zeros(1)
        return {'video': video, 'input': video_path, 'output': output_file}
| StarcoderdataPython |
8051036 | <filename>mindspore/python/mindspore/_extends/graph_kernel/expanders/maximum_grad.py
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""generate json desc for maximum_grad"""
from mindspore._extends.graph_kernel.model.model import GraphKernelUnsupportedException as GKException
from ._utils import Expander, ExpanderInfoValidator as VLD
from .minimum_grad import MinimumGrad
@VLD.check_all_formats_same
class MaximumGrad(Expander):
    """MaximumGrad expander: dx = dout * (x >= y), dy = dout - dx."""

    def _check(self):
        grad_x = self.attrs.get('grad_x', True)
        grad_y = self.attrs.get('grad_y', True)
        if not grad_x and not grad_y:
            raise GKException("For 'MaximumGrad', value of attr 'grad_x' and 'grad_y' should be False, but got {} and "
                              "{}".format(self.attrs.get('grad_x'), self.attrs.get('grad_y')))
        return super()._check()

    def _expand(self, graph_builder):
        input_x, input_y, input_dout = self.inputs
        # Mask of positions where x wins the max; cast back to x's dtype.
        ge_result = graph_builder.emit('GreaterEqual', [input_x, input_y])
        ge_result = graph_builder.emit('Cast', [ge_result], attrs={'dst_type': input_x.dtype})
        dx = graph_builder.emit('Mul', [ge_result, input_dout])
        dy = graph_builder.emit('Sub', [input_dout, dx])
        dx_out = self._reduce_to_shape(graph_builder, dx, input_x.shape)
        dy_out = self._reduce_to_shape(graph_builder, dy, input_y.shape)
        # output two results, regardless of grad_x and grad_y
        return dx_out, dy_out

    @staticmethod
    def _reduce_to_shape(graph_builder, grad, target_shape):
        """Undo broadcasting: sum the gradient over broadcast axes and
        reshape back to *target_shape* when needed."""
        reduce_axis = MinimumGrad.get_reduce_axis(target_shape, grad.shape)
        if not reduce_axis:
            return grad
        reduced = graph_builder.emit('ReduceSum', [grad], attrs={'reduce_axis': reduce_axis, 'keep_dims': False})
        if reduced.shape != target_shape:
            return graph_builder.emit('Reshape', [reduced], attrs={'shape': target_shape})
        return reduced
| StarcoderdataPython |
3222760 | import datetime
import django.test
from django.utils import timezone
from ftc.models import Organisation, OrganisationType, Scrape, Source
class TestCase(django.test.TestCase):
    """Base test case that seeds one organisation plus its related records."""

    def setUp(self):
        # Two organisation types: a generic primary type and a more
        # specific one; both slugs end up in Organisation.organisationType.
        ot = OrganisationType.objects.create(title="Registered Charity")
        ot2 = OrganisationType.objects.create(
            title="Registered Charity (England and Wales)"
        )
        # Data source the organisation record is attributed to.
        s = Source.objects.create(
            id="ts",
            data={
                "title": "Test source",
                "publisher": {
                    "name": "Source publisher",
                },
            },
        )
        # A successful scrape run finishing five minutes ago.
        scrape = Scrape.objects.create(
            status=Scrape.ScrapeStatus.SUCCESS,
            spider="test",
            errors=0,
            items=1,
            log="",
            start_time=timezone.now() - datetime.timedelta(minutes=10),
            finish_time=timezone.now() - datetime.timedelta(minutes=5),
        )
        Organisation.objects.create(
            org_id="GB-CHC-1234",
            orgIDs=["GB-CHC-1234"],
            description="Test description",
            name="Test organisation",
            active=True,
            organisationTypePrimary=ot,
            source=s,
            scrape=scrape,
            organisationType=[ot.slug, ot2.slug],
        )
| StarcoderdataPython |
def mailbox_data():
    """Return the mailbox endpoint declarations, grouped by section."""
    capture_endpoints = [
        {"name": "displayCapture", "args": ["str usr_uid", "JCapture cap", "list<JDevicePoint> points"], "chatty": True},
        {"name": "requestActiveCapture", "args": ["int test", "long other test"], "qml_only": True},
        {"name": "activeCapture", "args": ["JCaptureStorageInfo capture", "list<JDevicePoint> points"], "remote": True},
        {"name": "setInteractionStatus", "args": ["bool capture", "int visibility"]},
    ]
    return [{"section": "Capture", "endpoints": capture_endpoints}]
import pyautogui
import time

# Give the user 5 seconds to focus the target window before typing begins.
time.sleep(5)

# Context manager closes the file even if typing raises; the original
# leaked the handle by never calling close().
with open('spam.txt', 'r', encoding='utf-8') as spam_file:
    for row in spam_file:
        pyautogui.typewrite(row)
        pyautogui.press('enter')
        time.sleep(0.1)  # small delay so the target app keeps up
1804994 | <filename>src/api_gateway_lambda_proxy/response.py
"""Data classes for response of AWS Lambda proxy integration."""
from __future__ import annotations
import dataclasses
import json
from dataclasses import field
from typing import Any, Dict, List, Optional
RawProxyResponse = Dict[str, Any]
@dataclasses.dataclass()
class BaseProxyResponse:
    """Base Proxy response for AWS Lambda proxy integration."""

    statusCode: int
    body: Any = None
    headers: Dict[str, str] = field(default_factory=dict)
    multiValueHeaders: Dict[str, List[str]] = field(default_factory=dict)
    isBase64Encoded: bool = False

    def _body_encoder(self, body: Optional[str]) -> Optional[str]:
        # Identity encoding; subclasses override to serialize the body.
        return body

    def to_raw(self) -> RawProxyResponse:
        """Convert to RawProxyResponse."""
        raw = {
            'statusCode': self.statusCode,
            'headers': self.headers,
            'multiValueHeaders': self.multiValueHeaders,
            'isBase64Encoded': self.isBase64Encoded,
        }
        encoded = self._body_encoder(self.body)
        # An absent body is omitted entirely rather than sent as null.
        if encoded is not None:
            raw['body'] = encoded
        return raw
@dataclasses.dataclass()
class JsonProxyResponse(BaseProxyResponse):
    """Proxy response whose body is serialized as JSON."""

    headers: Dict[str, str] = \
        field(default_factory=lambda: {'Content-Type': 'application/json'})

    def _body_encoder(self, body: Any) -> Optional[str]:
        if body is None:
            return None
        return json.dumps(body)
11353364 | # coding=utf-8
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Services for working with Change Log via UI."""
from lib import base, factory
from lib.constants import objects, element
from lib.page.widget import change_log
from lib.utils import ui_utils
class ChangeLogService(base.WithBrowser):
    """Business-layer service for working with an object's Change Log."""

    def open_obj_changelog_tab(self, obj):
        """Open obj info page and open changelog tab of an obj.

        Returns:
          ReadonlyChangeLog element for disabled objects or ChangeLog
          element for other objects."""
        plural_name = objects.get_plural(obj.type)
        service_cls = factory.get_cls_webui_service(plural_name)
        info_page = service_cls(self._driver).open_info_page_of_obj(obj)
        info_page.tabs.ensure_tab(element.TabContainer.CHANGE_LOG_TAB)
        ui_utils.wait_for_spinner_to_disappear()
        if plural_name in objects.DISABLED_OBJECTS:
            return change_log.ReadonlyChangeLog()
        return change_log.ChangeLog()

    def get_obj_changelog(self, obj):
        """Returns object Change Log as list of ChangeLogItemEntity instances."""
        return self.open_obj_changelog_tab(obj).get_changelog_items()
| StarcoderdataPython |
57953 | <filename>clinja/__init__.py<gh_stars>0
from .clinja import ClinjaDynamic, ClinjaStatic
__version__ = "1.1.0"
__author__ = "<NAME> <<EMAIL>>"
| StarcoderdataPython |
11237419 | import pickle
import cv2
import numpy as np
def saveDescriptorListFile(descriptor, descriptorFilename):
    """Pickle *descriptor* to *descriptorFilename*.

    Uses a context manager so the file handle is closed even if pickling
    raises (the original leaked the handle on error).
    """
    with open(descriptorFilename, "wb") as file:
        pickle.dump(descriptor, file)
def readDescriptorListFile(descriptorFilename):
    """Unpickle and return the descriptor list stored at *descriptorFilename*.

    Bug fix: the original never closed the file handle; ``with``
    guarantees closure even when unpickling raises.
    """
    with open(descriptorFilename, "rb") as file:
        return pickle.load(file)
def accurateDistance(aKeyPoints, A, bKeyPoints, B):
    """One-way average feature distance from descriptor set A to set B.

    For every descriptor in A, find the cheapest match in B (descriptor
    term plus a spatially-weighted keypoint-position term) and average
    those minima over A.

    :param aKeyPoints: cv2 keypoints of image A (``.pt`` gives (x, y))
    :param A: descriptor array for image A (converted with .tolist())
    :param bKeyPoints: cv2 keypoints of image B
    :param B: descriptor array for image B
    """
    lamda = 0.25   # weight of the spatial (keypoint position) term
    r0 = 270 #avg image size of Caltech 101
    oneWayDistance = 0
    A = A.tolist()
    B = B.tolist()
    for i in range(len(A)):
        A_x = aKeyPoints[i].pt[0]
        A_y = aKeyPoints[i].pt[1]
        descriptorI = A[i]
        featureDistance = []
        for j in range (len(B)):
            B_x = bKeyPoints[j].pt[0]
            B_y = bKeyPoints[j].pt[1]
            descriptorJ = B [j]
            tmpDist = 0
            # NOTE(review): this sums the raw component differences and
            # squares the SUM below — a Euclidean distance would sum the
            # squared differences ((dI[k]-dJ[k])**2). Possibly a bug;
            # confirm the intended metric before changing.
            for k in range (len(descriptorJ)):
                tmpDist = tmpDist + (descriptorI[k] - descriptorJ[k])
            featureDistance +=[ (tmpDist)**2 + (lamda/r0)*(abs(A_x - B_x) + abs(A_y - B_y)) ]
        # Accumulate the best match for this descriptor (skip if B empty).
        if(len(featureDistance) != 0):
            oneWayDistance = oneWayDistance + min(featureDistance)
    return oneWayDistance/len(A)
def getNeighbours(queryImage, trainLst):
    """Return the 30 training images nearest to *queryImage* by SIFT distance.

    :param queryImage: path of the query image
    :param trainLst: path of a text file listing one training image per line
    :returns: (list of up to 30 nearest image paths, query SIFT descriptors)
    """
    img = cv2.imread(queryImage)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.SIFT_create()
    querykeypoints, queryDescriptor = sift.detectAndCompute(gray, None)
    distanceArray = []
    distanceImage = []
    # Context manager replaces the manual open()/close() pair so the list
    # file is closed even if descriptor loading fails part-way.
    with open(trainLst, "r") as trainFile:
        for image in trainFile:
            image = image[:-1]  # strip the trailing newline
            imageKeypoints = sift.detect(cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2GRAY))
            imageDescriptorFile = image[:-4] + '.decp'
            imageDescriptor = readDescriptorListFile(imageDescriptorFile)
            # Symmetrize: one-way A->B plus one-way B->A distance.
            distance = accurateDistance(imageKeypoints, imageDescriptor, querykeypoints, queryDescriptor) + accurateDistance(querykeypoints, queryDescriptor, imageKeypoints, imageDescriptor)
            print(distance)
            distanceImage.append(image)
            distanceArray.append(distance)
    # Sort training images by ascending distance and keep the best 30.
    allNeighbours = list(zip(distanceImage, distanceArray))
    allNeighbours.sort(key=lambda x: x[1])
    KNNs = []
    for i in allNeighbours[:30]:
        KNNs += [i[0]]
    return KNNs, queryDescriptor
def checkSameClass(KNN):
    """Check whether every neighbour path shares the first one's class.

    The class is the parent directory name (``path.split('/')[-2]``).
    Returns (all_same, first_class).
    """
    first_cat = KNN[0].split("/")[-2]
    for image in KNN[1:]:
        if image.split("/")[-2] != first_cat:
            return False, first_cat
    return True, first_cat
def getTrainingData(KNN):
    """Collect SIFT descriptors and class labels for a list of neighbours.

    For each image path, loads the sibling ``.decp`` descriptor file and
    labels every descriptor row with the image's parent-directory class.
    Returns (descriptor_rows, labels).
    """
    data = []
    labels = []
    for neighbour in KNN:
        descriptor_path = neighbour[:-4] + '.decp'
        descriptors = readDescriptorListFile(descriptor_path).tolist()
        data += descriptors
        category = descriptor_path.split('/')[-2]
        labels.extend([category] * len(descriptors))
    return data, labels
5158217 | <filename>src/doom_replay.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2017. <NAME>, <NAME>, <NAME>, #
# <NAME>. All rights reserved. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 27-05-2019 #
# Authors: <NAME>, <NAME>, <NAME>, <NAME> #
# E-mail: <EMAIL> #
# Website: vincenzolomonaco.com #
################################################################################
import glob
import time
import argparse
from vizdoom import *
def replay(config, skiprate, path):
    """Play back every recorded .lmp episode found under *path*.

    :param config: vizdoom .cfg scenario file to load
    :param skiprate: frames advanced per action step
    :param path: directory containing the recorded .lmp files
    """
    game = DoomGame()
    #game.set_doom_game_path(wad)
    game.load_config(config)
    game.set_screen_resolution(ScreenResolution.RES_800X600)
    game.set_window_visible(True)
    game.set_render_hud(True)
    game.set_mode(Mode.ASYNC_PLAYER)
    game.init()
    for episode_file in glob.glob(path + '/*.lmp'):
        # Short pause between episodes so the viewer can follow along.
        time.sleep(5)
        print('replay episode:', episode_file)
        game.replay_episode(episode_file)
        while not game.is_episode_finished():
            state = game.get_state()
            game.advance_action(skiprate)
            reward = game.get_last_reward()
            print('State #{}: reward = {}'.format(state.number, reward))
        print('total reward:', game.get_total_reward())
    game.close()
if __name__ == '__main__':
    # CLI entry point: replay all .lmp demos in --path using --vizdoom_config.
    parser = argparse.ArgumentParser(description='Doom Recorder')
    parser.add_argument('--vizdoom_config', default='world.cfg', help='vizdoom config path')
    parser.add_argument('--skiprate', type=int, default=1, help='number of skipped frames')
    parser.add_argument('--path', default='.', help='.lmp files path')
    args = parser.parse_args()
    replay(args.vizdoom_config, args.skiprate, args.path)
| StarcoderdataPython |
1850073 | """
Provides an interface to CUDA for running the parallel IBDTW and
partial IBDTW algorithms
"""
import pycuda.autoinit
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray
import pycuda.cumath
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy.io as sio
import pkg_resources
import sys
import Alignment
from Alignment.Alignments import *
from Alignment.AlignmentTools import *
import Alignment._SequenceAlignment as SAC
from pycuda.compiler import SourceModule
# Compiled kernel handles; populated by initParallelAlgorithms() and stored
# on the Alignment module so every importer shares the same functions.
Alignment.DTW_ = None
Alignment.DTWSSM_ = None
Alignment.SMWat_ = None
Alignment.SMWatSSM_ = None
def getResourceString(filename):
    """Load a text resource (e.g. a CUDA kernel source) as a utf8 string.

    Looks the file up via pkg_resources when the package is imported as
    'Alignment' or 'SSMTW.Alignment'; otherwise falls back to reading the
    file directly (calling from within this directory).
    """
    if 'Alignment' in sys.modules:
        s = pkg_resources.resource_string('Alignment', '/%s'%filename)
    elif 'SSMTW.Alignment' in sys.modules:
        s = pkg_resources.resource_string('SSMTW.Alignment', '/%s'%filename)
    else:
        # Bug fix: the original opened the file in text mode and then
        # called s.decode('utf8') on a str, which raises AttributeError in
        # Python 3. Read raw bytes so the shared decode below works for
        # all three branches; ``with`` also guarantees the handle closes.
        with open(filename, 'rb') as fin:
            s = fin.read()
    return s.decode('utf8')
def initParallelAlgorithms():
    """Compile the four CUDA kernels and bind them onto the Alignment module.

    Each kernel lives in ``<name>GPU.cu`` and exports a function ``<name>``;
    the compiled handle is stored as ``Alignment.<name>_`` (replacing the
    module-level None placeholders). DRYs the original four copy-pasted
    compile/bind stanzas into one loop.
    """
    for name in ('DTW', 'DTWSSM', 'SMWat', 'SMWatSSM'):
        mod = SourceModule(getResourceString(name + 'GPU.cu'))
        setattr(Alignment, name + '_', mod.get_function(name))
def roundUpPow2(x):
    """Return the smallest power of two >= x, as a 0-d numpy int32 array."""
    exponent = np.ceil(np.log2(float(x)))
    return np.array(int(2 ** exponent), dtype=np.int32)
def doDTWGPU(CSM, ci, cj):
    """Run the DTW kernel over a cost matrix already resident on the GPU.

    :param CSM: MxN cost matrix (pycuda gpuarray, float32)
    :param ci: starting row offset passed to the kernel
    :param cj: starting column offset passed to the kernel
    :returns: the final DTW cost as a float
    """
    #Minimum dimension of array can be at max size 1024
    #for this scheme to fit in memory
    M = CSM.shape[0]
    N = CSM.shape[1]
    diagLen = np.array(min(M, N), dtype = np.int32)
    diagLenPow2 = roundUpPow2(diagLen)
    NThreads = min(diagLen, 512)
    # Single-element device buffer the kernel writes the final cost into.
    res = gpuarray.to_gpu(np.array([0.0], dtype=np.float32))
    M = np.array(M, dtype=np.int32)
    N = np.array(N, dtype=np.int32)
    ci = np.array(ci, dtype = np.int32)
    cj = np.array(cj, dtype = np.int32)
    # shared=12*diagLen — presumably three float32 diagonal buffers
    # (3 * 4 bytes per element); confirm against DTWGPU.cu.
    Alignment.DTW_(CSM, M, N, ci, cj, diagLen, diagLenPow2, res, block=(int(NThreads), 1, 1), grid=(1, 1), shared=12*diagLen)
    ret = res.get()[0]
    return ret
def doIBDTWGPU(SSMA, SSMB, returnCSM = False, printElapsedTime = False):
    """
    Isometry-blind DTW between two curves given their self-similarity matrices.

    :param SSMA: MxM self-similarity matrix of first curve (gpuarray)
    :param SSMB: NxN self-similarity matrix of second curve (gpuarray)
    :param returnCSM: If True, return the CSM. If false, just return the final cost
    :param printElapsedTime: Print the elapsed time
    """
    M = SSMA.shape[0]
    N = SSMB.shape[0]
    # Bug fix: the original guard ``not type(SSMA) == gpuarray`` compared a
    # type against the pycuda.gpuarray *module* and was therefore always
    # True, so matrices already on the GPU were re-wrapped via np.array().
    # Test against the GPUArray class instead.
    if not isinstance(SSMA, gpuarray.GPUArray):
        SSMA = gpuarray.to_gpu(np.array(SSMA, dtype = np.float32))
    if not isinstance(SSMB, gpuarray.GPUArray):
        SSMB = gpuarray.to_gpu(np.array(SSMB, dtype = np.float32))
    CSM = np.zeros((M, N), dtype=np.float32)
    CSM = gpuarray.to_gpu(CSM)
    diagLen = np.array(min(M, N), dtype = np.int32)
    diagLenPow2 = roundUpPow2(diagLen)
    NThreads = min(diagLen, 512)
    M = np.array(M, dtype=np.int32)
    N = np.array(N, dtype=np.int32)
    tic = time.time()
    # grid=(M, N): one CUDA block per CSM cell.
    Alignment.DTWSSM_(SSMA, SSMB, CSM, M, N, diagLen, diagLenPow2, block=(int(NThreads), 1, 1), grid=(int(M), int(N)), shared=12*diagLen)
    if returnCSM:
        return CSM.get()
    else:
        res = doDTWGPU(CSM, 0, 0)
        if printElapsedTime:
            print("Elapsed Time GPU: ", time.time() - tic)
        return res
def doSMWatGPU(CSM, hvPenalty):
    """Run the Smith-Waterman kernel over a cost matrix resident on the GPU.

    :param CSM: MxN cost matrix (pycuda gpuarray, float32)
    :param hvPenalty: penalty applied to horizontal/vertical moves
    :returns: dict with host copies of the four DP tables D, U, L, UL
    """
    #Minimum dimension of array can be at max size 1024
    #for this scheme to fit in memory
    M = CSM.shape[0]
    N = CSM.shape[1]
    # Four zero-initialized device buffers the kernel fills in.
    D = np.zeros((M, N), dtype=np.float32)
    D = gpuarray.to_gpu(D)
    U = np.zeros((M, N), dtype=np.float32)
    U = gpuarray.to_gpu(U)
    L = np.zeros((M, N), dtype=np.float32)
    L = gpuarray.to_gpu(L)
    UL = np.zeros((M, N), dtype=np.float32)
    UL = gpuarray.to_gpu(UL)
    phvPenalty = np.array(hvPenalty, dtype = np.float32)
    diagLen = np.array(min(M, N), dtype = np.int32)
    diagLenPow2 = roundUpPow2(diagLen)
    NThreads = min(diagLen, 512)
    M = np.array(M, dtype=np.int32)
    N = np.array(N, dtype=np.int32)
    # shared=12*diagLen — presumably three float32 diagonals; see SMWatGPU.cu.
    Alignment.SMWat_(CSM, D, U, L, UL, M, N, diagLen, diagLenPow2, phvPenalty, block=(int(NThreads), 1, 1), grid=(1, 1), shared=12*diagLen)
    return {'D':D.get(), 'U':U.get(), 'L':L.get(), 'UL':UL.get()}
def doIBSMWatGPUHelper(SSMA, SSMB, hvPenalty, flip = False):
    """Fill one directional pass of the partial-IBDTW cross matrix on the GPU.

    :param SSMA: MxM self-similarity matrix of first curve (gpuarray)
    :param SSMB: NxN self-similarity matrix of second curve (gpuarray)
    :param hvPenalty: horizontal/vertical move penalty passed to the kernel
    :param flip: flag forwarded to the kernel as an int32; presumably makes
        it process the matrices in reversed orientation (the caller combines
        this pass with flrud()) — confirm against SMWatSSMGPU.cu
    :returns: the MxN result copied back to a host numpy array
    """
    M = SSMA.shape[0]
    N = SSMB.shape[0]
    CSM = np.zeros((M, N), dtype=np.float32)
    CSM = gpuarray.to_gpu(CSM)
    diagLen = np.array(min(M, N), dtype = np.int32)
    diagLenPow2 = roundUpPow2(diagLen)
    NThreads = min(diagLen, 512)
    M = np.array(M, dtype=np.int32)
    N = np.array(N, dtype=np.int32)
    pflip = np.array(0, dtype=np.int32)
    if flip:
        pflip = np.array(1, dtype=np.int32)
    phvPenalty = np.array(hvPenalty, dtype = np.float32)
    # grid=(M, N): one CUDA block per cell of the cross-similarity matrix.
    Alignment.SMWatSSM_(SSMA, SSMB, CSM, M, N, diagLen, diagLenPow2, phvPenalty, pflip, block=(int(NThreads), 1, 1), grid=(int(M), int(N)), shared=12*diagLen)
    CSM = CSM.get()
    return CSM
def flrud(A):
    """Reverse a matrix along both axes (equivalent to a 180-degree rotation)."""
    return A[::-1, ::-1]
def doIBSMWatGPU(SSMA, SSMB, hvPenalty, printElapsedTime = False):
    """Partial (Smith-Waterman style) isometry-blind DTW between two curves.

    :param SSMA: MxM self-similarity matrix of first curve
    :param SSMB: NxN self-similarity matrix of second curve
    :param hvPenalty: horizontal/vertical move penalty
    :param printElapsedTime: print how long the two GPU passes took
    :returns: MxN combined cross-similarity matrix (host numpy array)
    """
    tic = time.time()
    # Bug fix: ``type(x) == gpuarray`` compared a type against the
    # pycuda.gpuarray *module* and was always False, so already-uploaded
    # GPUArrays were needlessly re-wrapped; test the GPUArray class.
    if not isinstance(SSMA, gpuarray.GPUArray):
        SSMA = gpuarray.to_gpu(np.array(SSMA, dtype = np.float32))
    if not isinstance(SSMB, gpuarray.GPUArray):
        SSMB = gpuarray.to_gpu(np.array(SSMB, dtype = np.float32))
    # Forward pass plus the flipped pass rotated back into place.
    CSM = doIBSMWatGPUHelper(SSMA, SSMB, hvPenalty, False)
    CSM = CSM + flrud(doIBSMWatGPUHelper(SSMA, SSMB, hvPenalty, True))
    if printElapsedTime:
        print("Elapsed Time Smith Waterman GPU: %g"%(time.time() - tic))
    return CSM
| StarcoderdataPython |
9613380 | <gh_stars>0
import os
import click
import math
import datetime
from fb_dfg import config
from fb_dfg import main
from fb_dfg import utils
# Root command group; subcommands are attached via cli.add_command() below.
# (Comment used instead of a docstring so click's --help text is unchanged.)
@click.group()
def cli():
    pass
@click.command()
@click.option('--partner_id', '-id', help='Facebook Data for Good Partner ID.')
def set_partner_id(partner_id):
    # Persist the partner ID in the local config for later downloads.
    config.set_partner_id(partner_id)
@click.command()
def get_partner_id():
    # Print the stored partner ID, or a hint when none has been set yet.
    partner_id = config.get_partner_id()
    if partner_id == "":
        print("No partner ID. Set your partner ID with set-partner-id.")
    else:
        print(f"Partner ID: {partner_id}")
@click.command()
@click.option('--alias_key', '-key', help='Key for dataset alias.')
@click.option('--alias_value', '-value', help='Value for dataset alias.')
def set_alias(alias_key, alias_value):
    # Store (or overwrite) a dataset alias in the local config.
    config.set_alias(alias_key, alias_value)
@click.command()
@click.option('--alias_key', '-key', help='Key for dataset alias.')
def delete_alias(alias_key):
    # Remove the alias with the given key from the local config.
    config.delete_alias(alias_key)
@click.command()
@click.option('--alias_key', '-key', help='Key for dataset alias.')
def get_alias(alias_key):
    # Look up a single alias and print its value.
    alias_value = config.get_alias(alias_key)
    print(f"Alias: {alias_key} = {alias_value}")
@click.command()
def get_aliases():
    # Print every alias key currently stored in the local config.
    alias_keys = config.get_aliases()
    print(f"\nDataset Aliases: \n\n - {alias_keys}\n")
@click.command()
@click.option('--dataset_id', '-id', help='ID or Alias of Dataset')
@click.option('--start_date', '-start_date', help='Dataset start date (YYYY-MM-DD)')
@click.option('--end_date', '-end_date', help='Dataset end date (YYYY-MM-DD)')
@click.option('--debug', '-debug', default=False, is_flag=True)
def download(dataset_id: str,
             start_date: str,
             end_date: str,
             debug: bool = False):
    # Resolve defaults: auto-detect the dataset and its resume point when
    # the caller omits them; end_date defaults to "now".
    if dataset_id is None:
        dataset_id = utils.get_dataset_id_auto()
    if start_date is None:
        start_date = utils.get_start_date_auto(dataset_id)
    else:
        start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    if end_date is None:
        end_date = datetime.datetime.now()
    else:
        end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    # Downloads land in the current working directory as <dataset>_data.zip.
    path = os.getcwd()
    zip_fn = path + f"/{dataset_id}_data.zip"
    if start_date > end_date:
        raise Exception("start_date must precede end_date.")
    # Expand the range into individual days, then query in 30-day chunks;
    # each chunk is downloaded, unpacked, and its zip removed before the next.
    download_len = (end_date - start_date).days
    download_date_seq = [start_date + datetime.timedelta(days=x) for x in range(download_len)]
    processing_sections = utils.chunks(download_date_seq, 30)
    # TODO: double check that this gets all data in original range
    # and that no files are excluded (through main integration tests)
    for section in processing_sections:
        iteration_start_date = min(section)
        iteration_end_date = max(section)
        main.query_data(dataset_id=dataset_id,
                        start_date=iteration_start_date,
                        end_date=iteration_end_date,
                        zip_fn=zip_fn,
                        debug=debug)
        main.unpack_data(dataset_id, zip_fn, path)
        os.remove(zip_fn)
# Register every subcommand on the root group.
cli.add_command(set_partner_id)
cli.add_command(get_partner_id)
cli.add_command(set_alias)
cli.add_command(delete_alias)
cli.add_command(get_alias)
cli.add_command(get_aliases)
cli.add_command(download)
| StarcoderdataPython |
1795622 | from ..Base import Base
from ..info import Info
from ..apk import register
from ..tools import *
# Metadata attached to each scan report via the Info object below.
TITLE = 'SQL injection detection'
LEVEL = 2
INFO = 'Detect whether there are usage conditions for SQL injection in the App'
class SQLInjectCheck(Base):
    """Scan decompiled smali files for SQL string literals containing '?'
    that flow into SQLiteDatabase.rawQuery/execSQL.

    NOTE(review): '?' placeholders are normally the *parameterized* (safe)
    form; presumably these hits are flagged as review points rather than
    confirmed injections — confirm the intended semantics.
    """
    def scan(self):
        # grep the decompiled app for every smali file touching SQLiteDatabase.
        strline = cmdString('grep -r "Landroid/database/sqlite/SQLiteDatabase" ' + self.appPath)
        paths = getSmalis(os.popen(strline).readlines())
        results = []
        for path in paths:
            with open(path, 'r') as f:
                lines = f.readlines()
                count = len(lines)
                name = getFileName(path)
                for i in range(0, count):
                    line = lines[i]
                    # A const-string literal containing '?' — candidate SQL text;
                    # the second token of the instruction is the target register.
                    if '?' in line and 'const-string' in line:
                        v = line.strip().split(' ')[1]
                        # Scan forward for that register reaching rawQuery/execSQL.
                        for j in range(i, count):
                            ll = lines[j]
                            if v in ll and (
                                    'Landroid/database/sqlite/SQLiteDatabase;->rawQuery' in ll or 'Landroid/database/sqlite/SQLiteDatabase;->execSQL' in ll):
                                result = name + ' : ' + str(j + 1)
                                if result not in results:
                                    results.append(result)
                                break
        Info(key=self.__class__, title=TITLE, level=LEVEL, info=INFO, result='\n'.join(results)).description()
register(SQLInjectCheck) | StarcoderdataPython |
class Point2D():
    """A 2D point; ordering comparisons use distance from the origin."""

    def __init__(self, x, y):
        self.coord = [x, y]

    def __str__(self):
        return f'Point:({self.coord[0]},{self.coord[1]})'

    def __eq__(self, other):
        # Equal only when both coordinates match.
        return (self.coord[0] == other.coord[0]) and (self.coord[1] == other.coord[1])

    def __ne__(self, other):
        # Bug fix: the original (defined twice) required BOTH coordinates
        # to differ, so e.g. Point2D(1,2) != Point2D(1,3) was False.
        # Inequality is simply the negation of equality.
        return not self.__eq__(other)

    def __gt__(self, other):
        return self.distance() > other.distance()

    def __lt__(self, other):
        # Added for completeness so all four ordering operators work.
        return self.distance() < other.distance()

    def __le__(self, other):
        return self.distance() <= other.distance()

    def __ge__(self, other):
        return self.distance() >= other.distance()

    def distance(self):
        """Euclidean distance from the origin."""
        return (self.coord[0] ** 2 + self.coord[1] ** 2) ** 0.5
# Minimal smoke test when the module is run directly.
if __name__=='__main__':
    point1 = Point2D(1,1)
8007935 | """empty message
Revision ID: 94a5b5198867
Revises:
Create Date: 2022-03-12 00:15:21.130502
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "94a5b5198867"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply: replace counter.name (varchar) with counter.value (int)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("counter", sa.Column("value", sa.Integer(), nullable=False))
    op.drop_column("counter", "name")
    # ### end Alembic commands ###
def downgrade():
    """Revert: restore counter.name (varchar 255) and drop counter.value."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "counter",
        sa.Column("name", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
    )
    op.drop_column("counter", "value")
    # ### end Alembic commands ###
| StarcoderdataPython |
1950833 | <filename>Code/StatusCatalog.py
# === Imports ==================================================================
# Custom imports
from . import GlobalConstants as GC
from . import configuration as cf
from . import static_random
from . import CustomObjects, ActiveSkill, HelpMenu
from . import Utility, Engine
from . import HealthBar
import logging
logger = logging.getLogger(__name__)
# === New Status Object ========================================================
class Status(object):
    """A single status effect (skill/buff/debuff) that can attach to a unit.

    Component key/value pairs are injected straight into the instance's
    __dict__, and __getattr__ returns None for anything missing, so any
    ``status.some_component`` access can be truth-tested safely.
    """
    # Class-level counter used to hand out unique uids to new statuses.
    next_uid = 100

    def __init__(self, s_id, name, components, desc, image_index=None):
        self.uid = Status.next_uid
        Status.next_uid += 1
        self.id = s_id
        self.name = name
        self.desc = desc
        self.image_index = image_index or (0, 0)
        self.owner_id = None # Like item_owner but for statuses
        self.giver_id = None # Who created/gave away this status
        self.data = {} # Stores persistent data that needs to be kept across saves
        self.children = set()
        # Creates component slots
        self.components = components # Consumable, Weapon, Spell Bigger Picture
        for component_key, component_value in self.components.items():
            self.__dict__[component_key] = component_value
        self.loadSprites()

    def __str__(self):
        return self.id

    def __repr__(self):
        return self.id

    def add_child(self, child):
        self.children.add(child)

    def remove_child(self, child):
        self.children.discard(child)

    def removeSprites(self):
        # Drop every surface/image reference (e.g. before serialization);
        # loadSprites() re-acquires them.
        self.image = None
        self.cooldown = None
        if self.upkeep_animation:
            self.upkeep_animation.removeSprites()
        if self.always_animation:
            self.always_animation.removeSprites()
        if self.activated_item and self.activated_item.item:
            self.activated_item.item.removeSprites()

    def loadSprites(self):
        # The 16x16 icon is cut from the 'Skills' sheet at the (col, row)
        # given by image_index; a falsy image_index means no icon.
        self.image = Engine.subsurface(GC.ITEMDICT['Skills'], (16*self.image_index[0], 16*self.image_index[1], 16, 16)) if self.image_index else None
        self.cooldown = GC.IMAGESDICT['IconCooldown']
        if self.upkeep_animation:
            self.upkeep_animation.loadSprites()
        if self.always_animation:
            self.always_animation.loadSprites()
        if self.activated_item and self.activated_item.item:
            self.activated_item.item.loadSprites()
        # Tooltip is rebuilt lazily by get_help_box().
        self.help_box = None

    def serialize(self):
        """Flatten this status into a plain dict for save files."""
        serial_dict = {}
        serial_dict['uid'] = self.uid
        serial_dict['id'] = self.id
        serial_dict['time_left'] = self.time.time_left if self.time else None
        serial_dict['upkeep_sc_count'] = self.upkeep_stat_change.count if self.upkeep_stat_change else None
        # Charge counters are stashed in self.data so they survive the trip.
        if self.activated_item:
            self.data['activated_item'] = self.activated_item.current_charge
        if self.combat_art:
            self.data['combat_art'] = self.combat_art.current_charge
        serial_dict['children'] = self.children
        serial_dict['owner_id'] = self.owner_id
        serial_dict['giver_id'] = self.giver_id
        # serial_dict['count'] = self.count.count if self.count else None
        serial_dict['stat_halve_penalties'] = self.stat_halve.penalties if self.stat_halve else None
        serial_dict['aura_child_uid'] = self.aura.child_status.uid if self.aura else None
        serial_dict['data'] = self.data
        return serial_dict

    def draw(self, surf, topleft, cooldown=True):
        """Blit the status icon (and optionally its charge overlay) at topleft."""
        if self.image:
            surf.blit(self.image, topleft)
        # Cooldown
        if cooldown:
            if self.combat_art:
                self.draw_cooldown(surf, topleft, self.combat_art.current_charge, self.combat_art.charge_max)
            elif self.activated_item:
                self.draw_cooldown(surf, topleft, self.activated_item.current_charge, self.activated_item.charge_max)
            elif self.count:
                self.draw_cooldown(surf, topleft, self.count.count, self.count.orig_count)

    def draw_cooldown(self, surf, topleft, current, total):
        # Overlay one of 8 fill frames proportional to current/total;
        # index >= 8 (full or overfull) draws nothing.
        if total <= 0:
            return
        index = int(current*8//total)
        if index >= 8:
            pass
        else:
            chosen_cooldown = Engine.subsurface(self.cooldown, (16*index, 0, 16, 16))
            surf.blit(chosen_cooldown, topleft)

    def get_help_box(self):
        # Lazily build the tooltip; loadSprites() resets it to None.
        if not self.help_box:
            self.help_box = HelpMenu.Help_Dialog(self.desc)
        return self.help_box

    # If the attribute is not found
    def __getattr__(self, attr):
        # Dunder lookups fall through to the base class (object has no
        # __getattr__, so missing dunders raise AttributeError as protocols
        # like copy/pickle expect); every other missing attribute — i.e. an
        # absent component — reads as None.
        if attr.startswith('__') and attr.endswith('__'):
            return super(Status, self).__getattr__(attr)
        return None

    def __getstate__(self):
        return self.__dict__

    def __setstate__(self, d):
        self.__dict__.update(d)
class TimeComponent(object):
    """Tracks how many turns a status has left before it expires."""
    def __init__(self, time_left):
        # Data files store durations as strings; total_time remembers the cap.
        duration = int(time_left)
        self.time_left = duration
        self.total_time = duration
    def decrement(self):
        """Consume one turn (may go below zero; callers test <= 0)."""
        self.time_left = self.time_left - 1
    def increment(self):
        """Restore one turn, never exceeding the original duration."""
        restored = self.time_left + 1
        self.time_left = restored if restored < self.total_time else self.total_time
class UpkeepStatChangeComponent(object):
    """Stat delta applied every upkeep; ``count`` records how often it ran."""
    def __init__(self, change_in_stats):
        self.stat_change = change_in_stats  # list of per-stat deltas
        self.count = 0  # times the change has been applied so far
class HPPercentageComponent(object):
    """Percent of max HP gained (positive) or lost (negative) on upkeep."""
    def __init__(self, percentage):
        # data files store the percentage as a string
        self.percentage = int(percentage)
class ConditionalComponent(object):
    """A combat modifier ``value`` that only applies while ``conditional`` holds."""
    def __init__(self, name, value, conditional):
        self.name = name
        self.value = value
        self.conditional = conditional
    def __repr__(self):
        # NOTE: repr is the raw value string, not a constructor-style repr.
        return self.value
class StatHalveComponent(object):
    """Halves the comma-separated stats in ``line``; ``penalties`` holds one
    slot per affected stat, filled in when the halving is applied."""
    def __init__(self, line):
        self.stats = line.split(',')
        self.penalties = [None] * len(self.stats)
class CountComponent(object):
    """Counts down from ``orig_count``; the status is spent when it hits 0."""
    def __init__(self, orig_count):
        start = int(orig_count)
        self.orig_count = start
        self.count = start
class UpkeepAnimationComponent(object):
    """One-shot map animation played on upkeep; the sheet is x by y frames."""
    def __init__(self, animation_name, x, y, num_frames):
        self.animation_name = animation_name
        self.x = int(x)
        self.y = int(y)
        self.num_frames = int(num_frames)
    def removeSprites(self):
        # Drop the surface so the component can be serialized.
        self.sprite = None
    def loadSprites(self):
        self.sprite = GC.IMAGESDICT[self.animation_name]
class AlwaysAnimationComponent(object):
    """Looping map animation shown for as long as the status is active."""
    def __init__(self, animation_name, x, y, num_frames):
        self.animation_name = animation_name
        self.x = int(x)
        self.y = int(y)
        self.num_frames = int(num_frames)
        self.frameCount = 0  # current frame in the loop
        self.lastUpdate = Engine.get_time()  # timestamp of the last frame advance
        self.animation_speed = 150  # milliseconds per frame
    def removeSprites(self):
        # Drop surfaces so the component can be serialized.
        self.image = None
        self.sprite = None
    def loadSprites(self):
        sheet = GC.IMAGESDICT[self.animation_name]
        self.sprite = sheet
        # First frame: the sheet is x frames wide and y frames tall.
        frame_w = sheet.get_width() // self.x
        frame_h = sheet.get_height() // self.y
        self.image = Engine.subsurface(sheet, (0, 0, frame_w, frame_h))
class UnitTintComponent(object):
    """Pulsing color tint over the unit sprite, parsed from the data-file
    string 'r,g,b,period,width,max_alpha'."""
    def __init__(self, data):
        # Exactly six comma-separated fields are required.
        red, green, blue, period, width, max_alpha = data.split(',')
        self.color = (int(red), int(green), int(blue))
        self.period = int(period)
        self.width = int(width)
        self.max_alpha = float(max_alpha)
# === STATUS PROCESSOR =========================================================
class Status_Processor(object):
    """State machine that plays out status effects at the start (upkeep) or
    end (endstep) of a phase, one unit and one status at a time, with camera
    movement and a splash health bar for HP changes.

    update() is ticked by the engine and returns "Done", "Death", "Waiting",
    or None (meaning: keep ticking).
    """
    def __init__(self, gameStateObj, upkeep=True):
        # Initial setup
        self.upkeep = upkeep # Whether I'm running this on upkeep or on endstep
        self.current_phase = gameStateObj.phase.get_current_phase()
        self.previous_phase = gameStateObj.phase.get_previous_phase()
        # Only deployed units that actually carry statuses need processing
        affected_units = [unit for unit in gameStateObj.allunits if unit.position and unit.status_effects]
        if self.upkeep:
            self.units = [unit for unit in affected_units if unit.team == self.current_phase]
        else:
            self.units = [unit for unit in affected_units if unit.team == self.previous_phase]
        logger.info('Building Status_Processor: %s %s %s', self.upkeep, self.current_phase, self.previous_phase)
        # State control
        self.current_unit = None
        self.current_unit_statuses = []
        self.current_status = None
        self.status_index = 0
        self.state = CustomObjects.StateMachine('begin')
        self.state_buffer = False
        # Animation properties
        self.time_spent_on_each_status = 1200 # Only if it has a onetime animation
        self.start_time_for_this_status = Engine.get_time()
        # Health bar
        self.health_bar = HealthBar.HealthBar('splash', None, None)
        # Waiting timer
        self.wait_time = 200
        self.started_waiting = Engine.get_time()
    def update(self, gameStateObj):
        """Advance the state machine one tick; see the class docstring for
        the meaning of the return values."""
        from . import Action
        current_time = Engine.get_time()
        # Beginning process
        if self.state.getState() == 'begin':
            if self.units:
                self.current_unit = self.units.pop()
                self.state.changeState('new_unit')
            else:
                return "Done" # Done
        # New unit
        elif self.state.getState() == 'new_unit':
            # Get all statuses that could affect this unit
            # (a copy, since statuses may be removed while we iterate)
            self.current_unit_statuses = self.current_unit.status_effects[:]
            self.status_index = 0
            # Get status
            if self.current_unit_statuses:
                self.health_bar.change_unit(self.current_unit, None)
                self.state.changeState('new_status')
            else: # This unit has no status to process. Return and get a new one
                self.state.changeState('begin')
        elif self.state.getState() == 'new_status':
            if self.status_index < len(self.current_unit_statuses):
                self.current_status = self.current_unit_statuses[self.status_index]
                self.status_index += 1
                # Returns true if status is going to be processed...
                # Handle status
                if self.upkeep:
                    output = HandleStatusUpkeep(self.current_status, self.current_unit, gameStateObj)
                else:
                    output = HandleStatusEndStep(self.current_status, self.current_unit, gameStateObj)
                if output == "Remove": # Returns "Remove" if status has run out of time and should just be removed
                    Action.do(Action.RemoveStatus(self.current_unit, self.current_status), gameStateObj)
                    self.state.changeState('new_status')
                else:
                    # output is (old_hp, new_hp)
                    self.oldhp = output[0]
                    self.newhp = output[1]
                    # If the hp_changed or the current status has a one time animation, run the process, otherwise, move onto next status
                    # Processing state handles animation and HP updating
                    if self.oldhp != self.newhp:
                        if self.newhp > self.oldhp:
                            GC.SOUNDDICT['MapHeal'].play()
                        logger.debug('HP change: %s %s', self.oldhp, self.newhp)
                        # self.health_bar.update()
                        self.start_time_for_this_status = current_time
                        gameStateObj.cursor.setPosition(self.current_unit.position, gameStateObj)
                        self.current_unit.sprite.change_state('status_active', gameStateObj)
                        self.state.changeState('processing')
                        gameStateObj.stateMachine.changeState('move_camera')
                        return "Waiting"
                    else:
                        self.state.changeState('new_status')
            else: # This unit has no more status to process. Return and get a new unit
                self.state.changeState('begin')
        elif self.state.getState() == 'processing':
            self.health_bar.update(status_obj=True)
            # Turn on animations
            for anim in gameStateObj.allanimations:
                anim.on = True
            # Done waiting for status, process next one
            if current_time - self.start_time_for_this_status - self.health_bar.time_for_change + 400 > self.time_spent_on_each_status:
                # handle death of a unit
                if self.current_unit.currenthp <= 0:
                    self.current_unit.isDying = True
                    self.state.changeState('begin')
                    return "Death"
                else:
                    self.state.changeState('new_status')
                    self.current_unit.sprite.change_state('normal', gameStateObj)
            else:
                return "Waiting"
        elif self.state.getState() == 'wait':
            # Done waiting, head back
            if current_time - self.wait_time > self.started_waiting:
                self.state.back()
            else:
                return "Waiting"
    def check_active(self, unit):
        """True while `unit` is the one whose status is being animated."""
        if unit is self.current_unit and self.state.getState() == 'processing':
            return True
        return False
    def draw(self, surf, gameStateObj):
        """Draw the splash health bar, one frame after entering 'processing'."""
        # This is so it doesn't draw the first time it goes to processing, which is before the camera moves
        if self.state_buffer:
            self.health_bar.draw(surf, gameStateObj)
        if self.state.getState() == 'processing':
            self.state_buffer = True
        else:
            self.state_buffer = False
def check_automatic(status, unit, gameStateObj):
    """If this status carries a fully charged automatic combat art, trigger
    its child status on the unit and reset the art's charge."""
    from . import Action
    art = status.combat_art
    if not art or not art.is_automatic() or not art.check_charged():
        return
    triggered = statusparser(art.status_id, gameStateObj)
    Action.do(Action.AddStatus(unit, triggered), gameStateObj)
    Action.do(Action.ResetCharge(status), gameStateObj)
def HandleStatusUpkeep(status, unit, gameStateObj):
    """Apply one status's start-of-phase effects to `unit`.

    Returns "Remove" when the status should be stripped (its timer expired,
    or it is tethered to a giver that is gone/too far away); otherwise
    returns (old_hp, new_hp) so the caller can animate any HP change.
    """
    from . import Action
    oldhp = unit.currenthp
    if status.time:
        Action.do(Action.DecrementStatusTime(status), gameStateObj)
        logger.info('Time Status %s to %s at %s. Time left: %s', status.id, unit.name, unit.position, status.time.time_left)
        if status.time.time_left <= 0:
            return "Remove" # Don't process. Status has no more effect on unit
    elif status.remove_range:
        # Tethered status: drop it when the giver is missing or out of range
        p_unit = gameStateObj.get_unit_from_id(status.owner_id)
        if not p_unit or not p_unit.position or not unit.position or Utility.calculate_distance(p_unit.position, unit.position) > status.remove_range:
            return "Remove"
    if status.hp_percentage:
        hp_change = int(int(unit.stats['HP']) * status.hp_percentage.percentage/100.0)
        Action.do(Action.ChangeHP(unit, hp_change), gameStateObj)
    if status.upkeep_damage:
        if ',' in status.upkeep_damage:
            # 'low,high' form: roll damage from the replayable RNG stream so
            # the result survives save/replay
            low_damage, high_damage = status.upkeep_damage.split(',')
            old = static_random.get_other_random_state()
            damage_dealt = static_random.get_other(int(low_damage), int(high_damage))
            new = static_random.get_other_random_state()
            Action.do(Action.RecordOtherRandomState(old, new), gameStateObj)
        else:
            # NOTE(review): eval() on text from the status data file -- fine
            # for trusted game data, never for external input.
            damage_dealt = int(eval(status.upkeep_damage, globals(), locals()))
        Action.do(Action.ChangeHP(unit, -damage_dealt), gameStateObj)
    if status.upkeep_stat_change:
        Action.do(Action.ApplyStatChange(unit, status.upkeep_stat_change.stat_change), gameStateObj)
        Action.do(Action.ChangeStatusCount(status.upkeep_stat_change, status.upkeep_stat_change.count + 1), gameStateObj)
    check_automatic(status, unit, gameStateObj)
    # Only play the upkeep animation when the HP actually changed
    if status.upkeep_animation and unit.currenthp != oldhp:
        stota = status.upkeep_animation
        if not stota.sprite:
            logger.error('Missing upkeep animation sprite for %s', status.name)
        else:
            anim = CustomObjects.Animation(stota.sprite, (unit.position[0], unit.position[1] - 1), (stota.x, stota.y), stota.num_frames, on=False)
            gameStateObj.allanimations.append(anim)
    if status.upkeeps_movement:
        # Movement-affecting statuses change enemy threat ranges
        if unit.team.startswith('enemy'):
            gameStateObj.boundary_manager._remove_unit(unit, gameStateObj)
            if unit.position:
                gameStateObj.boundary_manager._add_unit(unit, gameStateObj)
    return oldhp, unit.currenthp
def HandleStatusEndStep(status, unit, gameStateObj):
    """Apply one status's end-of-phase effects to `unit`.

    Returns (old_hp, new_hp) so the caller can animate any HP change.
    """
    from . import Action
    hp_before = unit.currenthp
    change = status.endstep_stat_change
    if change:
        Action.do(Action.ApplyStatChange(unit, change.stat_change), gameStateObj)
        Action.do(Action.ChangeStatusCount(change, change.count + 1), gameStateObj)
    if status.lost_on_endstep:
        Action.do(Action.RemoveStatus(unit, status), gameStateObj)
    return hp_before, unit.currenthp
# === STATUS PARSER ======================================================
# Takes one status id, as well as the database of status data, and outputs a status object.
def statusparser(s_id, gameStateObj=None):
    """Look up status id `s_id` in GC.STATUSDATA and build a Status object.

    Each name in the status's <components> list maps to either a dedicated
    component object (time, aura, combat_art, ...), the raw text of a
    same-named XML tag, or simply True for flag components.
    Returns None when the id is not in the database.
    """
    for status in GC.STATUSDATA.getroot().findall('status'):
        if status.find('id').text == s_id:
            components = status.find('components').text
            if components:
                components = components.split(',')
            else:
                components = []
            name = status.get('name')
            desc = status.find('desc').text
            image_index = status.find('image_index').text if status.find('image_index') is not None else None
            if image_index:
                image_index = tuple(int(num) for num in image_index.split(','))
            else:
                image_index = (0, 0)
            my_components = {}
            for component in components:
                if component == 'time':
                    time = status.find('time').text
                    my_components['time'] = TimeComponent(time)
                elif component == 'stat_change':
                    my_components['stat_change'] = Utility.intify_comma_list(status.find('stat_change').text)
                    # pad with zeros so every stat slot has an entry
                    my_components['stat_change'].extend([0] * (cf.CONSTANTS['num_stats'] - len(my_components['stat_change'])))
                elif component == 'growth_mod':
                    my_components['growth_mod'] = Utility.intify_comma_list(status.find('growth_mod').text)
                    my_components['growth_mod'].extend([0] * (cf.CONSTANTS['num_stats'] - len(my_components['growth_mod'])))
                elif component == 'upkeep_stat_change':
                    stat_change = Utility.intify_comma_list(status.find('upkeep_stat_change').text)
                    stat_change.extend([0] * (cf.CONSTANTS['num_stats'] - len(stat_change)))
                    my_components['upkeep_stat_change'] = UpkeepStatChangeComponent(stat_change)
                elif component == 'endstep_stat_change':
                    stat_change = Utility.intify_comma_list(status.find('endstep_stat_change').text)
                    stat_change.extend([0] * (cf.CONSTANTS['num_stats'] - len(stat_change)))
                    # endstep changes reuse the upkeep component type
                    my_components['endstep_stat_change'] = UpkeepStatChangeComponent(stat_change)
                # Combat changes
                elif component.startswith('conditional_'):
                    value, conditional = status.find(component).text.split(';')
                    my_components[component] = ConditionalComponent(component, value, conditional)
                # Others...
                elif component == 'nihil':
                    if status.find('nihil') is not None and status.find('nihil').text is not None:
                        my_components['nihil'] = status.find('nihil').text.split(',')
                    else:
                        # no tag/text: nihil negates everything
                        my_components['nihil'] = ['All']
                elif component == 'stat_halve':
                    my_components['stat_halve'] = StatHalveComponent(status.find('stat_halve').text)
                elif component == 'count':
                    my_components['count'] = CountComponent(int(status.find('count').text))
                elif component == 'remove_range':
                    my_components['remove_range'] = int(status.find('remove_range').text)
                elif component == 'buy_value_mod':
                    my_components['buy_value_mod'] = float(status.find('buy_value_mod').text)
                elif component == 'hp_percentage':
                    percentage = status.find('hp_percentage').text
                    my_components['hp_percentage'] = HPPercentageComponent(percentage)
                elif component == 'upkeep_animation':
                    info_line = status.find('upkeep_animation').text
                    split_line = info_line.split(',')
                    my_components['upkeep_animation'] = UpkeepAnimationComponent(split_line[0], split_line[1], split_line[2], split_line[3])
                elif component == 'always_animation':
                    info_line = status.find('always_animation').text
                    split_line = info_line.split(',')
                    my_components['always_animation'] = AlwaysAnimationComponent(split_line[0], split_line[1], split_line[2], split_line[3])
                elif component == 'unit_tint':
                    info_line = status.find('unit_tint').text
                    my_components['unit_tint'] = UnitTintComponent(info_line)
                elif component == 'item_mod':
                    conditional = status.find('item_mod_conditional').text if status.find('item_mod_conditional') is not None else None
                    effect_add = status.find('item_mod_effect_add').text.split(';') if status.find('item_mod_effect_add') is not None else None
                    effect_change = status.find('item_mod_effect_change').text.split(';') if status.find('item_mod_effect_change') is not None else None
                    my_components['item_mod'] = ActiveSkill.ItemModComponent(s_id, conditional, effect_add, effect_change)
                elif component == 'aura':
                    from . import Aura
                    aura_range = int(status.find('range').text)
                    child = status.find('child').text
                    target = status.find('target').text
                    my_components['aura'] = Aura.Aura(aura_range, target, child, gameStateObj)
                elif component == 'combat_art' or component == 'automatic_combat_art':
                    mode = ActiveSkill.Mode.ACTIVATED if component == 'combat_art' else ActiveSkill.Mode.AUTOMATIC
                    child_status = status.find('combat_art_status').text if status.find('combat_art_status') is not None else None
                    charge_method = status.find('charge_method').text if status.find('charge_method') is not None else 'SKL'
                    charge_max = int(status.find('charge_max').text) if status.find('charge_max') is not None else 60
                    valid_weapons_func = status.find('valid_weapons_func').text if status.find('valid_weapons_func') is not None else 'weapons'
                    check_valid_func = status.find('check_valid_func').text if status.find('check_valid_func') is not None else 'True'
                    my_components['combat_art'] = ActiveSkill.CombatArtComponent(mode, child_status, valid_weapons_func, check_valid_func, charge_method, charge_max)
                elif component == 'activated_item' or component == 'quick_activated_item':
                    # 'quick' variant lets the unit keep acting after use
                    can_still_act = component == 'quick_activated_item'
                    activated_item_id = status.find('activated_item').text if status.find('activated_item') is not None else None
                    charge_method = status.find('charge_method').text if status.find('charge_method') is not None else 'SKL'
                    charge_max = int(status.find('charge_max').text) if status.find('charge_max') is not None else 0
                    check_valid_func = status.find('check_valid_func').text if status.find('check_valid_func') is not None else 'True'
                    get_choices_func = status.find('get_choices_func').text if status.find('get_choices_func') is not None else 'None'
                    my_components['activated_item'] = ActiveSkill.ActivatedItemComponent(activated_item_id, check_valid_func, get_choices_func, charge_method, charge_max, can_still_act)
                elif component in ('attack_proc', 'defense_proc', 'attack_pre_proc', 'defense_pre_proc', 'adept_proc'):
                    child_status = status.find('proc_status').text if status.find('proc_status') is not None else None
                    charge_method = status.find('proc_rate').text if status.find('proc_rate') is not None else 'SKL'
                    priority = int(status.find('proc_priority').text) if status.find('proc_priority') is not None else 10
                    my_components[component] = ActiveSkill.ProcComponent(child_status, charge_method, priority)
                elif status.find(component) is not None and status.find(component).text:
                    # generic component: store the tag's raw text
                    my_components[component] = status.find(component).text
                else:
                    # flag component with no data of its own
                    my_components[component] = True
            currentStatus = Status(s_id, name, my_components, desc, image_index)
            if gameStateObj:
                gameStateObj.register_status(currentStatus)
            # Otherwise already registered
            return currentStatus
def deserialize(s_dict):
    """Reconstruct a Status object from the dict produced by serialize().

    Returns None when the status id no longer exists in the database.
    """
    status = statusparser(s_dict['id'])
    if not status:
        return
    status.uid = s_dict['uid']
    time_left = s_dict['time_left']
    if time_left is not None:
        status.time.time_left = time_left
    count = s_dict.get('count')
    if count is not None:
        status.count.count = count
    sc_count = s_dict['upkeep_sc_count']
    if sc_count is not None:
        status.upkeep_stat_change.count = sc_count
    penalties = s_dict['stat_halve_penalties']
    if penalties is not None:
        status.stat_halve.penalties = penalties
    aura_child = s_dict['aura_child_uid']
    if aura_child is not None:
        status.aura.child_uid = aura_child
    status.children = set(s_dict['children'])
    status.owner_id = s_dict['owner_id']
    status.giver_id = s_dict['giver_id']
    # Persistent component data (item/art charges) rides along in 'data'
    status.data = s_dict.get('data', {})
    activated_charge = status.data.get('activated_item')
    if activated_charge is not None:
        status.activated_item.current_charge = activated_charge
    combat_charge = status.data.get('combat_art')
    if combat_charge is not None:
        status.combat_art.current_charge = combat_charge
    return status
def attach_to_unit(status, unit, gameStateObj):
    """
    Done (on load) after loading both the unit and the status to attach
    the status correctly to the unit after a suspend: re-applies item mods
    to the unit's inventory, then registers the status with the unit's
    effect list and component bundle.
    """
    item_mod = status.item_mod
    if item_mod:
        for item in unit.items:
            item_mod.apply_mod(item, gameStateObj)
    unit.status_effects.append(status)
    unit.status_bundle.update(list(status.components))
# Populate feat_list
def get_feat_list(status_data):
    """Return the ids of every status whose component list contains 'feat'."""
    feats = []
    for status in status_data.getroot().findall('status'):
        raw = status.find('components').text
        components = raw.split(',') if raw else []
        if 'feat' in components:
            feats.append(status.find('id').text)
    return feats
# Built once at import time: ids of every status usable as a class feat.
feat_list = get_feat_list(GC.STATUSDATA)
| StarcoderdataPython |
8110295 | <filename>projektrouska/migrations/0005_auto_20210104_2014.py
# Generated by Django 3.1 on 2021-01-04 19:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: moves the relation onto Nuts4 by
    # adding Nuts4.district, tightens District's uniqueness constraint, and
    # finally drops the old District.nuts4 field. Operation order matters to
    # Django's migration machinery, so only comments are added here.
    dependencies = [
        ('projektrouska', '0004_auto_20210104_0341'),
    ]
    operations = [
        migrations.AddField(
            model_name='nuts4',
            name='district',
            # default=1 backfills the new non-null FK for existing rows;
            # preserve_default=False means the default is one-shot only.
            field=models.ForeignKey(default=1,
                                    on_delete=django.db.models.deletion.CASCADE,
                                    to='projektrouska.district'),
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='district',
            unique_together={('id', 'region', 'state')},
        ),
        migrations.RemoveField(
            model_name='district',
            name='nuts4',
        ),
    ]
| StarcoderdataPython |
3230897 | import pprint
import argparse
import numpy as np
import yaml
import random
import os
import csv
import json
import train
import gridworld
def generate_random_grid(base, num_event_cells, period_range, bound, mode='linear', stack=True, event_region=None, extra_event_region=[]):
    """Build a GridWorld over `base` with randomly placed periodic event cells.

    base -- 2D array; cells equal to 0 are free, everything else is a wall.
    num_event_cells -- how many event objects to scatter (non-person modes).
    period_range -- (min, max) inclusive range for each object's period.
    event_region -- optional explicit array of (row, col) candidate cells;
        defaults to every free cell of `base`.
    NOTE(review): extra_event_region defaults to a shared mutable list --
    harmless only as long as no caller mutates it; confirm before changing.
    """
    min_period, max_period = period_range
    # argwhere yields (row, col) pairs; shuffled so placement is random
    free_spaces = np.argwhere(base == 0) if event_region is None else event_region
    np.random.shuffle(free_spaces)
    cells = []
    for n in range(num_event_cells):
        obj = gridworld.Object(x=free_spaces[n, 1], y=free_spaces[n, 0], period=random.randint(min_period, max_period),
                               bound=bound)
        cells.append(obj)
    # The agent starts on the first free cell after the event cells
    pos = (free_spaces[num_event_cells, 1], free_spaces[num_event_cells, 0])
    person = None
    if mode == "person":
        person = gridworld.Person((free_spaces[num_event_cells, 1], free_spaces[num_event_cells, 0]))
        # NOTE(review): in person mode the list built above is discarded and
        # *every* free cell (including the start position) gets an object --
        # presumably intentional, but worth confirming.
        cells = [gridworld.Object(x=free_spaces[n, 1], y=free_spaces[n, 0], period=random.randint(min_period, max_period),
                                  bound=bound) for n in range(len(free_spaces))]
    gw = gridworld.GridWorld(base, cells, person=person, initialpos=pos, viewable_distance=0, mode=mode,
                             stack=stack, extra_event_region=extra_event_region)
    return gw
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Shaping Experiment')
    parser.add_argument('-c', '--config', help='Config File', default=None)
    parser.add_argument('-f', '--csv', help='CSV File', default="results.csv")
    args = parser.parse_args()
    if not args.config:
        # Fallback config used when no YAML file is supplied
        config = {
            'mode': 'person',
            'bound': 1,
            'average_reward_learning_rate': 0.0001,
            'eval_period': 1000,
            'exploration_sched_timesteps': 10000,
            'strategy_file': 'Example1_Perm_readable.json',
            'replay_buffer_size': 100000,
            'perfect_knowledge': False
        }
    else:
        with open(args.config, 'r') as f:
            # NOTE(review): yaml.load without Loader= is unsafe on untrusted
            # files; prefer yaml.safe_load for config input.
            config = yaml.load(f)
    # Print config
    pprint.pprint(config)
    # Sheild
    # Load the permissive (shield) strategy: state pairs -> allowed successors,
    # plus the region to follow for each person position (15x15 = 225 cells).
    strategy_file = config.get("strategy_file", None)
    w_dict = None
    if strategy_file:
        w_dict = {}
        following_region = [[] for x in range(225)]
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), strategy_file), 'r') as f:
            strategy = json.load(f)
            for num, state in strategy.items():
                successors = []
                for successor in state["Successors"]:
                    succ_state = strategy[str(successor)]["State"]
                    successors.append((succ_state["s"], succ_state["st"]))
                w_dict[(state["State"]["s"], state["State"]["st"])] = successors
                if state["State"]["st"] < 225:
                    following_region[state["State"]["st"]].append(np.unravel_index(state["State"]["s"], [15, 15]))
    # Visibility
    # iset.json maps each cell index to the set of cells invisible from it
    invisibility_file = 'iset.json'
    invisibility_dict = {}
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), invisibility_file), 'r') as f:
        invisibility = json.load(f)
        for s1, s2 in invisibility.items():
            invisibility_dict[int(s1)] = s2
    with open(args.csv, 'w', newline='') as csvfile:
        fieldnames = ['TYPE', 'ADT', 'DPS', 'TOTALDETECTIONS', 'TOTALSTEPS', 'TOTALEVENTS', 'NUMVISIBLE']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        # 15x15 corridor map: 1 marks walls, 0 free space
        img = np.zeros([15, 15])
        img[0:6, 7] = 1
        img[9:15, 7] = 1
        img[5][0:5] = 1
        img[5][6] = 1
        img[5][10:15] = 1
        img[5][8] = 1
        img[9][0:5] = 1
        img[9][6] = 1
        img[9][10:15] = 1
        img[9][8] = 1
        print (img)
        if config.get('perfect_knowledge', True):
            gw = generate_random_grid(img, 188, (10, 20), config.get('bound', 1), mode=config.get('mode', 'linear'),
                                      stack=True)
        else:
            # Without perfect knowledge, rows 6-8 form an extra event region
            gw = generate_random_grid(img, 188, (10, 20), config.get('bound', 1), mode=config.get('mode', 'linear'),
                                      stack=True, extra_event_region = [(r, c) for r in range(6, 9) for c in range(0, 15)])
        strategy = (w_dict, following_region)
        gw.invisibility = invisibility_dict
        # RL
        gw.reset()
        eval_period = config.get('eval_period', 20000)
        np.set_printoptions(precision=3, suppress=True, linewidth=150)
        def sliding_window_eval_fn(env, policy, q_func, vizgrid, num_iters):
            # Periodic evaluation callback: logs metrics and appends a CSV row.
            adt = env.gw.get_adt()
            #dps = env.gw.get_dps()
            dps = (env.gw.num_detections - env.gw.prev_num_detections) / eval_period
            print(num_iters, "ADT: ", adt, "\tDPS: ", dps, "\tDetections: ", env.gw.num_detections, \
                  "\tTotal Timesteps: ", env.gw.timestep, "\tTotal Events: ", env.gw.total_num_events,
                  "\tVisible: ", env.gw.person.viewable_counts)
            writer.writerow({'TYPE': str(num_iters),
                             'ADT': adt,
                             'DPS': dps,
                             'TOTALDETECTIONS': env.gw.num_detections,
                             'TOTALSTEPS': env.gw.timestep,
                             'TOTALEVENTS': env.gw.total_num_events,
                             'NUMVISIBLE': env.gw.person.viewable_counts})
            csvfile.flush()
        def region_distance(pos, region, gw):
            # Shortest gw.fw.distance from pos to any cell of region.
            # NOTE(review): manhattan_dist and min_cell are computed but unused.
            def manhattan_dist(pos1, pos2):
                return abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])
            min_dist = gw.fw.distance(pos, (region[0][1], region[0][0])) #region is row/column
            min_cell = region[0]
            for cell in region[1:]:
                dist = gw.fw.distance(pos, (cell[1], cell[0]))
                if dist < min_dist:
                    min_dist = dist
                    min_cell = cell
            return min_dist
        def get_mask_person_shaping(spec, gw, pos, person_pos):
            # Potential-based shaping mask: reward actions that move toward
            # the person's following region; flat -6 when the person is unseen.
            if gw.person_viewable(pos, person_pos): # the person is visible now
                spec_dict, following_region = spec
                ind_person = person_pos[1] * gw.grid.shape[1] + person_pos[0]
                dist_curr = region_distance(pos, following_region[ind_person], gw)
                phi_mask = np.full(len(gw.actions), -dist_curr)
                for action in range(len(gw.actions)):
                    target = gw.get_target(action, pos)
                    if not gw.check_target(target, pos):
                        continue
                    dist_next = region_distance(target, following_region[ind_person], gw)
                    if dist_next < dist_curr:
                        phi_mask[action] += 1
                    if dist_next == 0:
                        phi_mask[action] = 0
            else:
                phi_mask = np.full(len(gw.actions), -6)
            return phi_mask
        def get_mask_person_shielding(spec, gw, pos, person_pos):
            # Shielding mask: -inf everywhere except actions whose successor
            # state pair is allowed by the strategy dict.
            spec_dict, following_region = spec
            shield_neginf_mask = np.full(len(gw.actions), -np.inf)
            ind_person = person_pos[1] * gw.grid.shape[1] + person_pos[0]
            if gw.person_viewable(pos, person_pos): # but the person is visible now
                for action in range(len(gw.actions)):
                    target = gw.get_target(action, pos)
                    ind_robot_next = target[1] * gw.grid.shape[1] + target[0]
                    if (ind_robot_next, ind_person) in spec_dict:
                        shield_neginf_mask[action] = 0
            else:
                print("Lost the person while shielding!")
            return shield_neginf_mask
        def get_mask_person_pos(gw, method_type, spec, pos, person_pos):
            # Dispatch to the chosen masking method.
            # NOTE(review): an unrecognized method_type leaves `mask` unbound.
            if not method_type:
                mask = np.zeros(len(gw.actions))
            elif method_type == "shielding":
                mask = get_mask_person_shielding(spec, gw, pos, person_pos)
            elif method_type == "shaping":
                mask = get_mask_person_shaping(spec, gw, pos, person_pos)
            return mask
        # Run the three methods back to back on the same (reset) environment
        writer.writerow({'TYPE': 'Shaping'})
        csvfile.flush()
        print("Shaping")
        train.run(config, gw, strategy, "shaping", eval_period, sliding_window_eval_fn, get_mask_person_pos)
        gw.reset()
        writer.writerow({'TYPE': 'Baseline'})
        csvfile.flush()
        print("Baseline")
        train.run(config, gw, strategy, None, eval_period, sliding_window_eval_fn, get_mask_person_pos)
        gw.reset()
        writer.writerow({'TYPE': 'Shielding'})
        csvfile.flush()
        print("Shielding")
        train.run(config, gw, strategy, "shielding", eval_period, sliding_window_eval_fn, get_mask_person_pos)
        gw.reset()
| StarcoderdataPython |
1695342 | <reponame>wenhanshi/project-in-BUPT
import socket
import struct
import queue
import sys
import getopt
import random
import threading
dns_data = {} # domain name -> IP, loaded from file_name by init_dns_file()
dns_server = '10.3.9.5' # upstream DNS server for names missing from dns_data
port = 53 # standard DNS port (we bind it locally and query upstream on it)
query_buf = queue.Queue() # client queries waiting to be forwarded upstream
ans_buf = queue.Queue() # upstream answers waiting to be relayed from port 53
file_name = 'dnsrelay.txt' # default data file; one 'IP name' pair per line
id_dict = {} # transaction id on the wire -> (original client id, client addr)
# send DNS query to remote DNS server if ip not found in dns_data
# 127.0.0.1 : 5000 -> 10.x.x.x : 53
def send_dns_frame(server, frame, addr):
    """Forward a client's DNS query frame to the remote DNS server.

    If the query's transaction id is already in flight, rewrite it to a fresh
    id so the upstream response can be routed back to the right client.
    id_dict maps: id on the wire -> (original client id, client address).
    """
    global dns_server, port, id_dict
    id = struct.unpack('!H', frame[0:2])
    # if the id exists, remap to an id that is NOT already in flight
    if id[0] in id_dict:
        # BUG FIX: a single random draw could collide with another pending
        # query's id and silently clobber its id_dict entry, misrouting that
        # client's response. Redraw until the id is unused.
        new_id = (2 * id[0] + random.randint(0, 65535)) % 65536
        while new_id in id_dict:
            new_id = random.randint(0, 65535)
        frame = struct.pack('!H', new_id) + frame[2::]
        id_dict[new_id] = (id[0], addr)
    # remain the original id
    else:
        id_dict[id[0]] = (id[0], addr)
    print('[SEND QUERY]', get_qname(frame))
    server.sendto(frame, (dns_server, port))
# deal with query_buf
# server receive a response from remote DNS server -> put the response into ans_buf
# caution: we can NOT reply the response received
# $nslookup need answer from port 53 ! so just put them in the ans_buf
def handle_dns_ans():
    """Relay thread bound to UDP port 50000.

    Flushes queued client queries to the upstream server, then collects
    upstream responses into ans_buf. Responses are NOT sent from here:
    nslookup expects answers to come from port 53, so the main server loop
    (create_dns_server) replays them.
    """
    global id_dict, query_buf, ans_buf
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.bind(('', 50000))
        s.settimeout(1)
        while True:
            try:
                # query frames in the buffer waiting to be forwarded
                while not query_buf.empty():
                    frame, addr = query_buf.get()
                    send_dns_frame(s, frame, addr)
                # receive an upstream answer and queue it for relaying
                data, addr = s.recvfrom(1024)
                print('[RESPONSE]', get_ip(data))
                # map the wire id back to the original (src_id, src_addr)
                id = struct.unpack('!H', data[0:2])
                if id[0] not in id_dict:
                    # BUG FIX: an unknown/duplicate transaction id used to
                    # raise KeyError, which the old bare 'except' mislabeled
                    # as a timeout. Report and drop it instead.
                    print('[WARN] Response with unknown transaction id', id[0])
                    continue
                src_id, src_addr = id_dict.pop(id[0])
                data = struct.pack('!H', src_id) + data[2::]
                # put the restored response into ans_buf for the main loop
                ans_buf.put((data, src_addr))
            except socket.timeout:
                # no answer within 1s; loop again to flush pending queries
                print('[TIMEOUT] No response from remote DNS server timeout.')
            except OSError as e:
                # network errors must not kill the relay thread
                print('[ERROR]', e)
# response -> a.b.c.d
def get_ip(dns_frame):
    """Render the last four bytes of a DNS answer frame as a dotted quad."""
    octets = struct.unpack('!BBBB', dns_frame[-4::])
    return '.'.join(str(octet) for octet in octets)
# map b'3www5baidu3com' -> 'www.baidu.com'
def get_qname(dns_frame):
    """Decode the question name from a DNS query frame,
    e.g. b'\\x03www\\x05baidu\\x03com' -> 'www.baidu.com'."""
    labels = []
    data = dns_frame[12:-2]  # skip the 12-byte header; drop the trailing QCLASS
    pos = 0
    while data[pos]:
        length = data[pos]
        labels.append(data[pos + 1:pos + 1 + length].decode('ascii'))
        pos += length + 1
    # join the labels into 'www.baidu.com'
    return '.'.join(labels)
# look up ip with domain name
# www.baidu.com -> a.b.c.d
def look_up(q_name):
    """Return the IP recorded for q_name in dns_data, or '' when unknown.

    A stored value of '0.0.0.0' means the domain is deliberately blocked
    (the server answers NXDOMAIN for it -- see create_dns_server).
    """
    # Single dict lookup instead of the original membership test + index.
    return dns_data.get(q_name, '')
# creating DNS answer frame if ip is found in dns_data
def create_ans_frame(dns_frame, ans_ip, filtered):
    '''
    Build a DNS response for the query in dns_frame.
    ------
    Header : ID, flags, question_count, answer RRs, authority RRs, additional RRs
    ------
    Queries : name, type, class (copied verbatim from the query)
    ------
    Answers : name, type, class, ttl, data_length, address
              (omitted entirely when filtered -- see BUG FIX below)
    ------
    Flags :
    * answer : 0x8180 (1000 0001 1000 0000)
      [is response, recursion desired and available]
    * filtered : 0x8183 (rcode = 3, NXDOMAIN)
    '''
    # Header
    id = dns_frame[0:2]  # transaction id copied from the query
    q_count = b'\x00\x01'  # one question, same as query
    if not filtered:
        flags = b'\x81\x80'
        ans_RRs = b'\x00\x01'  # exactly one (unauthenticated) answer
    else:
        flags = b'\x81\x83'  # rcode = 3, domain does not exist
        ans_RRs = b'\x00\x00'  # no answer records
    auth_RRs = b'\x00\x00'  # default : no auth RR
    add_RRs = b'\x00\x00'  # default : no add RR
    header = id + flags + q_count + ans_RRs + auth_RRs + add_RRs
    # Queries : identical to the query's question section
    queries = dns_frame[12::]
    if filtered:
        # BUG FIX: previously a bogus A record was appended even though
        # ANCOUNT said 0, producing a malformed frame with trailing bytes.
        return header + queries
    # Answers
    name = b'\xc0\x0c'  # compression pointer to the name at offset 12
    type = b'\x00\x01'  # A
    a_class = b'\x00\x01'  # IN
    ttl = struct.pack('!L', 46)  # default
    data_length = struct.pack('!H', 4)  # IPv4 address is 4 bytes
    ip_num = ans_ip.split('.')  # 'a.b.c.d' -> four octets
    address = struct.pack('!BBBB', int(ip_num[0]), int(ip_num[1]), int(ip_num[2]), int(ip_num[3]))
    answers = name + type + a_class + ttl + data_length + address
    return header + queries + answers
# create local DNS server with UDP
# binding 127.0.0.1 : 53
# listening DNS queries from clients, or DNS answers from remote DNS server
# send DNS answers to clients if name is found in table
# 10.x.x.x : xxxxx <- 127.0.0.1 : 53 (server) or
# 127.0.0.1 : xxxxx <- 127.0.0.1 : 53 (local)
def create_dns_server():
    """Run the local DNS server loop on UDP port *port*.

    Answers clients from the in-memory table (dns_data): names mapped to
    0.0.0.0 get an NXDOMAIN reply, known names get an A record, and unknown
    names are queued on query_buf for the remote-resolver thread. Answers
    produced by that thread arrive on ans_buf and are forwarded here.
    """
    global dns_server, port, ans_buf, query_buf
    print('Creating local DNS server...(UDP)')
    print('Remote DNS server: [%s]' % dns_server)
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        print('Binding socket [%s:%s]...' % ('127.0.0.1', port))
        s.settimeout(1)  # short timeout so ans_buf is drained regularly
        s.bind(('', port))  # can not be 127.0.0.1 explicitly
        while True:
            try:
                # forward any answers produced by the remote-resolver thread
                while not ans_buf.empty():
                    ans, src_addr = ans_buf.get()
                    s.sendto(ans, src_addr)
                data, addr = s.recvfrom(1024)
                flags = data[2:4]
                q_type = data[-4:-2]  # renamed: avoid shadowing builtin type()
                # only standard query with IN and A
                if flags == b'\x01\x00' and q_type == b'\x00\x01':
                    print('[DNS QUERY FROM]', addr)
                    q_name = get_qname(data)
                    print('[Q_NAME]', q_name)
                    ans_ip = look_up(q_name)
                    if ans_ip == '0.0.0.0':
                        ans = create_ans_frame(data, ans_ip, filtered=True)
                        # send to client immediately, no need to buffer
                        s.sendto(ans, addr)
                        print('[NOTE] Domain does not exist.')
                    # normal IP, just response it directly
                    elif len(ans_ip) > 0:
                        print('Domain [%s] \'s IP has been found, is [%s]' % (q_name, ans_ip))
                        ans = create_ans_frame(data, ans_ip, filtered=False)
                        # send to client immediately, no need to buffer
                        s.sendto(ans, addr)
                        print('[CREATED RESPONSE]', ans)
                    # put queries into query_buf, waiting for sending
                    else:
                        print('IP not found. Sending query to [%s]' % dns_server)
                        query_buf.put((data, addr))
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit and made the server unkillable.
                # Timeouts/connection errors are expected; ignore and loop.
                print('[TIMEOUT] No query from client timeout.')
# reading DNS data file
def init_dns_file():
    """Load the '<ip> <name>' table from *file_name* into the dns_data dict."""
    global file_name, dns_data
    with open(file_name, 'r') as f:
        for raw_line in f.readlines():
            entry = raw_line.replace('\n', '')
            if len(entry) > 0:
                # first field is the IP, second the domain name
                fields = entry.split(' ')
                dns_data[fields[1]] = fields[0]
    print('Loading data files success.')
    print('Items: ', len(dns_data))
# get paras from console
# $ python dns_server.py -d 10.3.9.5 -f dnsrelay.txt
# default : file_name='dnsrelay.txt', dns_server = '10.3.9.5'
def get_opt():
    """Parse command-line options: -d <remote dns server>, -f <data file>.

    Usage: python dns_server.py -d 10.3.9.5 -f dnsrelay.txt
    Unset options keep their module-level defaults.
    """
    global file_name, dns_server
    opts, args = getopt.getopt(sys.argv[1::], 'd:f:')
    for flag, value in opts:
        if flag == '-d':
            dns_server = value
        elif flag == '-f':
            file_name = value
        else:
            print('Error : unknown paras, use default settings.')
if __name__ == '__main__':
    get_opt()
    init_dns_file()
    # BUG FIX: Thread(...).start() returns None, so the old code stored
    # useless handles. Create the thread objects first, then start them.
    handle_query = threading.Thread(target=create_dns_server, args=(), name='role_server')
    handle_ans = threading.Thread(target=handle_dns_ans, args=(), name='role_client')
    handle_query.start()
    handle_ans.start()
| StarcoderdataPython |
1861085 | <reponame>earthcomfy/recipe-api
from .base import *
# Development-only Django settings layered on top of the shared base module.
DEBUG = True  # never enable in production
ALLOWED_HOSTS = ['localhost']
MEDIA_URL = '/media/'
# `os` and BASE_DIR are presumably provided by the star import from .base -- TODO confirm
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| StarcoderdataPython |
297886 | <reponame>ollfkaih/lego
class ParserException(Exception):
    """
    Base exception for the email parser; all parser errors derive from it.
    """
    pass
class ParseEmailException(ParserException):
    """
    Raised when the parser can't create an email.message.Message object from the raw string or bytes.
    """
    pass
class MessageIDNotExistException(ParserException):
    """
    Raised when a message does not contain a Message-ID header.
    """
    pass
class DefectMessageException(ParserException):
    """
    Raised when the received message is defect (malformed per the email parser).
    """
    pass
| StarcoderdataPython |
5098950 | <filename>tests/test_lambda_integration.py
from io import BytesIO, StringIO
import requests
import lambda_requests
from tests.base import (
BINARY_PAYLOAD,
HTTP_URL_PREFIX,
LAMBDA_URL_PREFIX,
UNICODE_PAYLOAD,
PatcherBase,
)
def _seek_reset_request(accessor, url_path, *args, **kwargs):
if "files" in kwargs:
for file_item in kwargs["files"]:
kwargs["files"]["file"].seek(0)
return accessor(url_path, *args, **kwargs)
class TestLambdaIntegration(PatcherBase):
    """Integration tests that send the same request via plain HTTP and via a
    direct Lambda invoke, asserting both transports behave identically."""
    def setUp(self):
        # One plain-HTTP session and one Lambda-invoking session per test.
        self.http_accessor = requests.Session()
        self.lambda_accessor = lambda_requests.Session()
    def post_both(self, url_path, *args, **kwargs):
        """POST the same payload over HTTP and Lambda; returns (http, lambda)."""
        return (
            _seek_reset_request(
                self.http_accessor.post, HTTP_URL_PREFIX + url_path, *args, **kwargs
            ),
            _seek_reset_request(
                self.lambda_accessor.post, LAMBDA_URL_PREFIX + url_path, *args, **kwargs
            ),
        )
    def get_both(self, url_path, *args, **kwargs):
        """GET over both transports.

        NOTE(review): returns (lambda, http) -- the opposite order of
        post_both; harmless for the symmetric assertions below, but confirm
        before relying on element order.
        """
        return (
            _seek_reset_request(
                self.lambda_accessor.get, LAMBDA_URL_PREFIX + url_path, *args, **kwargs
            ),
            _seek_reset_request(
                self.http_accessor.get, HTTP_URL_PREFIX + url_path, *args, **kwargs
            ),
        )
    def test_binary_file(self):
        """
        Send binary file via gateway and lambda invoke to echo service
        ensure we get same status code and content back.
        """
        with BytesIO(BINARY_PAYLOAD) as test_buffer:
            kwargs = {"files": {"file": test_buffer}}
            responses = self.post_both("/file", **kwargs)
            assert responses[0].status_code == responses[1].status_code
            assert responses[0].content == responses[1].content
    def test_unicode_file(self):
        """
        Send unicode file via gateway and lambda invoke to echo service
        ensure we get same status code and content back.
        """
        with StringIO(UNICODE_PAYLOAD) as test_buffer:
            kwargs = {"files": {"file": test_buffer}}
            responses = self.post_both("/file", **kwargs)
            assert responses[0].status_code == responses[1].status_code == 200
            assert responses[0].content == responses[1].content
    def test_path_parameter(self):
        """Path parameters must round-trip identically on both transports."""
        responses = self.get_both("/test/foo")
        print(responses[0].status_code)
        print(responses[1].status_code)
        assert responses[0].status_code == responses[1].status_code == 200
        assert responses[0].json()["param"] == responses[1].json()["param"]
    def test_form_object(self):
        """Form-encoded bodies must be echoed identically by both transports."""
        form_data = {"foo": "bar"}
        responses = self.post_both("/test/form", data=form_data)
        assert responses[0].status_code == responses[1].status_code == 200
        assert responses[0].json()["form"] == responses[1].json()["form"]
    def test_query_string(self):
        """Query-string parameters must be echoed identically."""
        param_data = {"foo": "bar"}
        responses = self.get_both("/test/form", params=param_data)
        assert responses[0].status_code == responses[1].status_code == 200
        assert (
            responses[0].json()["query_strings"] == responses[1].json()["query_strings"]
        )
    def test_custom_header(self):
        """Custom request headers must reach the echo service on both transports."""
        header_data = {"foo": "bar"}
        responses = self.get_both("/test/form", headers=header_data)
        assert responses[0].status_code == responses[1].status_code == 200
        assert responses[0].json()["headers"].lower().find("foo") > 0
        assert responses[0].json()["headers"].lower().find("bar") > 0
        assert responses[1].json()["headers"].lower().find("foo") > 0
        assert responses[1].json()["headers"].lower().find("bar") > 0
    # def test_get
    # def test_head
    # def test_post
    # def test_patch
    # def test_post
    # def test_delete
| StarcoderdataPython |
4935608 | import math
# Print the series i^3 + 2*i for i = 1..n.
n = int(input("Enter the number till where the series is to be printed = "))  # BUG FIX: prompt typo "ius" -> "is"
for i in range(1, n + 1):
    cube = math.pow(i, 3)  # math.pow returns float, so output keeps the original "x.0" form
    term = cube + 2 * i
    print(term)
| StarcoderdataPython |
1667247 | <reponame>mrunibe/MIALab
from tensorflow.python.platform import app
import os
import argparse
import sys
import numpy as np
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '..')) # append the MIALab root directory to Python path
# fixes the ModuleNotFoundError when executing main.py in the console after code changes (e.g. git pull)
# somehow pip install does not keep track of packages
import bin.df_crossval_main as runval
# Full sweeps kept for reference:
# n_trees=np.array([3,4,6,10,20,40,80])
# n_nodes=np.array([100,200,400,800,1500,2000])
# Hyper-parameter grid actually swept: forest sizes x maximum node counts.
n_trees=np.array([40,80])
n_nodes=np.array([100,200])
if __name__ == "__main__":
    """The program's entry point."""
    # All paths default relative to this script's directory.
    script_dir = os.path.dirname(sys.argv[0])
    parser = argparse.ArgumentParser(description='Medical image analysis pipeline for brain tissue segmentation')
    parser.add_argument(
        '--model_dir',
        type=str,
        default=os.path.normpath(os.path.join(script_dir, './mia-model')),
        help='Base directory for output models.'
    )
    parser.add_argument(
        '--result_dir',
        type=str,
        default=os.path.normpath(os.path.join(script_dir, './mia-result')),
        help='Directory for results.'
    )
    parser.add_argument(
        '--data_atlas_dir',
        type=str,
        default=os.path.normpath(os.path.join(script_dir, '../data/atlas')),
        help='Directory with atlas data.'
    )
    parser.add_argument(
        '--data_train_dir',
        type=str,
        default=os.path.normpath(os.path.join(script_dir, '../data/train/')),
        help='Directory with training data.'
    )
    parser.add_argument(
        '--data_test_dir',
        type=str,
        default=os.path.normpath(os.path.join(script_dir, '../data/test/')),
        help='Directory with testing data.'
    )
    FLAGS, unparsed = parser.parse_known_args()
    # Run one cross-validation per (tree count, node count) grid point.
    for t in n_trees:
        for n in n_nodes:
            runval.main(FLAGS,t,n)
| StarcoderdataPython |
1760074 | from torchvision import datasets, transforms
import math
import random
import torch
import copy
import csv
from datetime import datetime
import os
import numpy as np
from torch.utils.data import Dataset, DataLoader
from torch import nn
from trainer import Tester
class Utils():
    """Federated-learning helpers: dataset loading and per-device index
    partitioning (IID and non-IID), FedAvg aggregation, average weight
    divergence, CSV result export and model warmup."""
    def get_dataset_dist(self, args):
        """Load MNIST or CIFAR10 and, in federated mode (args.learning == 'f'),
        split the training indices across args.num_devices devices.

        Returns (train_dataset, test_dataset, user_idxs); user_idxs is empty
        for centralized learning.
        """
        if args.dataset == "mnist":
            data_dir = "./data/mnist"
            # MNIST is grayscale: resize to 32x32 and repeat to 3 channels so
            # RGB model architectures can be reused.
            transform = transforms.Compose([
                transforms.Resize(32),
                transforms.ToTensor(),
                transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ])
            train_dataset = datasets.MNIST(
                root=data_dir,
                train=True,
                download=True,
                transform=transform
            )
            test_dataset = datasets.MNIST(
                root=data_dir,
                train=False,
                download=True,
                transform=transform
            )
        elif args.dataset == "cifar10":
            data_dir = "./data/cifar10"
            # Standard CIFAR10 per-channel normalization constants.
            transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            ])
            train_dataset = datasets.CIFAR10(
                root=data_dir,
                train=True,
                download=True,
                transform=transform
            )
            test_dataset = datasets.CIFAR10(
                root=data_dir,
                train=False,
                download=True,
                transform=transform
            )
        if args.learning == "f":
            if args.iid:
                user_idxs = self.iid_dist(train_dataset, args)
            else:
                user_idxs = self.noniid_dist(train_dataset, args)
        else:
            user_idxs = []
        return train_dataset, test_dataset, user_idxs
    def iid_dist(self, dataset, args):
        """Shuffle all indices and give each device an equal random share,
        optionally capped at args.max_data_per_device."""
        data_per_device = math.floor(len(dataset)/args.num_devices)
        idxs = list(range(len(dataset)))    # Put all data index in list
        users_idxs = [[] for i in range(args.num_devices)]  # Index dictionary for devices
        random.shuffle(idxs)
        for i in range(args.num_devices):
            users_idxs[i] = idxs[i*data_per_device:(i+1)*data_per_device]
            if args.max_data_per_device:
                users_idxs[i] = users_idxs[i][0:args.max_data_per_device]
        return users_idxs
    def noniid_dist(self, dataset, args):
        """Build a non-IID split across devices.

        Two modes:
        * args.class_per_device set: each device receives samples from a
          fixed, contiguous (wrapping) range of class labels; samples of a
          class are dealt round-robin among the devices owning that class.
          Only supported with unequal distributions.
        * otherwise: args.noniidness percent of each device's data is taken
          from the label-sorted index stream (skewed), the rest is IID.
          args.equal_dist chooses equal vs random per-device data volume.
        """
        if args.class_per_device:   # Use classes per device
            if args.class_per_device > len(dataset.classes):
                raise OverflowError("Class per device is larger than number of classes")
            if args.equal_dist:
                raise NotImplementedError("Class per device can only be used with unequal distributions")
            else:
                current_classs = 0
                users_classes = [[] for i in range(args.num_devices)]   # Classes dictionary for devices
                classes_devives = [[] for i in range(len(dataset.classes))]  # Devices in each class
                # Distribute class numbers to devices
                for i in range(args.num_devices):
                    next_current_class = (current_classs+args.class_per_device)%len(dataset.classes)
                    # Handle wrap-around past the last class label.
                    if next_current_class > current_classs:
                        users_classes[i] = np.arange(current_classs, next_current_class)
                    else:
                        users_classes[i] = np.append(
                            np.arange(current_classs, len(dataset.classes)),
                            np.arange(0, next_current_class)
                        )
                    for j in users_classes[i]:
                        classes_devives[j].append(i)
                    current_classs = next_current_class
                # Combine indexes and labels for sorting
                idxs_labels = np.vstack((np.arange(len(dataset)), np.array(dataset.targets)))
                idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
                users_idxs = [[] for i in range(args.num_devices)]  # Index dictionary for devices
                current_idx = 0
                for i in range(len(dataset.classes)):
                    if not len(classes_devives[i]):
                        continue
                    # Deal this class's samples round-robin to its devices.
                    send_to_device = 0
                    for j in range(current_idx, len(idxs_labels[0])):
                        if idxs_labels[1, j] != i:
                            current_idx = j
                            break
                        users_idxs[classes_devives[i][send_to_device]].append(idxs_labels[0, j])
                        send_to_device = (send_to_device+1)%len(classes_devives[i])
                if args.max_data_per_device:
                    for i in range(args.num_devices):
                        users_idxs[i] = users_idxs[i][0:args.max_data_per_device]
                """
                # Validate results
                tmp_list = []
                for i in range(args.num_devices):
                    tmp_list = list(set(tmp_list) | set(users_idxs[i]))
                print(len(tmp_list))
                sum = 0
                for i in range(args.num_devices):
                    sum += len(users_idxs[i])
                print(sum)
                """
                return users_idxs
        else:   # Use non-IIDness
            if args.equal_dist:
                data_per_device = math.floor(len(dataset)/args.num_devices)
                users_idxs = [[] for i in range(args.num_devices)]  # Index dictionary for devices
                # Combine indexes and labels for sorting
                idxs_labels = np.vstack((np.arange(len(dataset)), np.array(dataset.targets)))
                idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
                idxs = idxs_labels[0, :].tolist()
                niid_data_per_device = int(data_per_device*args.noniidness/100)
                # Distribute non-IID data
                for i in range(args.num_devices):
                    users_idxs[i] = idxs[i*niid_data_per_device:(i+1)*niid_data_per_device]
                # Still have some data
                if args.num_devices*niid_data_per_device < len(dataset):
                    # Filter distributed data
                    idxs = idxs[args.num_devices*niid_data_per_device:]
                    # Randomize data after sorting
                    random.shuffle(idxs)
                    remaining_data_per_device = data_per_device-niid_data_per_device
                    # Distribute IID data
                    for i in range(args.num_devices):
                        users_idxs[i].extend(idxs[i*remaining_data_per_device:(i+1)*remaining_data_per_device])
                """
                # Validate results
                for i in users_idxs[0]:
                    print(idxs_labels[1, np.where(idxs_labels[0] == i)])
                sum = 0
                for i in range(args.num_devices):
                    sum += len(users_idxs[i])
                print(sum)
                """
                if args.max_data_per_device:
                    for i in range(args.num_devices):
                        users_idxs[i] = users_idxs[i][0:args.max_data_per_device]
                return users_idxs
            else:
                # Max data per device
                max = math.floor(len(dataset)/args.num_devices)
                # Each device get [0.2*max, max) amount of data
                data_per_device = [int(random.uniform(max/5, max)) for i in range(args.num_devices)]
                users_idxs = [[] for i in range(args.num_devices)]  # Index dictionary for devices
                # Combine indexes and labels for sorting
                idxs_labels = np.vstack((np.arange(len(dataset)), np.array(dataset.targets)))
                idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
                idxs = idxs_labels[0, :].tolist()
                niid_data_per_device = [int(data_per_device[i]*args.noniidness/100) for i in range(args.num_devices)]
                current_idx = 0
                # Distribute non-IID data
                for i in range(args.num_devices):
                    users_idxs[i] = idxs[current_idx:current_idx+niid_data_per_device[i]]
                    current_idx += niid_data_per_device[i]
                # Filter distributed data
                idxs = idxs[current_idx:]
                # Randomize data after sorting
                random.shuffle(idxs)
                remaining_data_per_device = [data_per_device[i]-niid_data_per_device[i] for i in range(args.num_devices)]
                current_idx = 0
                # Distribute IID data
                for i in range(args.num_devices):
                    users_idxs[i].extend(idxs[current_idx:current_idx+remaining_data_per_device[i]])
                    current_idx += remaining_data_per_device[i]
                if args.max_data_per_device:
                    for i in range(args.num_devices):
                        users_idxs[i] = users_idxs[i][0:args.max_data_per_device]
                """
                # Validate results
                tmp_list = []
                for i in range(args.num_devices):
                    tmp_list = list(set(tmp_list) | set(users_idxs[i]))
                print(len(tmp_list))
                sum = 0
                for i in range(args.num_devices):
                    sum += len(users_idxs[i])
                print(sum)
                """
                return users_idxs
    # Non-weighted averaging...
    def fed_avg(self, weights):
        """FedAvg: element-wise unweighted mean of the devices' state dicts.

        NOTE(review): every device counts equally regardless of its data
        volume -- confirm that is intended for unequal distributions.
        """
        w = copy.deepcopy(weights[0])   # Weight from first device
        for key in w.keys():
            for i in range(1, len(weights)):    # Other devices
                w[key] += weights[i][key]   # Sum up weights
            w[key] = torch.div(w[key], len(weights))    # Get average weights
        return w
    def cal_avg_weight_diff(self, weights_list, avg_weights):
        """Average relative element-wise difference between each device's
        weights and the averaged weights.

        NOTE(review): divides by the averaged value, so parameters averaging
        to (near) zero can blow up or divide by zero -- confirm acceptable.
        """
        w = copy.deepcopy(weights_list)
        w2 = copy.deepcopy(avg_weights)
        key = list(w2.keys())
        for key in list(w2.keys()):
            w2[key] = w2[key].reshape(-1).tolist()  # Reshape to 1d tensor and transform to list
        # List for differences: for all devices, get the average of abs((val(device)-val(average))/val(average))
        diff_list = []
        print("\n\tWeight difference:")
        for key in list(w2.keys()):
            tmp2 = []
            for i in range(len(w)):
                tmp = []
                w[i][key] = w[i][key].reshape(-1).tolist()  # Reshape to 1d tensor and transform to list
                for j in range(len(w[i][key])):
                    tmp.append(abs((w[i][key][j]-w2[key][j])/w2[key][j]))   # Abs((val(device)-val(average))/val(average))
                average = sum(tmp)/len(tmp)     # Calculate average
                tmp2.append(average)
                print(f"\t\tWeight difference | Weight {i + 1} | {key} | {average}")
            average = sum(tmp2)/len(tmp2)   # Calculate average
            diff_list.append(average)
        return sum(diff_list)/len(diff_list)
    def save_results_to_file(self, args, avg_weights_diff,
                             global_train_losses, global_test_losses,
                             global_accuracies, aucs,
                             kappas):
        """Write per-round metrics to a timestamped CSV under ./results.

        NOTE(review): datetime.now() is evaluated again for the final print,
        so the printed filename can differ from the file actually written.
        """
        iid = "iid" if args.iid else "niid"
        # Create folder if it doesn't exist
        if not os.path.exists("results"):
            os.mkdir("results")
        f = open(f"./results/{datetime.now()}_{args.dataset}_{args.optim}_{iid}_{args.noniidness}_{args.equal_dist}_{args.class_per_device}.csv", "w")
        with f:
            # Federated runs additionally record the weight-divergence column.
            if args.learning == "f":
                fnames = ["round", "average weight differences",
                          "train losses", "test losses", "test accuracies",
                          "average AUCs", "Kappas"]
            else:
                fnames = ["round", "train losses",
                          "test losses", "test accuracies",
                          "average AUCs", "Kappas"]
            writer = csv.DictWriter(f, fieldnames=fnames)
            writer.writeheader()
            for i in range(len(global_train_losses)):
                if args.learning == "f":
                    writer.writerow({
                        "round": i+1,
                        "average weight differences": avg_weights_diff[i],
                        "train losses": global_train_losses[i],
                        "test losses": global_test_losses[i],
                        "test accuracies": global_accuracies[i],
                        "average AUCs": aucs[i],
                        "Kappas": kappas[i]
                    })
                else:
                    writer.writerow({
                        "round": i+1,
                        "train losses": global_train_losses[i],
                        "test losses": global_test_losses[i],
                        "test accuracies": global_accuracies[i],
                        "average AUCs": aucs[i],
                        "Kappas": kappas[i]
                    })
        print(f"Results stored in results/{datetime.now()}_{args.dataset}_{args.optim}_{iid}_{args.noniidness}_{args.equal_dist}_{args.class_per_device}.csv")
    def warmup_model(self, model, train_dataset, test_dataset, device, args):
        """Train *model* until test accuracy reaches 0.6, then return a deep
        copy of its state dict (used as a common warm start for all devices)."""
        model.to(device)
        model.train()   # Train mode
        print("Training warmup model...")
        # Optimizer choice mirrors the main training loop's args.optim values.
        if args.optim == "sgd":
            optimizer = torch.optim.SGD(
                model.parameters(),
                lr=args.lr,
                momentum=args.sgd_momentum
            )
        elif args.optim == "adagrad":
            optimizer = torch.optim.Adagrad(
                model.parameters(),
                lr=args.lr
            )
        else:
            optimizer = torch.optim.Adam(
                model.parameters(),
                lr=args.lr
            )
        dataloader = DataLoader(
            train_dataset,
            batch_size=args.bs,
            shuffle=True
        )
        loss_function = nn.CrossEntropyLoss().to(device)
        for idx, (data, target) in enumerate(dataloader):
            model.train()   # Train mode
            data, target = data.to(device), target.to(device)
            model.zero_grad()
            output = model(data)
            loss = loss_function(output, target)
            loss.backward()
            optimizer.step()
            # Evaluate every 40 batches; stop as soon as accuracy >= 0.6.
            if idx % 40 == 0:
                accuracy, loss = Tester().test(
                    test_dataset,
                    -1,
                    device,
                    copy.deepcopy(model),
                    args
                )
                print(f"Accuracy reaches {accuracy}")
                if accuracy >= 0.6:
                    break
        print(f"Trained warmup model to accuracy {accuracy}!")
        return copy.deepcopy(model.state_dict())
| StarcoderdataPython |
8079874 | <filename>Chapter09/Edge_detection_Kernel.py
#Edge dection kernel
#Built with SciPy
#Copyright 2018 <NAME> MIT License. READ LICENSE.
import matplotlib.image as mpimg
import numpy as np
import scipy.ndimage.filters as filter
import matplotlib.pyplot as plt
#I.An edge detection kernel (discrete Laplacian)
kernel_edge_detection = np.array([[0.,1.,0.],
                                  [1.,-4.,1.],
                                  [0.,1.,0.]])
#II.Load image -- keep only the first channel (grayscale)
image=mpimg.imread('img.bmp')[:,:,0]
shape = image.shape
print("image shape",shape)
#III.Convolution -- zero padding at the borders (mode='constant', cval=0)
image_after_kernel = filter.convolve(image,kernel_edge_detection,mode='constant', cval=0)
#III.Displaying the image before and after the convolution
f = plt.figure(figsize=(8, 8))
axarr=f.subplots(2,sharex=False)
axarr[0].imshow(image,cmap=plt.cm.gray)
axarr[1].imshow(image_after_kernel,cmap=plt.cm.gray)
f.show()
print("image before convolution")
print(image)
print("image after convolution")
print(image_after_kernel)
| StarcoderdataPython |
5059151 | import warnings
warnings.filterwarnings("ignore")
import re
from nltk import word_tokenize, pos_tag
import pandas as pd
import numpy as np
from profanity_check import predict, predict_prob
from emot import emoji, emoticons
from bs4 import BeautifulSoup
from flair.models import TextClassifier, SequenceTagger
from flair.data import Sentence
from langdetect import detect
def emoji_and_emoticon_count(string):
    """Replace emoji/emoticons in *string* with their textual meaning and
    count them; returns {'content': new_string, 'num_emoji': count}."""
    num = 0
    for emotes in [emoji(string), emoticons(string)]:
        try:
            if emotes['flag']:
                # NOTE(review): len(emotes) counts the dict's keys, not the
                # number of matches -- presumably len(emotes['value']) was
                # intended; confirm against the emot library's return shape.
                num += len(emotes)
                for value, meaning in zip(emotes['value'], emotes['mean']):
                    string = string.replace(value, meaning)
        except:
            # Bare except: emot raises/returns oddly for inputs without
            # matches; any failure leaves the string unchanged.
            pass
    return {'content': string, 'num_emoji': num}
def num_urls(string):
    """Replace every URL found in *string* with the token 'url'.

    Returns {'content': rewritten_string, 'num_url': number_of_urls_found}.
    """
    found = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', string)
    for link in found:
        string = string.replace(link, "url")
    return {'content': string, 'num_url': len(found)}
def num_profane_words(string):
    """Count profane words in *string* using profanity_check's per-word classifier."""
    try:
        words = string.split()
        profanity = predict(words)
        return np.sum(profanity)
    except:
        # NOTE(review): debugging leftovers -- prints the offending value and
        # kills the whole process via exit(0); consider raising instead.
        print(string)
        print(type(string))
        exit(0)
def detect_lang(string):
    """Return langdetect's language code for *string*, or 'not a string' when
    detection fails (non-string input, empty/undetectable text)."""
    try:
        return detect(string)
    except:
        return 'not a string'
def upper_case_density(string):
    """Fraction of cased characters in *string* that are upper-case.

    Non-letter characters are ignored; returns ~0.0 for strings without
    cased characters (a tiny epsilon in the denominator avoids division
    by zero).
    """
    cased = sum(1 for ch in string if ch.islower() or ch.isupper())
    upper = sum(1 for ch in string if ch.isupper())
    return upper / (cased + 10**(-6))
def num_pronouns(string):
    """Count personal pronouns (Penn Treebank tag 'PRP') in *string* via NLTK."""
    num = 0
    for word, tag in pos_tag(word_tokenize(string)):
        if tag == 'PRP':
            num += 1
    return num
# Load raw tweets and keep only the text column.
data = pd.read_json("Data/01/01.json.bz2", lines=True)
data['content'] = data['text']
data = data[['content']]
data = data.dropna()
print(data.shape)
# Keep English rows only.
data['language'] = data['content'].apply(detect_lang)
data = data[data['language'] == 'en']
print(data.shape)
# Replace URLs with the 'url' token and record their count.
url_df = pd.DataFrame(data['content'].map(num_urls).to_list())
data['content'] = url_df['content']
data['num_url'] = url_df['num_url']
# data['content'] = data['content'].map(lambda string: BeautifulSoup(string, features="html.parser").text)
# data['content'] = data['content'].map(lambda string: string.replace("&;", "'"))
# Replace emoji/emoticons with their textual meaning and record their count.
emoji_df = pd.DataFrame(data['content'].map(emoji_and_emoticon_count).to_list())
data['content'] = emoji_df['content']
data['num_emoji'] = emoji_df['num_emoji']
# Drop rows whose content is no longer a plain string.
is_string = data['content'].apply(lambda string: type(string) == type(""))
data = data[is_string]
print(data.shape)
# Profanity features.
data['profane_words'] = data['content'].map(num_profane_words)
data = data[data['profane_words'] >= 0]
print(data.shape)
data['profanity_score'] = predict_prob(data['content'].to_numpy())
# Simple punctuation / symbol counts.
data['num_exclamation_question'] = data['content'].map(lambda string: string.count("!") + string.count("?"))
data['num_stops'] = data['content'].map(lambda string: string.count("."))
data['num_dash'] = data['content'].map(lambda string: string.count("-"))
data['num_star_dollar'] = data['content'].map(lambda string: string.count("*") + string.count("$"))
data['num_ampersand'] = data['content'].map(lambda string: string.count("&"))
data['num_hashtags'] = data['content'].map(lambda string: string.count("#"))
data['num_usertags'] = data['content'].map(lambda string: string.count("@"))
data['upper_case_density'] = data['content'].map(upper_case_density)
# Sentiment via flair: signed score, positive for POSITIVE label.
flair_sentiment = TextClassifier.load('en-sentiment')
sentences = data['content'].map(lambda string: Sentence(string)).to_list()
flair_sentiment.predict(sentences)
sign = {'POSITIVE': 1, 'NEGATIVE': -1}
sentiments = [sign[sentence.labels[0].value]*sentence.labels[0].score for sentence in sentences]
# NOTE(review): pd.Series(sentiments) has a fresh RangeIndex while `data`
# keeps its filtered index -- this assignment may misalign/introduce NaNs;
# verify, e.g. by resetting the index first.
data['sentiment'] = pd.Series(sentiments)
data['length'] = data['content'].map(lambda string: len(string))
data['num_pronoun'] = data['content'].map(num_pronouns)
data.to_json('data_anannotated.json')
print(data.head)
print(data.columns)
print(data.size)
6616474 | import numpy as np
from matplotlib.path import Path
class BinaryRenderer:
    """Rasterize simple shapes (circles, polygons) into a 2-D boolean mask."""

    def __init__(self, size):
        # Boolean canvas plus precomputed per-pixel coordinate grids.
        self.map = np.zeros(size, dtype=bool)
        self.xx, self.yy = np.meshgrid(range(size[1]), range(size[0]))
        self.xy = np.vstack((self.xx.flatten(), self.yy.flatten())).T

    def circle(self, x, y, radius, value=True):
        """Set every pixel within *radius* of (x, y) to *value*."""
        # Clip the bounding box of the circle to the canvas.
        left = max(int(x - radius), 0)
        top = max(int(y - radius), 0)
        right = min(int(x + radius) + 2, self.map.shape[1] - 1)
        bottom = min(int(y + radius) + 2, self.map.shape[0] - 1)
        window = self.map[top:bottom, left:right]
        sq_dist = (self.xx[top:bottom, left:right] - x) ** 2 + (self.yy[top:bottom, left:right] - y) ** 2
        window[sq_dist <= radius ** 2] = value
        self.map[top:bottom, left:right] = window

    def polygon(self, points, value=True):
        """Set every pixel inside the polygon defined by *points* (Nx2) to *value*."""
        # Clip the polygon's bounding box to the canvas.
        left = max(int(points[:, 0].min()), 0)
        top = max(int(points[:, 1].min()), 0)
        right = min(int(points[:, 0].max()) + 2, self.map.shape[1] - 1)
        bottom = min(int(points[:, 1].max()) + 2, self.map.shape[0] - 1)
        coords = np.vstack((self.xx[top:bottom, left:right].flatten(), self.yy[top:bottom, left:right].flatten())).T
        window = self.map[top:bottom, left:right]
        window[Path(points).contains_points(coords).reshape(window.shape)] = value
        self.map[top:bottom, left:right] = window
| StarcoderdataPython |
1979634 | <filename>package/no_of_letters_in_string.py
def nols(a=None, b=None):
    """Count how many times the letter *b* occurs in the string *a*.

    Both arguments default to interactive prompts (preserving the original
    script behavior); passing them explicitly makes the function reusable
    and testable. Prints the count and also returns it.
    """
    if a is None:
        a = input('enter string ')
    if b is None:
        b = input('enter letter ').strip()
    # Per-character equality, as in the original: a multi-character *b*
    # therefore never matches.
    count = sum(1 for ch in a if ch == b)
    print(count)
    return count
nols()  # runs the interactive prompt whenever this module is executed/imported
| StarcoderdataPython |
5020499 | from pyjokes.jokes_en import jokes_en
from pyjokes.jokes_de import jokes_de
from pyjokes.jokes_es import jokes_es
from pyjokes.jokes_gl import jokes_gl
from pyjokes.jokes_eu import jokes_eu
from pyjokes.jokes_it import jokes_it
from pyjokes.jokes_se import jokes_se
def _test_joke_length(joke):
    """Assert a single joke fits within 140 characters (a classic tweet)."""
    assert len(joke) <= 140
def _test_joke_group(jokes):
    """Check the length constraint for every joke in an iterable."""
    for joke in jokes:
        _test_joke_length(joke)
def test_jokes_lengths():
    """Every joke in every language set must respect the 140-char limit."""
    jokes_sets = [jokes_en, jokes_es, jokes_de, jokes_gl, jokes_eu, jokes_it, jokes_se]
    for jokes in jokes_sets:
        _test_joke_group(jokes["all"])
| StarcoderdataPython |
76667 | <filename>setup.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import platform
import re
from setuptools import setup, Extension
# Report the build environment before configuring the extension.
python_version = platform.python_version()
system_name = platform.system()
print("build for python{} on {}".format(python_version, system_name))
# Arguments -- empty strings mean "derive from this script's location" below.
actrie_dir = ""
alib_dir = ""
def get_root_dir():
    """Absolute directory containing this setup script (symlinks resolved)."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
# Fall back to paths relative to this script when not supplied explicitly.
if not actrie_dir:
    actrie_dir = get_root_dir()
if not alib_dir:
    alib_dir = os.path.join(actrie_dir, 'deps', 'alib')
def build_library():
    """Invoke the project's shell build script (utils/build.sh) to compile the C libraries."""
    os.system(os.path.join(actrie_dir, "utils", "build.sh"))
# build_library()
# C sources for the Python wrapper extension.
warp_sources = [
    os.path.join(actrie_dir, 'actrie', 'src', 'wrap.c')
]
compile_args = []
if system_name == "Windows":
    compile_args.append("/utf-8")
else:
    compile_args.append("-fno-strict-aliasing")
# Link against the prebuilt static libraries (see build_library above).
library_dirs = [
    # os.path.join(alib_dir, 'lib'),
    os.path.join(actrie_dir, 'lib')
]
libraries = ['actrie', 'alib']
include_dirs = [
    os.path.join(alib_dir, 'include'),
    os.path.join(actrie_dir, 'include')
]
actrie = Extension('actrie._actrie',
                   sources=warp_sources,
                   extra_compile_args=compile_args,
                   include_dirs=include_dirs,
                   library_dirs=library_dirs,
                   libraries=libraries)
kwds = {}
# Read version from bitarray/__init__.py
pat = re.compile(r'__version__\s*=\s*(\S+)', re.M)
data = open(os.path.join(actrie_dir, 'actrie', '__init__.py')).read()
# NOTE(review): eval() of file content -- fine for a trusted local package
# file, but ast.literal_eval would be the safer choice.
kwds['version'] = eval(pat.search(data).group(1))
setup(name="actrie",
      description="Aho-Corasick automation for large-scale multi-pattern matching.",
      author="<NAME>",
      author_email="<EMAIL>",
      url="https://github.com/ifplusor/actrie",
      license="BSD",
      packages=['actrie', 'actrie.example'],
      ext_modules=[actrie],
      classifiers=[
          "Programming Language :: C",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: Implementation :: CPython",
          "License :: OSI Approved :: BSD License",
          "Operating System :: OS Independent",
          "Topic :: Utilities"
      ],
      keywords=["matcher", "trie", "aho-corasick automation", "ac-automation",
                "string matching", "string search", "string matcher"],
      zip_safe=False,
      **kwds)
1899630 | <gh_stars>0
# ------------------------------------------------------------------- #
# Copyright (c) 2007-2008 Hanzo Archives Limited. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
# You may find more information about Hanzo Archives at #
# #
# http://www.hanzoarchives.com/ #
# #
# You may find more information about the WARC Tools project at #
# #
# http://code.google.com/p/warc-tools/ #
# ------------------------------------------------------------------- #
class WTypes:
    "A class to define Python WARC classes types"
    def __init__(self):
        # Assign sequential type codes; order matches the original constants
        # (WNone=0 ... ARecord=5).
        names = ('WNone', 'WFile', 'WRecord', 'WBloc', 'AFile', 'ARecord')
        for code, attr in enumerate(names):
            setattr(self, attr, code)
## end ##
| StarcoderdataPython |
3469065 | <filename>RPI/Master/co2Sensor.py
# ***************************************************
# * By Group 5 @ HSLU I.BSCI.HS18.IOT *
# ****************************************************
import serial
import time
class Co2Sensor:
    """Driver for an Arduino-based CO2 / air-quality sensor on a serial port."""
    def __init__(self):
        self.arduino_serial = None   # serial.Serial handle once set up
        self.initialized = False     # True after a successful serial handshake
    def setup_arduino(self, serialPort):
        '''Setup Communication to Arduino connected as a serial device.'''
        # Device may change if there is another serial device connected!
        # Use ls /dev/tty* to check new USB Serial devices.
        try:
            self.arduino_serial = serial.Serial(serialPort, 9600, timeout = 3)
        except Exception as e:
            print('Co2Sensor: Serial Port not available, Exception: ' + str(e))
            return False
        if self.arduino_serial.is_open:
            # Opening the port resets the Arduino; give it time to boot.
            time.sleep(5)
            self.initialized = True
            return True
        else:
            try:
                self.arduino_serial.open()
                # Arduino is being resetted, wait until available
                time.sleep(5)
                self.initialized = True
                return True
            except Exception as e:
                print('Co2Sensor: Arduino not initialized, Exception: ' + str(e))
                return False
    def read_co2(self):
        '''Retrieve Air Quality Level (1, 2, 3). Returns 0 if an illegal value is received by the Arduino.'''
        # Tell Arduino to start reading
        # Encode as Bytes first
        try:
            self.arduino_serial.write('read_co2'.encode('utf-8'))
            response = self.arduino_serial.readline()
        except Exception as e:
            print('Co2Sensor: Cannot write / read to Arduino Serial, Exception: ' + str(e))
            self.initialized = False
            # BUG FIX: bail out here. Previously execution fell through to
            # response.rstrip() with `response` never assigned, crashing with
            # UnboundLocalError instead of returning the documented 0.
            return 0
        # Remove '\r\n' and decode Bytes back to String
        response = response.rstrip().decode('utf-8')
        if self._is_int(response):
            return int(response)
        else:
            print('Co2Sensor: Not reading Co2 data')
            return 0
    def _is_int(self, s):
        '''Internal: Check whether a string can be casted to an int.'''
        try:
            int(s)
            return True
        except ValueError:
            return False
| StarcoderdataPython |
4998120 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@文件 :data_interface.py
@说明 :数据接口
@时间 :2020/07/29 14:13:14
@作者 :Riven
@版本 :1.0.0
'''
import abc
'''
定义一个接口
'''
class DataInterface(metaclass=abc.ABCMeta):
    """Abstract interface for persisting and retrieving data.

    Concrete backends must implement save() and fetch().
    """
    @abc.abstractmethod
    def save(self, save_data, path=None):
        """Persist *save_data*, optionally at *path*."""
        # BUG FIX: was @abc.abstractclassmethod, which is deprecated and wrong
        # here -- these are instance methods taking `self`, not classmethods.
    @abc.abstractmethod
    def fetch(self, fetch_data=None, path=None):
        """Retrieve data, optionally selected by *fetch_data* / *path*."""
6589883 | from spyd.registry_manager import register
from spyd.permissions.functionality import Functionality
from spyd.game.client.exceptions import GenericError
from spyd.game.command.command_base import CommandBase
@register("command")
class RoomCommand(CommandBase):
    """Chat command that moves the invoking client into a named room."""
    name = "room"
    functionality = Functionality("spyd.game.commands.room.execute", "You do not have permission to execute {action#command}", command=name)
    usage = "<room name>"
    description = "Join a specified room."
    @classmethod
    def execute(cls, spyd_server, room, client, command_string, arguments, raw_args):
        """Resolve arguments[0] to an existing room and move *client* there.

        Raises GenericError when no name is given or the name doesn't resolve.
        """
        if len(arguments) < 1:
            raise GenericError("Please specify a room name.")
        room_name = arguments[0]
        # Second argument: fuzzy/partial matching when resolving the name.
        target_room = room.manager.get_room(room_name, True)
        if target_room is None:
            raise GenericError("Could not resolve {value#room_name} to a room. Perhaps create it with {action#room_create}", room_name=room_name, room_create='room_create')
        room.manager.client_change_room(client, target_room)
5156849 | <reponame>rog-works/lf3py
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple
from typing_extensions import Protocol
from lf3py.app.app import App
from lf3py.lang.dict import deep_merge
from lf3py.lang.module import import_module
from lf3py.lang.sequence import first, flatten, last
from lf3py.middleware import Middleware
from lf3py.routing.symbols import IRouter
from lf3py.task.types import Runner
class Generator(Protocol):
    """Structural type for objects that render an artifact from blueprint apps."""
    def generate(self, bps: List[App]) -> Any:
        """Produce an artifact (routes dict, OpenAPI schema, ...) from *bps*."""
        raise NotImplementedError()
class Discovery:
    """Collects blueprint apps by importing the modules behind a list of file paths."""

    def __init__(self, filepaths: List[str]) -> None:
        self._bps = self.__discover(list(filepaths))

    def __discover(self, filepaths: List[str]) -> List[App]:
        """Import every path and keep only the modules that resolved to an app."""
        candidates = (
            self.__dirty_resolve_bp(self.__to_module_path(filepath))
            for filepath in filepaths
        )
        return [candidate for candidate in candidates if candidate]

    def __to_module_path(self, filepath: str) -> str:
        # "a/b/c.py" -> "a.b.c": drop the extension, then swap separators.
        without_ext = '.'.join(filepath.split('.')[:-1])
        return '.'.join(without_ext.split('/'))

    def __dirty_resolve_bp(self, path: str) -> Optional[App]:
        """Return the first module attribute exposing a bound, callable ``locate``."""
        modules = import_module(path)
        for candidate in modules.__dict__.values():
            locate = getattr(candidate, 'locate', None)
            if callable(locate) and hasattr(locate, '__self__'):
                return candidate
        return None

    def generate(self, generator: 'Generator') -> Any:
        """Delegate rendering of the discovered apps to *generator*."""
        return generator.generate(self._bps)
class RoutesGenerator:
    """Builds a flat {dsn_spec: module_path} mapping from every blueprint's router."""

    def generate(self, bps: List[App]) -> dict:
        per_bp = [self.__dirty_get_routes_to_tuple(bp) for bp in bps]
        return dict(flatten(per_bp))

    def __dirty_get_routes_to_tuple(self, bp: App) -> List[Tuple[str, str]]:
        routes = bp.locate(IRouter)._routes  # FIXME dirty get routes
        return list(routes.items())
class OpenApiGenerator:
    """Builds an OpenAPI-style schema dict by inspecting each blueprint's routes."""
    def generate(self, bps: List[App]) -> dict:
        """Merge the per-blueprint schemas into one dict (later bps win on key clashes)."""
        schema = {}
        for bp in bps:
            schema = {**schema, **self.__gen_schema_from_bp(bp)}
        return schema
    def __gen_schema_from_bp(self, bp: App) -> dict:
        """Build {'paths': ...} for one blueprint from its router and middleware."""
        middleware = bp.locate(Middleware)
        routes = bp.locate(IRouter)._routes  # FIXME dirty get routes
        # All route targets live in one package; derive it from the first entry.
        path = '.'.join(first(routes.values()).split('.')[:-1])
        modules = import_module(path)
        schema = {'paths': {}}
        for spec, runner in self.__extract_runners(routes, modules).items():
            schema['paths'] = deep_merge(schema['paths'], self.__gen_api_schema(spec, runner, middleware))
        return schema
    def __extract_runners(self, routes: dict, modules: ModuleType) -> Dict[str, Runner]:
        """Resolve each route's module path to the actual runner callable."""
        extracted = {}
        for spec, module_path in routes.items():
            module_name = last(module_path.split('.'))
            extracted[spec] = modules.__dict__[module_name]
        return extracted
    def __gen_api_schema(self, spec: str, runner: Runner, middleware: Middleware) -> dict:
        """Combine middleware-declared schema fragments with the route's base entry."""
        api_schema = self.__gen_api_schema_from_middleware(middleware, runner)
        return self.__gen_api_schema_from_runner(spec, runner, api_schema)
    def __gen_api_schema_from_middleware(self, middleware: Middleware, runner: Runner) -> dict:
        """Merge __openapi__ fragments from the runner's attach/catch middlewares."""
        # NOTE(review): the local is named ``caches`` but reads ``_catches`` —
        # presumably "catches"; behavior is unaffected, only the name is off.
        attaches, caches = middleware._attaches.get(runner, []), middleware._catches.get(runner, [])
        elems = flatten([attaches, caches])
        schema = {}
        for elem in elems:
            if hasattr(elem, '__openapi__'):
                schema = deep_merge(schema, getattr(elem, '__openapi__'))
        return schema
    def __gen_api_schema_from_runner(self, spec: str, runner: Runner, api_schema: dict) -> dict:
        """Nest the schema under its path and lower-cased HTTP method."""
        method, path = spec.split(' ')
        # NOTE(review): the OpenAPI spec keys response codes as strings ('200');
        # an int key is kept here to preserve the existing output — confirm consumers.
        base_api_schema = {
            'responses': {
                200: {'description': '200 OK'},
            }
        }
        return {
            path: {
                method.lower(): deep_merge(api_schema, base_api_schema)
            }
        }
| StarcoderdataPython |
9664981 | from bs4 import BeautifulSoup
def normalise(text):
    """Strip all HTML markup from *text* and return the plain text.

    Passing an explicit parser avoids bs4's GuessedAtParserWarning and makes
    the result deterministic across environments (bs4 otherwise picks the
    "best" installed parser, which can vary).
    """
    no_html = BeautifulSoup(text, "html.parser").get_text()
    return no_html
| StarcoderdataPython |
3552526 | <gh_stars>0
"""
Normalizes the audio of a video file.
"""
import argparse
import os
import subprocess
def ffprobe_duration(filename: str) -> float:
    """
    Uses ffprobe to get the duration of a video file, in seconds.

    Raises subprocess.CalledProcessError if ffprobe fails, and ValueError if
    its output is not a number.
    """
    # BUG FIX: the command template had lost its filename placeholder (it
    # contained a literal "(unknown)"), so the argument was never used.
    # The filename is quoted so paths with spaces survive shell=True.
    cmd = (
        "static_ffprobe -v error -show_entries format=duration "
        f'-of default=noprint_wrappers=1:nokey=1 "{filename}"'
    )
    result = subprocess.check_output(cmd, shell=True, universal_newlines=True)
    result = result.replace("\n", "").replace(" ", "")
    return float(result)
def _is_media_file(filename: str) -> bool:
if not os.path.isfile(filename):
return False
ext = os.path.splitext(filename.lower())[1]
return ext in [".mp4", ".mkv", ".avi", ".mov", ".mp3", ".wav"]
def main():
    """CLI entry point: normalize the audio track of one media file via ffmpeg-normalize."""
    parser = argparse.ArgumentParser(
        description="Print video durations\n",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Both arguments are optional on the command line; missing ones are
    # prompted for interactively below.
    parser.add_argument("vidfile", help="Path to vid file", nargs="?")
    parser.add_argument("out", help="Path to vid file", nargs="?")
    args = parser.parse_args()
    path = args.vidfile or input("in vid file: ")
    out = args.out or input("out vid file: ")
    # Only create parent directories when the output path actually has one.
    if len(os.path.dirname(out)):
        os.makedirs(os.path.dirname(out), exist_ok=True)
    assert _is_media_file(path), f"{path} is not a media file"
    # Re-encode audio to 192k AAC with loudness normalization.
    cmd = f'ffmpeg-normalize -f "{path}" -o "{out}" -c:a aac -b:a 192k'
    print(f"Executing:\n  {cmd}")
    os.system(cmd)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6604469 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__version__ = '0.1'
from os.path import dirname, abspath
from argparse import ArgumentParser
import re
# Command-line interface: takes the CubeMX project file and the ChibiOS cfg folder.
parser = ArgumentParser(description='Update ChibiOS halconf and mcuconf from STM32CubeMX project files.')
parser.add_argument('cube', type=str, help='STM32CubeMX project file')
parser.add_argument('cfg', type=str, help='Chibios config files folder')
# Always enable
ALWAYS = ('PAL', 'EXTI')
# In case IPs don't match hal names, or if you want to override (ie: use SERIAL instead of UART driver)
# Each pair is (ChibiOS HAL name, regex matched against the CubeMX IP name).
HAL_TRANSLATE = (
    ('USB', 'USB_OTG_FS'),
    ('USB', 'USB_OTG_HS'),
    ('SDC', 'SDMMC'),
    ('TRNG', 'RNG'),
    ('WSPI', 'QUADSPI'),
    ('WDG', 'IWDG'),
    ('UART', 'USART')
)
# Each pair is (ChibiOS LLD driver name, pattern/name of the STM32 peripheral it uses).
DRIVER_TRANSLATE = (
    ('SDC', 'SDMMC'),
    ('SERIAL', r'U(S)?ART'),
    ('UART', r'U(S)?ART'),
    ('USB', 'OTG'),
    ('PWM', 'TIM'),
    ('ICU', 'TIM'),
    ('GPT', 'TIM'),
    ('WDG', 'IWDG'),
    ('WSPI', 'QUADSPI'),
    ('RNG', 'RNG')
)
# Maps ChibiOS RCC setting names to CubeMX project keys (consumed by update_rcc, still a stub).
RCC_TRANSLATE = (
    ('HPRE', 'HCLK'),
    ('PPRE1', 'APB1CLKDivider'),
    ('PPRE2', 'APB2CLKDivider'),
    ('SW', 'SYSCLKSource'),
    ('SDMMC1SEL', 'SDMMCClockSelection'),
    ('PLLM_VALUE', 'PLLM'),
    ('PLLN_VALUE', 'PLLN'),
    ('PLLQ_VALUE', 'PLLQ'),
    ('PLLSAIN_VALUE', 'PLLSAIN'),
    ('PLLSAIR_VALUE', 'PLLSAIR'),
    ('MCO1SEL', 'RCC_MCO1Source'),
    ('MCO2SEL', 'RCC_MCO2Source'),
    ('PLLI2SN_VALUE', 'PLLI2SN'),
)
def translate_hal(ip):
    """Map a CubeMX IP name to its ChibiOS HAL driver name (identity if unknown)."""
    for hal_name, pattern in HAL_TRANSLATE:
        if re.search(pattern, ip, re.M):
            return hal_name
    return ip
def translate_driver(ip):
    """Map a ChibiOS LLD driver name to the name/pattern of its STM32 peripheral."""
    for driver_name, periph in DRIVER_TRANSLATE:
        if re.search(driver_name, ip, re.M):
            return periph
    return ip
def set_boolean_define(line, match, name, value):
    """Flip a boolean ``#define`` in *line* to TRUE or FALSE.

    The line is only changed when it contains both *name* and the regex
    *match*; changed lines are echoed to stdout for user feedback.

    Args:
        line: one line of a config header.
        match: regex that must be found in the line.
        name: driver name that must appear verbatim in the line.
        value: desired boolean state of the define.

    Returns:
        The (possibly modified) line.
    """
    if name in line and re.search(match, line, re.M):
        # Fixed the non-idiomatic ``value == True`` comparison.
        if value:
            line = line.replace('FALSE', 'TRUE')
        else:
            line = line.replace('TRUE', 'FALSE')
        print(line.strip())
    return line
def get_hal_devices(source):
    """Return the unique device names from all ``#define HAL_USE_*`` lines, in order."""
    devices = []
    marker = '#define HAL_USE_'
    for entry in source:
        if marker not in entry:
            continue
        token = entry.split(' ')[1]
        # 'HAL_USE_USB_OTG' -> 'USB_OTG': drop the HAL_USE_ prefix parts.
        device = '_'.join(token.split('_')[2:])
        if device not in devices:
            devices.append(device)
    return devices
def get_enabled_drivers(source, hal_devices):
    """Map each HAL-known peripheral found in the Cube project to its instance numbers."""
    enabled = {}
    for line in source:
        if not line.startswith('Mcu.IP'):
            continue
        # Split the peripheral name from its trailing instance number.
        found = re.search(r"^Mcu.IP\d+=((I2C|[A-Z]+_?)+)(\d)?", line)
        if not found:
            continue
        device = translate_hal(found.group(1))   # peripheral name
        instance = found.group(3)                # peripheral number, may be None
        if device not in hal_devices:
            continue
        instances = enabled.setdefault(device, [])
        if instance:
            instances.append(instance)
    return enabled
def get_rcc(source):
    # TODO: parse RCC/clock settings from the CubeMX project (see RCC_TRANSLATE);
    # currently always returns an empty mapping.
    out = {}
    return out
def update_hal(source, drivers):
    """Disable every HAL_USE_ switch in halconf, then re-enable the detected drivers."""
    prefix = '#define HAL_USE_'
    for index in range(len(source)):
        current = source[index]
        if current.startswith(prefix) and "TRUE" in current:
            source[index] = current.replace('TRUE', 'FALSE')
        # set_boolean_define itself filters, so every line is offered to it.
        for driver in drivers:
            source[index] = set_boolean_define(source[index], prefix, driver, True)
    return source
def update_drivers(source, drivers):
    """Disable every STM32_*_USE_* switch in mcuconf, then re-enable detected ones.

    ``drivers`` maps a ChibiOS driver name to a (possibly empty) list of
    peripheral instance numbers; the list is mutated in place and returned.
    """
    for i in range(len(source)):
        line = source[i]
        if '_USE_' in line:
            if 'TRUE' in line:
                source[i] = line.replace('TRUE', 'FALSE')
        # NOTE: this loop intentionally runs for every line (not only '_USE_'
        # lines); set_boolean_define filters on the generated match string.
        for driver, instances in drivers.items():
            if instances:
                # Numbered peripherals, e.g. STM32_SERIAL_USE_USART2.
                for inst in instances:
                    periph = translate_driver(driver)
                    match = 'STM32_{0}_USE_{1}{2}'.format(driver, periph, inst)
                    source[i] = set_boolean_define(source[i], match, driver, True)
            else:
                # Unnumbered peripherals, e.g. STM32_WDG_USE_IWDG.
                periph = translate_driver(driver)
                match = 'STM32_{0}_USE_{1}'.format(driver, periph)
                source[i] = set_boolean_define(source[i], match, driver, True)
    return source
def update_rcc(source, rcc):
    # TODO: apply the parsed RCC settings to mcuconf; currently a no-op passthrough.
    return source
# Script entry point: read the CubeMX project, rewrite halconf.h/mcuconf.h in place.
if __name__ == '__main__':
    args = parser.parse_args()
    # NOTE(review): cur_path appears unused; kept as-is.
    cur_path = dirname(abspath(__file__))
    halconf_path = args.cfg + '/halconf.h'
    mcuconf_path = args.cfg + '/mcuconf.h'
    # Load the CubeMX project and both ChibiOS config files into memory.
    with open(args.cube, 'r') as f:
        project = f.readlines()
    with open(halconf_path, 'r') as f:
        halconf = f.readlines()
    with open(mcuconf_path, 'r') as f:
        mcuconf = f.readlines()
    hal_devices = get_hal_devices(halconf)
    enabled_drivers = get_enabled_drivers(project, hal_devices)
    rcc = get_rcc(project)
    # Some drivers (PAL, EXTI) must always be on regardless of the project.
    for a in ALWAYS:
        enabled_drivers[a] = []
    # Update and save halconf
    halconf = update_hal(halconf, enabled_drivers)
    with open(halconf_path, 'w') as f:
        f.write("".join(halconf))
    # Update and save mcuconf drivers
    mcuconf = update_drivers(mcuconf, enabled_drivers)
    mcuconf = update_rcc(mcuconf, rcc)
    with open(mcuconf_path, 'w') as f:
        f.write("".join(mcuconf))
| StarcoderdataPython |
11257267 | """
Tests for views in eCommerce app.
"""
from unittest.mock import patch
from django.apps.registry import apps
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
from ecommerce.cart import Cart
from ecommerce.models import Order
from pages.models import CustomPage
from tests.ecommerce.models import MockEcommerceProduct, MockEcommerceCategory
def get_json_carts(response):
    """Get rendered cart's templates from JsonResponse."""
    payload = response.json()
    return payload['header'], payload['table']
class Cart_(TestCase):
    """End-to-end tests for cart views: add/remove/flush/change and order page rendering."""
    @classmethod
    def setUpClass(cls):
        """Set up test models, get urls for tests."""
        super().setUpClass()
        cls.model = MockEcommerceProduct
        category = MockEcommerceCategory.objects.create(name='Category')
        cls.product1, _ = cls.model.objects.get_or_create(
            name='Product one', price=10, category=category)
        cls.product2, _ = cls.model.objects.get_or_create(
            name='Product two', price=20, category=category)
        cls.add_to_cart = reverse('cart_add')
        cls.remove_from_cart = reverse('cart_remove')
        cls.flush_cart = reverse('cart_flush')
        cls.change_in_cart = reverse('cart_set_count')
        cls.order_page = CustomPage.objects.create(h1='Order page', slug='order')
        # Patch so views resolve 'get_model' to the mock product model.
        cls.patched_model = patch.object(apps, 'get_model', return_value=cls.model)
    def setUp(self):
        """Instantiate a fresh cart for every test case."""
        self.cart = Cart(self.client.session)
    def test_add_to_cart_modifier(self):
        """
        'Add to cart' view should return JsonResponse with rendered header cart and order table
        containing added product.
        """
        with self.patched_model:
            response = self.client.post(
                self.add_to_cart, {'quantity': 1, 'product': self.product1.id},
            )
        header_cart, order_table = get_json_carts(response)
        self.assertIn('In cart: 1', header_cart)
        self.assertIn('Total price: {}'.format(self.product1.price), header_cart)
        self.assertIn(self.product1.name, order_table)
    def test_remove_from_cart_modifier(self):
        """
        'Remove from cart' view should return JsonResponse with rendered header cart and order table
        without removed product.
        """
        self.client.post(
            self.add_to_cart,
            {'quantity': 1, 'product': self.product1.id}
        )
        self.client.post(
            self.add_to_cart,
            {'quantity': 2, 'product': self.product2.id}
        )
        response = self.client.post(
            self.remove_from_cart,
            {'product': self.product1.id}
        )
        header_cart, order_table = get_json_carts(response)
        # Two units of product2 remain after product1 is removed.
        self.assertIn('In cart: 2', header_cart)
        self.assertIn(self.product2.name, order_table)
        response = self.client.post(
            self.remove_from_cart,
            {'product': self.product2.id}
        )
        header_cart, order_table = get_json_carts(response)
        self.assertIn('The cart is empty', header_cart)
        self.assertNotIn(self.product2.name, order_table)
    def test_flush_cart_modifier(self):
        """'Flush cart' view should return JsonResponse with empty header cart and order table."""
        self.client.post(self.add_to_cart, {'quantity': 1, 'product': self.product1.id})
        response = self.client.post(self.flush_cart)
        header_cart, order_table = get_json_carts(response)
        self.assertIn('The cart is empty', header_cart)
        self.assertNotIn(self.product1.name, order_table)
    def test_change_cart_modifier(self):
        """
        'Change in cart' view should return JsonResponse with header cart and order table
        with changed quantity of a specific product.
        """
        self.client.post(self.add_to_cart, {'quantity': 1, 'product': self.product1.id})
        response = self.client.post(
            self.change_in_cart, {'quantity': 42, 'product': self.product1.id})
        header_cart, order_table = get_json_carts(response)
        self.assertIn('In cart: 42', header_cart)
        self.assertIn(self.product1.name, order_table)
    def test_order_page_with_empty_cart(self):
        """Order page should be empty when the cart is empty."""
        response = self.client.get(self.order_page.url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Positions in cart: 0')
    def test_non_empty_cart_after_buy(self):
        """After some shopping we proceed to the order page and should see our cart."""
        self.client.post(self.add_to_cart, {'quantity': 1, 'product': self.product1.id})
        self.client.post(self.add_to_cart, {'quantity': 2, 'product': self.product2.id})
        response = self.client.get(self.order_page.url)
        self.assertContains(response, 'Positions in cart: 2')
        self.assertContains(
            response, 'Total cost: {}'.format(self.product1.price + 2 * self.product2.price))
class Order_(TestCase):
    """Tests for order placement: persistence, notification mail and success page."""
    # Fixture contact data reused across tests.
    EMAIL = '<EMAIL>'
    PHONE = '+7 (222) 222 22 22'
    def setUp(self):
        self.page = CustomPage.objects.create(slug='order')
        self.success_page = CustomPage.objects.create(slug='order-success')
    @property
    def url(self):
        """URL of the order page."""
        return reverse(CustomPage.ROUTE, kwargs={'page': 'order'})
    def prepare_cart(self):
        """Create one product and put a single unit of it in the session cart."""
        category = MockEcommerceCategory.objects.create(name='Category')
        product = MockEcommerceProduct.objects.create(
            name='Product one', price=10, category=category
        )
        self.client.post(
            reverse('cart_add'), {'quantity': 1, 'product': product.id},
        )
    def place_order(self, email='', phone='') -> None:
        """Do order. Requires prepared cart."""
        email = email or self.EMAIL
        phone = phone or self.PHONE
        response = self.client.post(self.url, {'email': email, 'phone': phone})
        # Successful order placement redirects (to the success page).
        self.assertEqual(302, response.status_code)
    def test_save_to_db(self):
        self.prepare_cart()
        self.place_order()
        count = (
            Order.objects
            .filter(email=self.EMAIL, phone=self.PHONE)
            .count()
        )
        self.assertEqual(1, count)
    def test_send_mail(self):
        self.prepare_cart()
        self.place_order()
        self.assertEqual(len(mail.outbox), 1)
        body = mail.outbox[0].body
        self.assertIn(self.EMAIL, body)
        self.assertIn(self.PHONE, body)
    def test_order_success_page(self):
        """
        Success page should show actual order.
        Test was created because of this bug:
        https://github.com/fidals/shopelectro/issues/678
        """
        # first order placing
        self.prepare_cart()
        self.place_order('<EMAIL>')
        # second order placing
        self.prepare_cart()
        response = self.client.post(
            self.url,
            {'email': '<EMAIL>', 'phone': self.PHONE},
            follow=True
        )
        # The page that follows the redirect must show the newest order only.
        first, last = Order.objects.first(), Order.objects.last()
        self.assertNotContains(response, str(first))
        self.assertContains(response, str(last))
| StarcoderdataPython |
1814233 | <gh_stars>0
import json
import xml.etree.ElementTree as ET
def export(annotation_files, output_dir):
    """Write one Pascal-VOC XML file per annotation file into *output_dir*."""
    for single_file in annotation_files:
        export_file(single_file, output_dir)
def export_file(annotation_file, output_dir):
    """Serialize one annotation file to ``<output_dir>/<name>.xml``."""
    tree = build_xml(annotation_file)
    destination = (output_dir / annotation_file.filename).with_suffix(".xml")
    with open(destination, "wb") as handle:
        handle.write(ET.tostring(tree))
def build_xml(annotation_file):
    """Build the Pascal-VOC XML tree for a single darwin annotation file."""
    root = ET.Element("annotation")
    add_subelement_text(root, "folder", "images")
    add_subelement_text(root, "filename", annotation_file.filename)
    add_subelement_text(root, "path", f"images/{annotation_file.filename}")
    origin = ET.SubElement(root, "source")
    add_subelement_text(origin, "database", "darwin")
    dimensions = ET.SubElement(root, "size")
    add_subelement_text(dimensions, "width", str(annotation_file.image_width))
    add_subelement_text(dimensions, "height", str(annotation_file.image_height))
    add_subelement_text(dimensions, "depth", "3")
    add_subelement_text(root, "segmented", "0")
    # Only bounding-box annotations have a VOC representation; skip the rest.
    boxes = (
        item for item in annotation_file.annotations
        if item.annotation_class.annotation_type == "bounding_box"
    )
    for box in boxes:
        data = box.data
        voc_object = ET.SubElement(root, "object")
        add_subelement_text(voc_object, "name", box.annotation_class.name)
        add_subelement_text(voc_object, "pose", "Unspecified")
        add_subelement_text(voc_object, "truncated", "0")
        add_subelement_text(voc_object, "difficult", "0")
        # darwin stores x/y/w/h; VOC wants rounded corner coordinates.
        bndbox = ET.SubElement(voc_object, "bndbox")
        add_subelement_text(bndbox, "xmin", str(round(data["x"])))
        add_subelement_text(bndbox, "ymin", str(round(data["y"])))
        add_subelement_text(bndbox, "xmax", str(round(data["x"] + data["w"])))
        add_subelement_text(bndbox, "ymax", str(round(data["y"] + data["h"])))
    return root
def add_subelement_text(parent, name, value):
    """Append a child element called *name* with text *value*; return the child."""
    child = ET.SubElement(parent, name)
    child.text = value
    return child
def convert_file(path):
    """Load a darwin JSON export from *path* and convert it to a VOC XML tree."""
    with open(path, "r") as handle:
        payload = json.load(handle)
    return build_voc(payload["image"], payload["annotations"])
def save_xml(xml, path):
    """Serialize the Element *xml* to *path* as bytes."""
    serialized = ET.tostring(xml)
    with open(path, "wb") as handle:
        handle.write(serialized)
def build_voc(metadata, annotations):
    """Build a Pascal-VOC XML tree from darwin image metadata and annotations.

    Args:
        metadata: dict with 'original_filename', 'width' and 'height' keys.
        annotations: iterable of annotation dicts; only entries carrying a
            'bounding_box' key are exported.

    Returns:
        The root ``annotation`` Element.
    """
    # BUG FIX: removed a stray debug ``print(metadata)`` left over from development.
    root = ET.Element("annotation")
    add_subelement_text(root, "folder", "images")
    add_subelement_text(root, "filename", metadata["original_filename"])
    add_subelement_text(root, "path", f"images/{metadata['original_filename']}")
    source = ET.SubElement(root, "source")
    add_subelement_text(source, "database", "darwin")
    size = ET.SubElement(root, "size")
    add_subelement_text(size, "width", str(metadata["width"]))
    add_subelement_text(size, "height", str(metadata["height"]))
    add_subelement_text(size, "depth", "3")
    add_subelement_text(root, "segmented", "0")
    for annotation in annotations:
        if "bounding_box" not in annotation:
            continue
        data = annotation["bounding_box"]
        sub_annotation = ET.SubElement(root, "object")
        add_subelement_text(sub_annotation, "name", annotation["name"])
        add_subelement_text(sub_annotation, "pose", "Unspecified")
        add_subelement_text(sub_annotation, "truncated", "0")
        add_subelement_text(sub_annotation, "difficult", "0")
        # darwin stores x/y/w/h; VOC wants rounded corner coordinates.
        bndbox = ET.SubElement(sub_annotation, "bndbox")
        add_subelement_text(bndbox, "xmin", str(round(data["x"])))
        add_subelement_text(bndbox, "ymin", str(round(data["y"])))
        add_subelement_text(bndbox, "xmax", str(round(data["x"] + data["w"])))
        add_subelement_text(bndbox, "ymax", str(round(data["y"] + data["h"])))
    return root
| StarcoderdataPython |
6511806 | <gh_stars>0
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import functools

from parse_rest.core import ResourceRequestLoginRequired, ParseError
from parse_rest.connection import API_ROOT
from parse_rest.datatypes import ParseResource, ParseType
from parse_rest.query import QueryManager
def login_required(func):
    '''Decorator describing User methods that need to be logged in.

    Raises ResourceRequestLoginRequired when the instance carries no
    ``sessionToken`` attribute (i.e. the user never logged in).
    '''
    # functools.wraps preserves the wrapped method's __name__/__doc__, which
    # the original decorator clobbered.
    @functools.wraps(func)
    def ret(obj, *args, **kw):
        if not hasattr(obj, 'sessionToken'):
            message = '%s requires a logged-in session' % func.__name__
            raise ResourceRequestLoginRequired(message)
        return func(obj, *args, **kw)
    return ret
class User(ParseResource):
    '''
    A User is like a regular Parse object (can be modified and saved) but
    it requires additional methods and functionality
    '''
    ENDPOINT_ROOT = '/'.join([API_ROOT, 'users'])
    PROTECTED_ATTRIBUTES = ParseResource.PROTECTED_ATTRIBUTES + [
        'username', 'sessionToken', 'emailVerified']

    def is_authenticated(self):
        '''Return True when this object carries a session token.'''
        return self.sessionToken is not None

    def authenticate(self, password=None, session_token=None):
        '''Verify the given credentials/token against Parse and attach the session.'''
        if self.is_authenticated(): return

        if password is not None:
            # NOTE(review): rebinding ``self`` only affects the local name; the
            # caller's object is not replaced. Kept as-is to preserve behavior.
            self = User.login(self.username, password)

        user = User.Query.get(objectId=self.objectId)
        if user.objectId == self.objectId and user.sessionToken == session_token:
            self.sessionToken = session_token

    @login_required
    def session_header(self):
        '''Header dict carrying the session token for authenticated requests.'''
        return {'X-Parse-Session-Token': self.sessionToken}

    @login_required
    def save(self, batch=False):
        '''Persist editable attributes; returns (response, callback) in batch mode.'''
        session_header = {'X-Parse-Session-Token': self.sessionToken}
        url = self._absolute_url
        data = self._to_native()

        response = User.PUT(url, extra_headers=session_header, batch=batch, **data)

        def call_back(response_dict):
            self.updatedAt = response_dict['updatedAt']

        if batch:
            return response, call_back
        else:
            call_back(response)

    @login_required
    def delete(self):
        '''Delete this user on the server (requires a valid session).'''
        session_header = {'X-Parse-Session-Token': self.sessionToken}
        return User.DELETE(self._absolute_url, extra_headers=session_header)

    @classmethod
    def signup(cls, username, password, **kw):
        '''Create a new user account and return the resulting User.'''
        response_data = User.POST('', username=username, password=password, **kw)
        response_data.update({'username': username})
        return cls(**response_data)

    @classmethod
    def login(cls, username, passwd):
        '''Log in with username/password; returns a session-bearing User.'''
        login_url = '/'.join([API_ROOT, 'login'])
        # BUG FIX: the password keyword previously referenced an undefined
        # placeholder token (a syntax error); it must forward ``passwd``.
        return cls(**User.GET(login_url, username=username, password=passwd))

    @classmethod
    def login_auth(cls, auth):
        '''Log in with third-party authData (e.g. OAuth) and return the User.'''
        login_url = User.ENDPOINT_ROOT
        return cls(**User.POST(login_url, authData=auth))

    @classmethod
    def current_user(cls):
        '''Return the User bound to the current session token.'''
        user_url = '/'.join([API_ROOT, 'users/me'])
        return cls(**User.GET(user_url))

    @staticmethod
    def request_password_reset(email):
        '''Trigger Parse\'s Password Process. Return True/False
        indicate success/failure on the request'''
        url = '/'.join([API_ROOT, 'requestPasswordReset'])
        try:
            User.POST(url, email=email)
            return True
        except ParseError:
            return False

    def _to_native(self):
        # Serialize editable attributes to Parse wire format (objects as pointers).
        return dict([(k, ParseType.convert_to_parse(v, as_pointer=True))
                     for k, v in self._editable_attrs.items()])

    @property
    def className(self):
        return '_User'

    def __repr__(self):
        return '<User:%s (Id %s)>' % (getattr(self, 'username', None), self.objectId)

    def removeRelation(self, key, className, objectsId):
        '''Remove the given object ids from the relation stored at *key*.'''
        self.manageRelation('RemoveRelation', key, className, objectsId)

    def addRelation(self, key, className, objectsId):
        '''Add the given object ids to the relation stored at *key*.'''
        self.manageRelation('AddRelation', key, className, objectsId)

    def manageRelation(self, action, key, className, objectsId):
        '''Apply an Add/Remove relation operation for *key* on the server.'''
        objects = [{
            "__type": "Pointer",
            "className": className,
            "objectId": objectId
        } for objectId in objectsId]
        payload = {
            key: {
                "__op": action,
                "objects": objects
            }
        }
        self.__class__.PUT(self._absolute_url, **payload)
        # Clear the local attribute so a stale relation value is not reused.
        self.__dict__[key] = ''


User.Query = QueryManager(User)
| StarcoderdataPython |
11322345 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
from flink.plan.Environment import get_environment
from flink.plan.Constants import INT, STRING
from flink.functions.FlatMapFunction import FlatMapFunction
from flink.functions.GroupReduceFunction import GroupReduceFunction
class Tokenizer(FlatMapFunction):
    """Splits each input line into lowercase words, emitting (1, word) pairs."""
    def flat_map(self, value, collector):
        tokens = value.lower().split()
        for token in tokens:
            collector.collect((1, token))
class Adder(GroupReduceFunction):
    """Sums the counts of a word group and emits a single (count, word) pair."""
    def reduce(self, iterator, collector):
        # The first element supplies both the word and the initial count.
        count, word = iterator.next()
        count += sum([x[0] for x in iterator])
        collector.collect((count, word))
if __name__ == "__main__":
    env = get_environment()
    if len(sys.argv) != 1 and len(sys.argv) != 3:
        sys.exit("Usage: ./bin/pyflink.sh WordCount[ - <text path> <result path>]")

    # Read from a file when paths are given, otherwise use a built-in sample.
    if len(sys.argv) == 3:
        data = env.read_text(sys.argv[1])
    else:
        data = env.from_elements("hello","world","hello","car","tree","data","hello")

    # BUG FIX: the original had a trailing line-continuation backslash after
    # reduce_group(...), which joined the following ``if`` statement into the
    # expression and made the script a SyntaxError.
    result = data \
        .flat_map(Tokenizer(), (INT, STRING)) \
        .group_by(1) \
        .reduce_group(Adder(), (INT, STRING), combinable=True)

    if len(sys.argv) == 3:
        result.write_csv(sys.argv[2])
    else:
        result.output()

    env.set_degree_of_parallelism(1)
    env.execute(local=True)
9652009 | <reponame>anniyanvr/ray<gh_stars>0
import unittest
from unittest.mock import Mock, MagicMock

import pytest

from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.utils.pre_checks.env import check_gym_environments
class TestGymCheckEnv(unittest.TestCase):
    """Unit tests for check_gym_environments' validation error paths.

    NOTE(review): these methods use ``pytest.raises`` but the file only
    imports pytest inside the ``__main__`` guard — under pytest collection the
    name would be undefined at module scope; confirm a module-level
    ``import pytest`` exists.
    """
    def test_has_observation_and_action_space(self):
        # An env exposing neither space must fail on observation_space first.
        env = Mock(spec=[])
        with pytest.raises(
                AttributeError, match="Env must have observation_space."):
            check_gym_environments(env)
        env = Mock(spec=["observation_space"])
        with pytest.raises(
                AttributeError, match="Env must have action_space."):
            check_gym_environments(env)
        del env
    def test_obs_and_action_spaces_are_gym_spaces(self):
        env = RandomEnv()
        observation_space = env.observation_space
        env.observation_space = "not a gym space"
        with pytest.raises(
                ValueError, match="Observation space must be a gym.space"):
            check_gym_environments(env)
        # Restore the valid observation space before breaking the action space.
        env.observation_space = observation_space
        env.action_space = "not an action space"
        with pytest.raises(
                ValueError, match="Action space must be a gym.space"):
            check_gym_environments(env)
        del env
    def test_sampled_observation_contained(self):
        env = RandomEnv()
        # check for observation that is out of bounds
        error = ".*A sampled observation from your env wasn't contained .*"
        env.observation_space.sample = MagicMock(return_value=5)
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        # check for observation that is in bounds, but the wrong type
        env.observation_space.sample = MagicMock(return_value=float(1))
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        del env
    def test_sampled_action_contained(self):
        env = RandomEnv()
        error = ".*A sampled action from your env wasn't contained .*"
        env.action_space.sample = MagicMock(return_value=5)
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        # check for observation that is in bounds, but the wrong type
        env.action_space.sample = MagicMock(return_value=float(1))
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        del env
    def test_reset(self):
        reset = MagicMock(return_value=5)
        env = RandomEnv()
        env.reset = reset
        # check reset with out of bounds fails
        error = ".*The observation collected from env.reset().*"
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        # check reset with obs of incorrect type fails
        reset = MagicMock(return_value=float(1))
        env.reset = reset
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        del env
    def test_step(self):
        # step returns (obs, reward, done, info); each field is validated in turn.
        step = MagicMock(return_value=(5, 5, True, {}))
        env = RandomEnv()
        env.step = step
        error = ".*The observation collected from env.step.*"
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        # check reset that returns obs of incorrect type fails
        step = MagicMock(return_value=(float(1), 5, True, {}))
        env.step = step
        with pytest.raises(ValueError, match=error):
            check_gym_environments(env)
        # check step that returns reward of non float/int fails
        step = MagicMock(return_value=(1, "Not a valid reward", True, {}))
        env.step = step
        error = ("Your step function must return a reward that is integer or "
                 "float.")
        with pytest.raises(AssertionError, match=error):
            check_gym_environments(env)
        # check step that returns a non bool fails
        step = MagicMock(
            return_value=(1, float(5), "not a valid done signal", {}))
        env.step = step
        error = "Your step function must return a done that is a boolean."
        with pytest.raises(AssertionError, match=error):
            check_gym_environments(env)
        # check step that returns a non dict fails
        step = MagicMock(
            return_value=(1, float(5), True, "not a valid env info"))
        env.step = step
        error = "Your step function must return a info that is a dict."
        with pytest.raises(AssertionError, match=error):
            check_gym_environments(env)
        del env
# Script entry point: run this module's tests under pytest and propagate the status.
if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
| StarcoderdataPython |
5178859 | import string
def is_pangram(sentence):
    """Return True if *sentence* contains every ASCII letter at least once.

    Case-insensitive; non-letter characters are ignored. The lowercased
    character set is built once, making this O(len(sentence)) instead of
    re-lowercasing and rescanning the sentence for each of the 26 letters.
    """
    present = set(sentence.lower())
    return all(letter in present for letter in string.ascii_lowercase)
| StarcoderdataPython |
3556095 | from django.db import models
class User(models.Model):
    """Minimal user record holding a first/last name and its creation timestamp."""
    # Set once when the row is first saved; not editable afterwards.
    created = models.DateTimeField(auto_now_add=True)
    first_name = models.CharField(max_length=63)
    last_name = models.CharField(max_length=63)
1833610 | <gh_stars>0
from decimal import Decimal
from nose.tools import eq_
import test_utils
from lib.sellers.models import Seller, SellerPaypal, SellerProduct
from lib.transactions import constants
from lib.transactions.models import Transaction
from lib.transactions.utils import completed, refunded, reversal
class TestIPN(test_utils.TestCase):
    """Tests for PayPal IPN handlers: completed, refunded and reversal transitions."""
    def setUp(self):
        # One pending payment transaction shared by every test.
        self.transaction_uuid = 'transaction:uid'
        self.seller = Seller.objects.create(uuid='seller:uid')
        self.product = SellerProduct.objects.create(seller=self.seller)
        self.paypal = SellerPaypal.objects.create(seller=self.seller,
                                                  paypal_id='<EMAIL>')
        self.transaction = Transaction.objects.create(
            type=constants.TYPE_PAYMENT,
            status=constants.STATUS_PENDING,
            uid_support='asd',
            uid_pay='asd',
            seller_product=self.product,
            provider=constants.SOURCE_PAYPAL,
            amount=Decimal('10'),
            currency='USD',
            uuid=self.transaction_uuid)
    def get_transaction(self):
        """Reload the fixture transaction from the database."""
        return Transaction.objects.get(pk=self.transaction.pk)
    def test_complete(self):
        completed({'tracking_id': self.transaction_uuid}, {})
        eq_(self.get_transaction().status, constants.STATUS_COMPLETED)
    def test_complete_missing(self):
        # Unknown tracking ids must be rejected, not created.
        eq_(completed({'tracking_id': self.transaction_uuid + 'nope'}, {}),
            False)
    def test_complete_already(self):
        # Completing twice is a no-op and reports failure.
        self.transaction.status = constants.STATUS_COMPLETED
        self.transaction.save()
        eq_(completed({'tracking_id': self.transaction_uuid}, {}), False)
    def test_refund(self):
        self.transaction.status = constants.STATUS_COMPLETED
        self.transaction.save()
        refunded({'tracking_id': self.transaction_uuid, 'pay_key': 'foo'},
                 {'amount': {'amount': 10, 'currency': 'USD'}})
        types = [s.type for s in Transaction.objects.all()]
        eq_([constants.TYPE_REFUND, constants.TYPE_PAYMENT], types)
    def test_refund_amount(self):
        self.transaction.status = constants.STATUS_COMPLETED
        self.transaction.save()
        refunded({'tracking_id': self.transaction_uuid, 'pay_key': 'foo'},
                 {'amount': {'amount': 10, 'currency': 'USD'}})
        trans = Transaction.objects.get(type=constants.TYPE_REFUND)
        # Refunds are stored as negative amounts.
        eq_(trans.amount, Decimal('-10'))
        eq_(trans.currency, 'USD')
    def test_refund_missing(self):
        eq_(refunded({'tracking_id': self.transaction_uuid + 'nope'}, {}),
            False)
    def test_refund_already_done(self):
        # A pre-existing refund for the same payment blocks a second one.
        Transaction.objects.create(
            type=constants.TYPE_REFUND,
            status=constants.STATUS_PENDING,
            uid_support='asd-123',
            uid_pay='asd-123',
            seller_product=self.product,
            amount=Decimal('-10'),
            currency='USD',
            provider=constants.SOURCE_PAYPAL,
            uuid=self.transaction_uuid + 'another',
            related=self.transaction)
        eq_(refunded({'tracking_id': self.transaction_uuid}, {}), False)
    def test_reject_missing(self):
        eq_(reversal({'tracking_id': self.transaction_uuid + 'nope'}, {}),
            False)
    def test_rejected_already_done(self):
        # A pre-existing reversal for the same payment blocks a second one.
        Transaction.objects.create(
            type=constants.TYPE_REVERSAL,
            status=constants.STATUS_PENDING,
            uid_support='asd-123',
            uid_pay='asd-123',
            seller_product=self.product,
            amount=Decimal('-10'),
            currency='USD',
            provider=constants.SOURCE_PAYPAL,
            uuid=self.transaction_uuid + 'another',
            related=self.transaction)
        eq_(reversal({'tracking_id': self.transaction_uuid}, {}), False)
    def test_rejected(self):
        self.transaction.status = constants.STATUS_COMPLETED
        self.transaction.save()
        reversal({'tracking_id': self.transaction_uuid, 'pay_key': 'foo'},
                 {'amount': {'amount': 10, 'currency': 'USD'}})
        types = [s.type for s in Transaction.objects.all()]
        eq_([constants.TYPE_REVERSAL, constants.TYPE_PAYMENT], types)
| StarcoderdataPython |
8076950 | import mock
from django.conf import settings
from django.test import TestCase
from . import CommonCommandsTestMixin, call_command_real as call_command
# Shared MagicMock standing in for ``django.conf.settings`` in every test.
global_mock = mock.MagicMock()


class TestInitCommand(CommonCommandsTestMixin, TestCase):
    """Tests for the ``upgrade`` management command."""

    def setUp(self):
        super(TestInitCommand, self).setUp()
        settings_patcher = mock.patch('django.conf.settings', global_mock)
        self.mock_settings = settings_patcher.start()
        self.addCleanup(settings_patcher.stop)

    def test_default_commands_called(self):
        """Without south installed, only ``syncdb`` is invoked."""
        call_command('upgrade')
        self.assertEqual(self.mock_call.call_count, 1)
        self.mock_call.assert_any_call('syncdb', database='default',
                                       interactive=False)

    def test_with_south_commands_called(self):
        """With south installed, ``syncdb`` is followed by ``migrate``."""
        self.mock_settings.INSTALLED_APPS = settings.INSTALLED_APPS + ['south']
        call_command('upgrade')
        self.assertEqual(self.mock_call.call_count, 2)
        self.mock_call.assert_any_call('syncdb', database='default',
                                       interactive=False)
        self.mock_call.assert_any_call('migrate', database='default',
                                       interactive=False, delete_ghosts=True)
| StarcoderdataPython |
6552157 | <filename>Part-2/generators.py
# def yrange(n):
# i=0
# while i<n:
# yield i
# i+=1
# y=yrange(4)
# # print y
# print y.next()
# print y.next()
# print y.next()
# print y.next()
# print y.next()
def myCustomerGenerator():
    """Yield the integers 0 through 9.

    The body does not run until the first value is requested, so the
    "Start Generator" line prints lazily, on the first next()/iteration.
    """
    print("Start Generator")
    i = 0
    while i < 10:
        yield i
        i += 1


m = myCustomerGenerator()
for i in m:
    # BUG FIX: was `print i` (Python 2 statement syntax), a SyntaxError
    # under Python 3 even though the line above already uses print(...).
    print(i)
1960784 | <filename>pyorb_core/rb_library/affine_decomposition.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 14:41:45 2018
@author: <NAME>
@email : <EMAIL>
"""
import numpy as np
import pyorb_core.algebraic_utils as alg_ut
class AffineDecomposition:
    """Bookkeeping for the size of an affine decomposition.

    Qa is the number of affine matrix components and Qf the number of
    affine right-hand-side (vector) components.
    """

    # Class-level defaults; set_Q rebinds them on the instance.
    M_qa = 0
    M_qf = 0

    def __init__(self):
        pass

    def get_Qa(self):
        """Return the number of affine matrix components."""
        return self.M_qa

    def get_Qf(self):
        """Return the number of affine vector components."""
        return self.M_qf

    def set_Q(self, _qa, _qf):
        """Record the matrix (_qa) and vector (_qf) component counts."""
        self.M_qa = _qa
        self.M_qf = _qf

    def print_ad_summary(self):
        """Print both component counts."""
        print( "Number of affine decomposition matrices %d" % self.M_qa )
        print( "Number of affine decomposition vectors %d" % self.M_qf )
class AffineDecompositionHandler:
    """Manage the FE and RB affine components (matrices Aq, vectors fq)."""

    def __init__(self):
        pass

    def get_Qa(self):
        """Return the number of affine matrix components."""
        return self.M_affineDecomposition.get_Qa()

    def get_Qf(self):
        """Return the number of affine vector components."""
        return self.M_affineDecomposition.get_Qf()

    def set_Q(self, _qa, _qf):
        """Forward the component counts to the wrapped AffineDecomposition."""
        self.M_affineDecomposition.set_Q(_qa, _qf)

    def get_affine_matrix(self, _q):
        """Return the _q-th FE affine matrix."""
        return self.M_feAffineAq[_q]

    def get_affine_vector(self, _q):
        """Return the _q-th FE affine vector."""
        return self.M_feAffineFq[_q]

    def get_rb_affine_matrix(self, _q):
        """Return the _q-th RB affine matrix."""
        return self.M_rbAffineAq[_q]

    def get_rb_affine_vector(self, _q):
        """Return the _q-th RB affine vector."""
        return self.M_rbAffineFq[_q]
# _input_file is the common prefix of the affine matrix files on disk.
def import_affine_matrices(self, _input_file):
    """Load the Qa FE affine matrices from '<prefix><q>.txt'.

    Files hold matrices in sparse (triplet) format; 1-based row/column
    indices are shifted to 0-based.
    """
    Qa = self.M_affineDecomposition.get_Qa()
    assert Qa > 0
    self.M_feAffineAq = []
    for q in range(Qa):
        mat = np.loadtxt(_input_file + str(q) + '.txt')
        # Indices starting at 1 indicate a 1-based file; rescale them.
        if np.min(mat[:, 0:2]) > 0:
            mat[:, 0:2] = mat[:, 0:2] - 1
        self.M_feAffineAq.append(mat)
# _input_file is the common prefix of the affine vector files on disk.
def import_affine_vectors(self, _input_file):
    """Load the Qf FE affine vectors from '<prefix><q>.txt'."""
    Qf = self.M_affineDecomposition.get_Qf()
    assert Qf > 0
    self.M_feAffineFq = []
    for q in range(Qf):
        self.M_feAffineFq.append(np.loadtxt(_input_file + str(q) + '.txt'))
def import_rb_affine_matrices(self, _input_file):
    """Load the Qa reduced-basis matrices from '<prefix><q>.txt'."""
    Qa = self.M_affineDecomposition.get_Qa()
    assert Qa > 0
    self.M_rbAffineAq = []
    for q in range(Qa):
        self.M_rbAffineAq.append(np.loadtxt(_input_file + str(q) + '.txt'))

# _input_file is the common prefix of the RB vector files on disk.
def import_rb_affine_vectors(self, _input_file):
    """Load the Qf reduced-basis vectors from '<prefix><q>.txt'."""
    Qf = self.M_affineDecomposition.get_Qf()
    assert Qf > 0
    self.M_rbAffineFq = []
    for q in range(Qf):
        self.M_rbAffineFq.append(np.loadtxt(_input_file + str(q) + '.txt'))
def print_ad_summary(self):
    """Print the component counts of the wrapped decomposition."""
    self.M_affineDecomposition.print_ad_summary()

def print_affine_components(self):
    """Print every RB vector and matrix component."""
    Qf = self.get_Qf()
    Qa = self.get_Qa()
    for q in range(Qf):
        print( '\nRB rhs affine components %d \n' % q )
        print( self.M_rbAffineFq[q] )
    for q in range(Qa):
        print( '\nRB mat affine components %d \n' % q )
        print( self.M_rbAffineAq[q] )

def reset_rb_approximation(self):
    """Drop any previously assembled RB arrays."""
    self.M_rbAffineFq = []
    self.M_rbAffineAq = []

def set_affine_a(self, _feAffineAq):
    """Inject the FE affine matrices directly."""
    self.M_feAffineAq = _feAffineAq

def set_affine_f(self, _feAffineFq):
    """Inject the FE affine vectors directly."""
    self.M_feAffineFq = _feAffineFq
# Project the FE affine components onto the RB basis (N = number of basis
# columns), or additionally pull truly-RB components from the TPL when
# _build_rb_tpl is True.
# NOTE(review): indentation was lost in this copy of the file, so the exact
# nesting of the branches below should be confirmed against the upstream
# source before any refactoring.
def build_rb_affine_decompositions( self, _basis, _fom_problem, _build_rb_tpl=False ):
# Reduced dimension: one RB degree of freedom per basis column.
N = _basis.shape[1]
Qf = self.get_Qf( )
Qa = self.get_Qa( )
if _build_rb_tpl is not True:
# Fetch any missing FOM affine arrays from the problem object first.
if self.check_set_fom_arrays( ) == False:
print( "Importing FOM affine arrays " )
if len( self.M_feAffineFq ) < Qf:
print( "I am importing f affine arrays " )
# Only request the components we do not yet hold.
fff = _fom_problem.retrieve_fom_affine_components( 'f', Qf - len( self.M_feAffineFq ) )
starting_Qf = len( self.M_feAffineFq )
for iQf in range( Qf - starting_Qf ):
self.M_feAffineFq.append( np.array( fff['f' + str(iQf)] ) )
if len( self.M_feAffineAq ) < Qa:
print( "I am importing A affine arrays starting from %d and to %d " % (len( self.M_feAffineAq ), Qa) )
AAA = _fom_problem.retrieve_fom_affine_components( 'A', Qa - len( self.M_feAffineAq ) )
starting_Qa = len( self.M_feAffineAq )
for iQa in range( Qa - starting_Qa ):
print( "I am importing A affine array %d " % (iQa + starting_Qa) )
self.M_feAffineAq.append( AAA['A' + str(iQa)] )
# Still short on matrices: fall back to the Jacobian ('Aj') components.
if len( self.M_feAffineAq ) < Qa:
print( "I am importing A - Jacobian affine arrays starting from %d and to %d " % (len( self.M_feAffineAq ), Qa) )
AAA = _fom_problem.retrieve_fom_affine_components( 'Aj', Qa - len( self.M_feAffineAq ) )
starting_Qa = len( self.M_feAffineAq )
for iQa in range( Qa - starting_Qa ):
print( "I am importing A affine array %d " % (iQa + starting_Qa) )
self.M_feAffineAq.append( AAA['A' + str(iQa)] )
else:
print( "Already set the FOM affine arrays " )
# Galerkin projection: fN_q = V^T f_q and AN_q = V^T A_q V.
for iQf in range( Qf ):
self.M_rbAffineFq.append( np.zeros( N ) )
self.M_rbAffineFq[iQf] = _basis.T.dot( self.M_feAffineFq[iQf] )
for iQa in range( Qa ):
Av = alg_ut.sparse_matrix_vector_mul( self.M_feAffineAq[iQa], _basis )
self.M_rbAffineAq.append( np.zeros( (N, N) ) )
self.M_rbAffineAq[iQa] = _basis.T.dot( Av )
elif _build_rb_tpl is True:
# Project whatever (M)DEIM components are already stored ...
print( 'Constructing the RB basis from the affine componenets obtained from (M)DEIM at first' )
for iQf in range( len(self.M_feAffineFq) ):
self.M_rbAffineFq.append( np.zeros( N ) )
self.M_rbAffineFq[iQf] = _basis.T.dot( self.M_feAffineFq[iQf] )
for iQa in range( len(self.M_feAffineAq) ):
Av = alg_ut.sparse_matrix_vector_mul( self.M_feAffineAq[iQa], _basis )
self.M_rbAffineAq.append( np.zeros( (N, N) ) )
self.M_rbAffineAq[iQa] = _basis.T.dot( Av )
# ... then append the already-reduced arrays provided by the TPL.
print( "Importing directly the truly RB affine arrays from TPL " )
rbAffineFq_components = _fom_problem.retrieve_rb_affine_components( 'f' )
for iQf in range( len( rbAffineFq_components ) ):
self.M_rbAffineFq.append( np.array (rbAffineFq_components['fN' + str(iQf)] ) )
rbAffineAq_components = _fom_problem.retrieve_rb_affine_components( 'A' )
for iQa in range( len( rbAffineAq_components ) ):
self.M_rbAffineAq.append( np.array( rbAffineAq_components['AN' + str(iQa)] ) )
"""
if len( self.M_feAffineAq ) < Qa:
rbAffineAjq_components = _fom_problem.retrieve_rb_affine_components( 'Aj' )
for iQaj in range( len( rbAffineAjq_components ) ):
self.M_rbAffineAq.append( rbAffineAjq_components[iQaj] )
"""
print( 'Finished to build the RB affine arrays' )
return
def check_set_fom_arrays(self):
    """Return True when enough FE matrices and vectors are already stored."""
    enough_matrices = len(self.M_feAffineAq) >= self.get_Qa()
    enough_vectors = len(self.M_feAffineFq) >= self.get_Qf()
    return enough_matrices and enough_vectors
def save_rb_affine_decomposition(self, _file_name):
    """Write the RB affine arrays to plain-text files.

    Matrices go to '<_file_name>_A<q>.txt' (space-separated columns, one
    matrix row per line) and vectors to '<_file_name>_f<q>.txt' (one
    entry per line). Values use the %.10g format.
    """
    Qf = self.get_Qf()
    Qa = self.get_Qa()
    for iQa in range(Qa):
        # 'with' guarantees the file is closed even if a write fails.
        with open(_file_name + '_A' + str(iQa) + '.txt', 'w+') as output_file:
            rows, cols = self.M_rbAffineAq[iQa].shape
            for iN in range(rows):
                for jN in range(cols):
                    output_file.write("%.10g" % self.M_rbAffineAq[iQa][iN, jN])
                    # BUG FIX: separators were written as '" " % value' and
                    # '"\n" % value'; applying % to a format string with no
                    # conversion specifier raises TypeError ("not all
                    # arguments converted"). Plain writes are intended.
                    output_file.write(" " if jN < cols - 1 else "\n")
    for iQf in range(Qf):
        with open(_file_name + '_f' + str(iQf) + '.txt', 'w+') as output_file:
            for iN in range(self.M_rbAffineFq[iQf].shape[0]):
                output_file.write("%.10g" % self.M_rbAffineFq[iQf][iN])
                # BUG FIX: same TypeError pattern as above for both writes.
                output_file.write(" ")
                output_file.write("\n")
    return
def import_affine_components(self, _affine_components):
    """Load RB arrays previously written by save_rb_affine_decomposition."""
    self.M_rbAffineAq = []
    self.M_rbAffineFq = []
    Qf = self.get_Qf()
    Qa = self.get_Qa()
    for q in range(Qa):
        self.M_rbAffineAq.append(
            np.loadtxt(_affine_components + '_A' + str(q) + '.txt'))
    for q in range(Qf):
        self.M_rbAffineFq.append(
            np.loadtxt(_affine_components + '_f' + str(q) + '.txt'))

# Class-level defaults. NOTE: these list literals are shared by all
# instances until a method rebinds them on the instance (the importers and
# reset_rb_approximation above always rebind before appending).
M_feAffineAq = []
M_feAffineFq = []
M_rbAffineAq = []
M_rbAffineFq = []
M_affineDecomposition = AffineDecomposition()
| StarcoderdataPython |
8074273 | from mock_server.api import Response
import json
# Mock-server handler: serve a fixed JSON body for requests to /abc.
# NOTE(review): indentation was lost in this copy; presumably the three lines
# after the `if` all sit inside it, so any other uri falls through and the
# function implicitly returns None -- confirm against the upstream file.
def provider(request):
if request.uri == '/abc':
headers = [("Content-Type", "application/json; charset=utf-8")]
content = {"name": "Tomas", "surname": "Hanacek"}
return Response(json.dumps(content), headers, 200)
| StarcoderdataPython |
183626 | <filename>examples/api/profile/update_profile.py
from instauto.api.client import ApiClient
import instauto.api.actions.structs.profile as pr
from instauto.api.structs import WhichGender
# Restore a previously saved session from disk.
client = ApiClient.initiate_from_file('.instauto.save')

# A single Update struct can change several profile attributes at once,
# or just one of them.
update = pr.Update(
    "https://github.com/stanvanrooy/instauto",
    "your phone number",
    "your new username",
    "your new first name",
    "your new email"
)
client.profile_update(update)
| StarcoderdataPython |
4952479 | <reponame>computing-intelligence/lottery
# @Time : 2021-08-31 10:49
# @Author : 老赵
# @File : lottery.py
import random
import time
from collections import Counter
from tqdm import tqdm
class Lottery:
    """Draw lottery winners from a pool of codes by repeated random picks."""

    def __init__(self, codes, winner_nums):
        self.codes = codes              # candidate pool (shrunk by remove_codes)
        self.winner_nums = winner_nums  # how many winners to produce
        self.winner_codes = []          # accumulated random picks

    def choose(self):
        """Pick 500 random codes (with repetition), showing a progress bar."""
        for _ in tqdm(range(500)):
            self.winner_codes.append(random.choice(self.codes))
            time.sleep(0.01)  # slow down so the progress bar is visible

    def shuffle_codes(self):
        """Shuffle the candidate pool in place."""
        random.shuffle(self.codes)

    def draw(self):
        """Run choose() and return the 2*winner_nums most frequent picks."""
        self.choose()
        tally = Counter(self.winner_codes)
        return tally.most_common(self.winner_nums * 2)

    def sample_lucky(self):
        """Return exactly winner_nums winning codes, breaking ties randomly."""
        ranked = self.draw()
        cutoff_count = ranked[self.winner_nums - 1][1]
        if cutoff_count != ranked[self.winner_nums][1]:
            # No tie at the boundary: the top winner_nums entries win outright.
            return [code for code, _ in ranked[:self.winner_nums]]
        # Tie at the boundary: codes strictly above the tied count win for
        # sure; the tied codes compete for the remaining slots.
        winners, tied = [], []
        for code, count in ranked:
            if count > cutoff_count:
                winners.append(code)
            elif count == cutoff_count:
                tied.append(code)
            else:
                break
        winners.extend(
            random.sample(tied, self.winner_nums - len(winners)))
        assert len(winners) == self.winner_nums, '获取的中奖码数量有误!'
        self.remove_codes(winners)
        return winners

    def remove_codes(self, used_codes):
        """Drop already-used codes from the candidate pool."""
        for code in used_codes:
            self.codes.remove(code)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.