index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
15,000 | cd2557bde0664237e44b4e93354d177ef548a497 | # coding=utf-8
import pymysql
import logging
from offer.projects.Automation.excel_util import excelutil
from offer.projects.Automation.file_util import fileutil
'''
Build the util:
util = mysql_util({'host':'127.0.0.1', 'user':'', 'passwd':'', 'db':''})
Run SQL (read-only inspection):
util.getData("sql")
util.copy_to_text('select * from log_maintenance_result limit 100','d:/1.txt')
util.copy_to_excel('select * from log_maintenance_result limit 100','d:/1.xls')
Close the connection:
util.disconnect()
'''
class mysql_util:
    """Small pymysql convenience wrapper (see the module docstring for usage).

    Opens one connection and cursor per instance; call disconnect() when done.
    """

    # Default logging setup consumed by __init__; override on the class if needed.
    level = logging.DEBUG
    filename = None
    filemode = None

    def __init__(self, parameter, port=3306):
        # parameter: dict with keys 'host', 'user', 'passwd', 'db'.
        logging.basicConfig(level=self.level, filename=self.filename, filemode=self.filemode,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)
        self.connect = pymysql.Connect(host=parameter['host'], port=port, user=parameter['user'],
                                       passwd=parameter['passwd'], db=parameter['db'], charset='utf8')
        self.cursor = self.connect.cursor()

    def run(self, sql):
        # Execute and commit a raw SQL string.
        # NOTE(review): no parameter binding -- callers must never pass
        # untrusted input here (SQL injection risk).
        self.sql = sql
        self.cursor.execute(sql)
        self.connect.commit()

    def getData(self, sql):
        """Execute *sql* and return all result rows as a list of tuples."""
        self.run(sql)
        temp = []
        for row in self.cursor.fetchall():
            temp.append(row)
            self.logger.debug(row)
        return temp

    def disconnect(self):
        # Close the cursor, then the connection.
        if self.cursor:
            self.cursor.close()
        if self.connect:
            self.connect.close()

    def copy_to_excel(self, sql, file=None):
        """Dump the query result into an Excel file via excelutil."""
        data = self.getData(sql)
        util = excelutil(file=file, mode='w')
        util.write_lines(data, save=True)

    def copy_to_text(self, sql, file):
        """Dump the query result into a text file via fileutil."""
        data = self.getData(sql)
        util = fileutil(file, mode='w', encoding='utf-8')
        util.writelines(data)
        util.close()
#if __name__ == '__main__':
|
15,001 | 6246c1ed62d36c131c195f697272b171bba223e0 | #!/usr/bin/python
from fann2 import libfann
connection_rate = 1
learning_rate = 0.07
num_input = 64
num_hidden_1 = 20
num_output = 10
desired_error = 0.0001
max_iterations = 100000
iterations_between_reports = 100
ann = libfann.neural_net()
ann.create_sparse_array(connection_rate, (num_input, num_hidden_1, num_output))
#ann.randomize_weights(ann, -1, 1)
ann.set_learning_rate(learning_rate)
ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
ann.train_on_file("../../preproc/data/original.data", max_iterations, iterations_between_reports, desired_error)
ann.save("NN.net")
|
15,002 | 044778968e3af8d3cbb7f665a4eac2278ab8ad4c | # -*- coding:utf-8 -*-
import scipy.misc
import numpy as np
import os
from glob import glob
import cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
#from keras.datasets import cifar10, mnist
from tensorflow.contrib.framework import arg_scope, add_arg_scope
from tensorflow.contrib.layers import batch_norm
from tflearn.layers.conv import global_avg_pool
import utilsForTF
def get_image_label_batch(config, shuffle, name):
    """Build an (image_batch, label_batch) input pair via utilsForTF.Data_set.

    config, shuffle and name are forwarded verbatim to Data_set -- see
    utilsForTF for the expected config fields (TODO confirm required keys).
    """
    with tf.name_scope('get_batch'):
        Data = utilsForTF.Data_set(config, shuffle=shuffle, name=name)
        image_batch, label_batch = Data.read_processing_generate_image_label_batch()
        return image_batch, label_batch
def count_trainable_params():
    """Print and return the total number of trainable parameters in the graph."""
    total_parameters = 0
    tracked = []
    for variable in tf.trainable_variables():
        tracked.append(variable)
        param_count = 1
        for dim in variable.get_shape():
            param_count *= dim.value
        total_parameters += param_count
    print("Total training params: %.1fM" % (total_parameters / 1e6))
    return total_parameters
class ImageData:
    """Decode-and-resize helper for building tf image input pipelines."""

    def __init__(self, load_size, channels):
        # load_size: target square edge length; channels: e.g. 3 for RGB JPEGs.
        self.load_size = load_size
        self.channels = channels

    def image_processing(self, filename):
        # Read file -> decode JPEG -> resize to load_size^2 -> scale to [-1, 1].
        x = tf.read_file(filename)
        x_decode = tf.image.decode_jpeg(x, channels=self.channels)
        img = tf.image.resize_images(x_decode, [self.load_size, self.load_size])
        img = tf.cast(img, tf.float32) / 127.5 - 1
        return img
#def load_mnist(size=64):
# (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
# train_data = normalize(train_data)
# test_data = normalize(test_data)
# x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
# seed = 777
# np.random.seed(seed)
# np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
# x = np.expand_dims(x, axis=-1)
# x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
# x = np.expand_dims(x, axis=-1)
# return x
'''def load_cifar10(size=64) :
(train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
train_data = normalize(train_data)
test_data = normalize(test_data)
x = np.concatenate((train_data, test_data), axis=0)
# y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)
seed = 777
np.random.seed(seed)
np.random.shuffle(x)
# np.random.seed(seed)
# np.random.shuffle(y)
x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
return x
def load_data(dataset_name, size=64) :
if dataset_name == 'mnist' :
x = load_mnist(size)
elif dataset_name == 'cifar10' :
x = load_cifar10(size)
else :
x = glob(os.path.join("./dataset", dataset_name, '*.*'))
return x'''
def preprocessing(x, size):
    """Load image file path *x*, resize to size x size, and scale to [-1, 1].

    NOTE(review): scipy.misc.imread / imresize were removed in SciPy >= 1.3,
    so this requires a legacy SciPy (with Pillow) to run.
    """
    x = scipy.misc.imread(x, mode='RGB')
    x = scipy.misc.imresize(x, [size, size])
    x = normalize(x)
    return x
def normalize(x):
    """Rescale pixel values from [0, 255] into [-1, 1]."""
    scaled = x / 127.5
    return scaled - 1
def save_images(images, size, image_path):
    """Tile a batch of [-1, 1] images into a (rows, cols)=size grid and save it."""
    return imsave(inverse_transform(images), size, image_path)
def merge(images, size):
    """Tile a batch (N, H, W, C) into one (rows*h, cols*w[, c]) grid image.

    size is (rows, cols). Each tile is resized to a fixed 128x128 regardless
    of the batch's own H/W (see override below). Raises ValueError when the
    channel count is not 1, 3 or 4.
    """
    images = np.nan_to_num(images)
    h, w = images.shape[1], images.shape[2]
    # NOTE(review): the batch's own h/w are immediately overridden by a fixed
    # 128x128 tile size -- presumably intentional for uniform previews; confirm.
    h, w = 128, 128
    if (images.shape[3] in (3,4)):
        c = images.shape[3]
        img = np.zeros((h * size[0], w * size[1], c))
        for idx, image in enumerate(images):
            i = idx % size[1]
            j = idx // size[1]
            img[j * h:j * h + h, i * w:i * w + w, :] = cv2.resize(image,(h,w))
            #img[j * h:j * h + h, i * w:i * w + w, :] = image
        return img
    elif images.shape[3]==1:
        img = np.zeros((h * size[0], w * size[1]))
        for idx, image in enumerate(images):
            i = idx % size[1]
            j = idx // size[1]
            img[j * h:j * h + h, i * w:i * w + w] = cv2.resize(image,(h,w))
        return img
    else:
        raise ValueError('in merge(images,size) images parameter ''must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
    """Write the tiled grid produced by merge() to *path* (legacy scipy.misc)."""
    # image = np.squeeze(merge(images, size))  # (disabled) would drop a size-1 channel dim
    return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
    """Map network outputs from [-1, 1] back into [0, 1]."""
    shifted = images + 1.
    return shifted / 2.
def check_folder(log_dir):
    """Ensure *log_dir* exists (creating intermediate dirs) and return it.

    Uses exist_ok=True to avoid the check-then-create race of the original
    `if not os.path.exists(...)` version.
    """
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def show_all_variables():
    """Print a slim analysis of every trainable variable in the default graph."""
    model_vars = tf.trainable_variables()
    slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def str2bool(x):
    """Return True iff *x* equals 'true' (case-insensitive).

    Bug fix: the original `x.lower() in ('true')` tested substring membership
    in the STRING 'true' (so e.g. 'rue' was truthy) because ('true') is not a
    tuple. This is the intended exact comparison.
    """
    return x.lower() == 'true'
|
15,003 | d50d4107406ef8b71c276c0a209b92a8e32278ee | import random
# 20 numbers between 0 and 49 inclusive
ints = [random.randrange(50) for i in range(20)]
print ints
squares = map(lambda x: x**2, ints)
print squares |
15,004 | e4ee4c29400e94a654a85ac57ad419eba9856e96 | import asyncio
import discord
from discord.ext import commands
import base64
import binascii
import re
from Cogs import Nullify
def setup(bot):
    """discord.py extension entry point: register the Encode cog on *bot*."""
    # Add the bot and deps
    settings = bot.get_cog("Settings")
    bot.add_cog(Encode(bot, settings))
class Encode:
    """Discord cog with number-base/encoding conversion commands
    (ascii/hex/base64/binary/decimal) and color-space helpers (rgb/hex/cmyk).
    """

    # Init with the bot reference
    def __init__(self, bot, settings):
        self.bot = bot
        self.settings = settings

    def suppressed(self, guild, msg):
        # Check if we're suppressing @here and @everyone mentions
        if self.settings.getServerStat(guild, "SuppressMentions"):
            return Nullify.clean(msg)
        else:
            return msg

    # Helper methods
    def _to_bytes(self, in_string):
        return in_string.encode('utf-8')

    def _to_string(self, in_bytes):
        return in_bytes.decode('utf-8')

    # Check hex value: strip an optional 0x prefix and all non-hex chars.
    def _check_hex(self, hex_string):
        if hex_string.lower().startswith("0x"):
            hex_string = hex_string[2:]
        hex_string = re.sub(r'[^0-9A-Fa-f]+', '', hex_string)
        return hex_string

    # To base64 methods
    def _ascii_to_base64(self, ascii_string):
        ascii_bytes = self._to_bytes(ascii_string)
        base_64 = base64.b64encode(ascii_bytes)
        return self._to_string(base_64)

    def _hex_to_base64(self, hex_string):
        hex_string = self._check_hex(hex_string)
        hex_s_bytes = self._to_bytes(hex_string)
        hex_bytes = binascii.unhexlify(hex_s_bytes)
        base64_bytes = base64.b64encode(hex_bytes)
        return self._to_string(base64_bytes)

    # To ascii methods
    def _hex_to_ascii(self, hex_string):
        hex_string = self._check_hex(hex_string)
        hex_bytes = self._to_bytes(hex_string)
        ascii_bytes = binascii.unhexlify(hex_bytes)
        return self._to_string(ascii_bytes)

    def _base64_to_ascii(self, base64_string):
        base64_bytes = self._to_bytes(base64_string)
        ascii_bytes = base64.b64decode(base64_bytes)
        return self._to_string(ascii_bytes)

    # To hex methods
    def _ascii_to_hex(self, ascii_string):
        ascii_bytes = self._to_bytes(ascii_string)
        hex_bytes = binascii.hexlify(ascii_bytes)
        return self._to_string(hex_bytes)

    def _base64_to_hex(self, base64_string):
        b64_string = self._to_bytes(base64_string)
        base64_bytes = base64.b64decode(b64_string)
        hex_bytes = binascii.hexlify(base64_bytes)
        return self._to_string(hex_bytes)

    # Color-space helpers
    def _rgb_to_hex(self, r, g, b):
        return '#%02x%02x%02x' % (r, g, b)

    def _hex_to_rgb(self, _hex):
        # Handles both 3- and 6-digit hex strings (digit groups of l_hex // 3).
        _hex = _hex.replace("#", "")
        l_hex = len(_hex)
        return tuple(int(_hex[i:i + l_hex // 3], 16) for i in range(0, l_hex, l_hex // 3))

    def _cmyk_to_rgb(self, c, m, y, k):
        # Inputs are percentages (0-100); each channel clamped via min(1.0, ...).
        c, m, y, k = [float(x)/100.0 for x in tuple([c, m, y, k])]
        return tuple([round(255.0 - ((min(1.0, x * (1.0 - k) + k)) * 255.0)) for x in tuple([c, m, y])])

    def _rgb_to_cmyk(self, r, g, b):
        c, m, y = [1 - x/255 for x in tuple([r, g, b])]
        min_cmy = min(c, m, y)
        # Pure black is special-cased to cmyk(0, 0, 0, 100) to avoid 0/0.
        return tuple([0,0,0,100]) if all(x == 0 for x in [r, g, b]) else tuple([round(x*100) for x in [(x - min_cmy) / (1 - min_cmy) for x in tuple([c, m, y])] + [min_cmy]])

    @commands.command()
    async def color(self, ctx, *, value = None):
        """
        View info on a rgb, hex or cmyk color and their
        values in other formats

        Example usage:
        color #3399cc
        color rgb(3, 4, 5)
        """
        if not value:
            await ctx.send("Usage: `{}color [value]`".format(ctx.prefix))
            return
        value = value.lower()
        if not any(value.startswith(x) for x in ["#", "rgb", "cmyk"]):
            await ctx.send("Invalid value color format, please choose from rgb, cmyk or hex")
            return
        error = False
        if value.startswith('rgb'):
            # Expect exactly "rgb(r, g, b)": one '(' + one ')' + two ','.
            count = value.count('(') + value.count(')') + value.count(',')
            if count != 4:
                error = True
            number_list = value.lower().replace("rgb", "").replace("(", "").replace(")", "").replace(" ", "")
            try:
                r, g, b = map(int, number_list.split(','))
                if (r < 0 or r > 255) or (g < 0 or g > 255) or (b < 0 or b > 255):
                    error = True
            except Exception:
                error = True
            if error:
                await ctx.send("Invalid RGB color format!")
                return
            _hex = self._rgb_to_hex(r,g,b)
            c, m, y, k = self._rgb_to_cmyk(r, g, b)
            embed_color = int("0x{}".format(_hex.replace("#", '')), 16)
            embed = discord.Embed(color=embed_color)
            embed.title = "Color {}".format(value.replace(" ", ""))
            embed.add_field(name="Hex", value=_hex)
            embed.add_field(name="CMYK", value="cmyk({}, {}, {}, {})".format(c, m, y, k))
        elif value.startswith('#'):
            match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', value)
            if not match:
                await ctx.send("Invalid Hex color format!")
                return
            embed_color = int("0x{}".format(value.replace('#', '')), 16)
            embed = discord.Embed(color=embed_color)
            r, g, b = self._hex_to_rgb(value)
            c, m, y, k = self._rgb_to_cmyk(r, g, b)
            embed.title = "Color {}".format(value.replace(" ", ""))
            embed.add_field(name="RGB", value="rgb({}, {}, {})".format(r, g, b))
            embed.add_field(name="CMYK", value="cmyk({}, {}, {}, {})".format(c, m, y, k))
        elif value.startswith('cmyk'):
            count = value.count('(') + value.count(')') + value.count(',')
            if count != 5:
                error = True
            number_list = value.lower().replace("cmyk", "").replace("(", "").replace(")", "").replace(" ", "")
            try:
                c, m, y, k = map(int, number_list.split(','))
                # NOTE(review): bounds use 0-255 although CMYK components are
                # percentages (0-100); out-of-range values clamp in _cmyk_to_rgb.
                if (c < 0 or c > 255) or (m < 0 or m > 255) or (y < 0 or y > 255) or (k < 0 or k > 255):
                    error = True
            except Exception:
                error = True
            if error:
                await ctx.send("Invalid CMYK color format!")
                return
            r, g, b = self._cmyk_to_rgb(c, m, y, k)
            _hex = self._rgb_to_hex(r, g, b)
            embed_color = int("0x{}".format(_hex.replace("#", '')), 16)
            embed = discord.Embed(color=embed_color)
            embed.title = "Color {}".format(value.replace(" ", ""))
            embed.add_field(name="Hex", value=_hex)
            embed.add_field(name="RGB", value="rgb({}, {}, {})".format(r, g, b))
        await ctx.send(embed=embed)

    @commands.command(pass_context=True)
    async def slide(self, ctx, input_hex = None):
        """Calculates your slide value for Clover based on an input address (in hex)."""
        try:
            # We're accepting strings here - convert (None also lands in except)
            start_addr = int(input_hex, 16)
        except Exception:
            await ctx.send("Malformed input hex - try again.")
            return
        # Setup our temp vars
        first_str = "0x100000"
        first = int(first_str, 16)
        secon_str = "0x200000"
        secon = int(secon_str, 16)
        slide_float = ( start_addr - first ) / secon
        if slide_float > int(slide_float):
            # has a > 0 decimal - round up
            slide_float = int(slide_float) + 1
        await ctx.send("```\nslide={}\n```".format(slide_float))

    @commands.command(pass_context=True)
    async def hexdec(self, ctx, *, input_hex = None):
        """Converts hex to decimal."""
        if input_hex == None:
            await ctx.send("Usage: `{}hexdec [input_hex]`".format(ctx.prefix))
            return
        input_hex = self._check_hex(input_hex)
        if not len(input_hex):
            await ctx.send("Malformed hex - try again.")
            return
        try:
            dec = int(input_hex, 16)
        except Exception:
            await ctx.send("I couldn't make that conversion!")
            return
        await ctx.send(dec)

    @commands.command(pass_context=True)
    async def dechex(self, ctx, *, input_dec = None):
        """Converts an int to hex."""
        if input_dec == None:
            await ctx.send("Usage: `{}dechex [input_dec]`".format(ctx.prefix))
            return
        try:
            input_dec = int(input_dec)
        except Exception:
            await ctx.send("Input must be an integer.")
            return
        await ctx.send("0x" + "{:x}".format(input_dec).upper())

    @commands.command(pass_context=True)
    async def strbin(self, ctx, *, input_string = None):
        """Converts the input string to its binary representation."""
        if input_string == None:
            await ctx.send("Usage: `{}strbin [input_string]`".format(ctx.prefix))
            return
        msg = ''.join('{:08b}'.format(ord(c)) for c in input_string)
        # Format into blocks:
        # - First split into chunks of 8
        msg_list = re.findall('........?', msg)
        # Now we format!
        msg = "```\n"
        msg += " ".join(msg_list)
        msg += "```"
        if len(msg) > 1993:
            await ctx.send("Well... that was *a lot* of 1s and 0s. Maybe try a smaller string... Discord won't let me send all that.")
            return
        await ctx.send(msg)

    @commands.command(pass_context=True)
    async def binstr(self, ctx, *, input_binary = None):
        """Converts the input binary to its string representation."""
        if input_binary == None:
            await ctx.send("Usage: `{}binstr [input_binary]`".format(ctx.prefix))
            return
        # Clean the string, keeping only 0/1 characters.
        # Bug fix: original used `char is "0"` -- identity comparison on
        # strings is implementation-dependent; use equality instead.
        new_bin = ""
        for char in input_binary:
            if char == "0" or char == "1":
                new_bin += char
        if not len(new_bin):
            await ctx.send("Usage: `{}binstr [input_binary]`".format(ctx.prefix))
            return
        msg = ''.join(chr(int(new_bin[i:i+8], 2)) for i in range(0, len(new_bin), 8))
        await ctx.send(self.suppressed(ctx.guild, msg))

    @commands.command(pass_context=True)
    async def binint(self, ctx, *, input_binary = None):
        """Converts the input binary to its integer representation."""
        if input_binary == None:
            await ctx.send("Usage: `{}binint [input_binary]`".format(ctx.prefix))
            return
        try:
            msg = int(input_binary, 2)
        except Exception:
            msg = "I couldn't make that conversion!"
        await ctx.send(msg)

    @commands.command(pass_context=True)
    async def intbin(self, ctx, *, input_int = None):
        """Converts the input integer to its binary representation."""
        if input_int == None:
            await ctx.send("Usage: `{}intbin [input_int]`".format(ctx.prefix))
            return
        try:
            input_int = int(input_int)
        except Exception:
            await ctx.send("Input must be an integer.")
            return
        await ctx.send("{:08b}".format(input_int))

    @commands.command(pass_context=True)
    async def encode(self, ctx, value = None , from_type = None, *, to_type = None):
        """Data converter from ascii <--> hex <--> base64."""
        if value == None or from_type == None or to_type == None:
            msg = 'Usage: `{}encode "[value]" [from_type] [to_type]`\nTypes include ascii, hex, and base64.'.format(ctx.prefix)
            await ctx.send(msg)
            return
        types = [ "base64", "hex", "ascii" ]
        if not from_type.lower() in types:
            await ctx.send("Invalid *from* type!")
            return
        if not to_type.lower() in types:
            await ctx.send("Invalid *to* type!")
            return
        if from_type.lower() == to_type.lower():
            await ctx.send("*Poof!* Your encoding was done before it started!")
            return
        # Dispatch on the (from, to) pair; every valid pair is covered above.
        try:
            if from_type.lower() == "base64":
                if to_type.lower() == "hex":
                    await ctx.send(self.suppressed(ctx.guild, self._base64_to_hex(value)))
                    return
                elif to_type.lower() == "ascii":
                    await ctx.send(self.suppressed(ctx.guild, self._base64_to_ascii(value)))
                    return
            elif from_type.lower() == "hex":
                if to_type.lower() == "ascii":
                    await ctx.send(self.suppressed(ctx.guild, self._hex_to_ascii(value)))
                    return
                elif to_type.lower() == "base64":
                    await ctx.send(self.suppressed(ctx.guild, self._hex_to_base64(value)))
                    return
            elif from_type.lower() == "ascii":
                if to_type.lower() == "hex":
                    await ctx.send(self.suppressed(ctx.guild, self._ascii_to_hex(value)))
                    return
                elif to_type.lower() == "base64":
                    await ctx.send(self.suppressed(ctx.guild, self._ascii_to_base64(value)))
                    return
        except Exception:
            await ctx.send("I couldn't make that conversion!")
            return
|
15,005 | 9707000f1245d9e24a5314f1def23eaa87fea667 | import sys
from PyQt5 import QtWidgets
import windows.menu_window as menu
from ui.login import Ui_Login
class Login(QtWidgets.QMainWindow):
    """Login window: collects Postgres connection details, then opens the menu."""

    def __init__(self):
        super(Login, self).__init__()
        self.ui = Ui_Login()
        self.ui.setupUi(self)
        # Menu button starts disabled; presumably re-enabled elsewhere in the
        # UI logic once the fields are valid -- TODO confirm.
        self.ui.pushButton_menu.setDisabled(True)
        self.ui.pushButton_menu.clicked.connect(self.start_menu)
        self.ui.pushButton_exit.clicked.connect(self.close)
        # NOTE(review): hard-coded default credentials -- fine for local dev,
        # should not ship in production builds.
        self.ui.lineEdit_db.setText("face_rec_app")
        self.ui.lineEdit_user.setText("postgres")
        self.ui.lineEdit_password.setText("1234")
        self.ui.lineEdit_host.setText("127.0.0.1")

    def start_menu(self):
        # Collect connection info as [db, user, password, host] and hand it
        # to the menu window, then close this one.
        info = []
        info.append(self.ui.lineEdit_db.text())
        info.append(self.ui.lineEdit_user.text())
        info.append(self.ui.lineEdit_password.text())
        info.append(self.ui.lineEdit_host.text())
        self.open_menu = menu.Menu(info)
        self.open_menu.show()
        self.close()
if __name__ == '__main__':
    # Standard Qt bootstrap: create the app, show the login window, run the loop.
    app = QtWidgets.QApplication([])
    application = Login()
    application.show()
    # app.exec() is the alias available in recent PyQt5 (classic name: exec_()).
    sys.exit(app.exec())
|
15,006 | 16276f6abdfeefc4dadb670a57bb58bc07a78853 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Save and restore tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
class SaverTest(tf.test.TestCase):
    """Save and restore tests for tf.contrib.learn estimators."""

    def testIris(self):
        # Train a linear classifier on iris and save it; restore is disabled.
        path = tf.test.get_temp_dir() + '/tmp.saver'
        random.seed(42)
        iris = datasets.load_iris()
        classifier = learn.TensorFlowLinearClassifier(n_classes=3)
        classifier.fit(iris.data, iris.target)
        classifier.save(path)
        # TODO(ipolosukhin): Remove or restore.
        # new_classifier = learn.TensorFlowEstimator.restore(path)
        # self.assertEqual(type(new_classifier), type(classifier))
        # score = accuracy_score(iris.target, new_classifier.predict(iris.data))
        # self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))

    def testCustomModel(self):
        # Same flow with a custom model_fn wrapping logistic regression.
        path = tf.test.get_temp_dir() + '/tmp.saver2'
        random.seed(42)
        iris = datasets.load_iris()
        def _custom_model(x, y):
            return learn.models.logistic_regression(x, y)
        classifier = learn.TensorFlowEstimator(model_fn=_custom_model, n_classes=3)
        classifier.fit(iris.data, iris.target)
        classifier.save(path)
        # TODO(ipolosukhin): Remove or restore.
        # new_classifier = learn.TensorFlowEstimator.restore(path)
        # self.assertEqual(type(new_classifier), type(classifier))
        # score = accuracy_score(iris.target, new_classifier.predict(iris.data))
        # self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))

    def testDNN(self):
        # Same flow with a 10-20-10 DNN classifier.
        path = tf.test.get_temp_dir() + '/tmp_saver3'
        random.seed(42)
        iris = datasets.load_iris()
        classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                   n_classes=3)
        classifier.fit(iris.data, iris.target)
        classifier.save(path)
        # TODO(ipolosukhin): Remove or restore.
        # new_classifier = learn.TensorFlowEstimator.restore(path)
        # self.assertEqual(type(new_classifier), type(classifier))
        # score = accuracy_score(iris.target, new_classifier.predict(iris.data))
        # self.assertGreater(score, 0.5, 'Failed with score = {0}'.format(score))

    def testNoFolder(self):
        # Restoring from a non-existent path must raise ValueError.
        with self.assertRaises(ValueError):
            learn.TensorFlowEstimator.restore('no_model_path')

    def testNoCheckpoints(self):
        # Train and save; the checkpoint-deletion assertions are disabled.
        path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
        random.seed(42)
        iris = datasets.load_iris()
        classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                   n_classes=3)
        classifier.fit(iris.data, iris.target)
        classifier.save(path)
        # TODO(ipolosukhin): Remove or restore.
        # os.remove(os.path.join(path, 'checkpoint'))
        # with self.assertRaises(NotImplementedError):
        #     learn.TensorFlowEstimator.restore(path)
if __name__ == '__main__':
    # Run all test cases under the TensorFlow test runner.
    tf.test.main()
|
15,007 | 140e730e34d811c1bd3a99c3d66e2dd0b3da65de | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 04:09:01 2021
@author: ilkin
"""
def choices():
    """Prompt for sign-up (1) or sign-in (2) and dispatch accordingly.

    Raises TypeError for any other number; non-numeric input raises
    ValueError from int() before this check is reached.
    """
    print("Please choose what you would like to do.")
    choice = int(input("For Sigining Up Type 1 and For Signing in Type 2: "))
    if choice == 1:
        return getdetails()
    elif choice == 2:
        return checkdetails()
    else:
        raise TypeError
def getdetails():
    """Sign-up: prompt for name/password and append them to User_Data.txt.

    Returns an error message string when the name is taken, otherwise None
    (matching the original's behavior of returning no value on success).

    Fix: the original leaked the read handle on the early `return` (and its
    f.close() was only reached on the success path); `with` guarantees both
    handles are closed on every path.
    """
    print("Please Provide")
    name = str(input("Name: "))
    password = str(input("Password: "))
    with open("User_Data.txt", 'r') as f:
        info = f.read()
    # NOTE(review): substring check -- a name appearing inside any stored
    # token (including passwords) is rejected; kept for compatibility.
    if name in info:
        return "Name Unavailable. Please Try Again"
    with open("User_Data.txt", 'w') as f:
        f.write(info + " " + name + " " + password)
def checkdetails():
    """Sign-in: verify name/password against User_Data.txt and greet the user.

    Fix: the original never closed the file handle; `with` guarantees it.
    """
    print("Please Provide")
    name = str(input("Name: "))
    password = str(input("Password: "))
    with open("User_Data.txt", 'r') as f:
        info = f.read()
    info = info.split()
    if name in info:
        # The password is stored as the token immediately after the name.
        index = info.index(name) + 1
        usr_password = info[index]
        if usr_password == password:
            return "Welcome Back, " + name
        else:
            return "Password entered is wrong"
    else:
        return "Name not found. Please Sign Up."
# Entry point: run one sign-up / sign-in interaction and show the result.
print(choices())
15,008 | 9f3dcc05db2d191ccf67ea3479d8bd52048f6d27 | from termcolor import colored
logFile = None
def open_log(path):
    """Open (or create) path/log.txt in append mode as the module log file.

    Fix: build the path with os.path.join instead of manual '/'-concatenation,
    which also tolerates a trailing separator in *path*.
    """
    global logFile
    logFile = open(os.path.join(path, "log.txt"), "a")
def info(*msg):
    """Print *msg* in green with an [I] prefix and append it to the log file."""
    line = "[I] " + " ".join(str(part) for part in msg)
    print(colored(line, "green"))
    logFile.write(line + "\n")
def debug(*msg):
    """Print *msg* in yellow with a [D] prefix and append it to the log file."""
    line = "[D] " + " ".join(str(part) for part in msg)
    print(colored(line, "yellow"))
    logFile.write(line + "\n")
def error(*msg):
    """Print *msg* in red with an [E] prefix and append it to the log file."""
    line = "[E] " + " ".join(str(part) for part in msg)
    print(colored(line, "red"))
    logFile.write(line + "\n")
def close_log():
    """Close the module log file, tolerating the case where none was opened.

    Fix: the original raised AttributeError when close_log() was called
    before open_log() (logFile still None).
    """
    global logFile
    if logFile is not None:
        logFile.close()
        logFile = None
15,009 | 4b0b9c54cf213cf4fc1e01402442b492ba5b6f5a | from datetime import datetime
import os
import math
import tensorflow as tf
import dataset
from utils import add_summaries
from utils import align_image
from utils import convert_rgb_to_y
from utils import convert_rgb_to_ycbcr
from utils import convert_y_and_cbcr_to_rgb
from utils import resize_image
from utils import save_image
from utils import load_image
from utils import calc_psnr_and_ssim
from utils import get_validation_files
class Dcscn:
def __init__(self, with_restore=False):
    """Configure DCSCN hyper-parameters and build the inference graph.

    with_restore: falsy to start fresh; otherwise a checkpoint path that is
    stored in self.restore_model_path for later restoring.
    """
    # Scale factor for Super Resolution (should be 2 or more)
    self.scale = 2
    # Number of feature extraction layers
    self.layers = 12
    # Number of image channels used. Now it should be 1. using only Y from YCbCr.
    self.input_channel = 1
    self.output_channel = 1
    # Number of filters of first feature-extraction CNNs
    self.filters = 196
    # Number of filters of last feature-extraction CNNs
    self.min_filters = 48
    # Number of CNN filters are decayed
    # from [filters] to [min_filters] by this gamma
    self.filters_decay_gamma = 1.5
    # Initial weight stddev (won't be used when you use he or xavier initializer)
    self.weight_dev = 0.01
    # Output nodes should be kept by this probability. If 1, don't use dropout.
    self.dropout_rate = 0.8
    # Use batch normalization after each CNNs
    self.batch_norm = False
    # Norm for gradient clipping. If it's <= 0 we don't use gradient clipping.
    self.clipping_norm = 5
    # L2 decay
    self.l2_decay = 0.00003
    # Number of mini-batch images for training
    self.batch_size = 5
    self.initial_learning_rate = 0.0002
    # Per-layer outputs / weights / biases collected while building the graph.
    self.H = []
    self.Weights = []
    self.Biases = []
    # Restore model path
    self.is_use_restore = False
    if with_restore:
        self.is_use_restore = True
        self.restore_model_path = with_restore
    # Build graph
    x, y, x2, learning_rate, dropout, is_training = self.placeholders(
        input_channel=self.input_channel, output_channel=self.output_channel
    )
    self.x = x
    self.x2 = x2
    self.y = y
    self.learning_rate = learning_rate
    self.dropout = dropout
    self.is_training = is_training
    self.y_hat = self.forward(self.x, self.x2, self.dropout)
def _he_initializer(self, shape):
    """He-normal initialization: stddev = sqrt(2 / fan_in) for a conv kernel."""
    n = shape[0] * shape[1] * shape[2]
    stddev = math.sqrt(2.0 / n)
    return tf.truncated_normal(shape=shape, stddev=stddev)
def _weight(self, shape, name="weight"):
    """Create a conv-kernel Variable initialized with He-normal noise."""
    initial = self._he_initializer(shape)
    return tf.Variable(initial, name=name)
def _bias(self, shape, initial_value=0.0, name="bias"):
    """Create a bias Variable filled with initial_value."""
    initial = tf.constant(initial_value, shape=shape)
    return tf.Variable(initial, name=name)
def _conv2d(self, input, w, stride, bias=None, use_batch_norm=False, name=""):
    """SAME-padded 2-D convolution with optional bias add and batch norm."""
    output = tf.nn.conv2d(
        input,
        w,
        strides=[1, stride, stride, 1],
        padding="SAME",
        name=name + "_conv",
    )
    if bias is not None:
        output = tf.add(output, bias, name=name + "_add")
    if use_batch_norm:
        # Batch norm is gated by the is_training placeholder.
        output = tf.layers.batch_normalization(
            output, training=self.is_training, name="BN"
        )
    return output
def _prelu(self, input, features, name=""):
    """Parametric ReLU with one learnable alpha per feature channel."""
    with tf.variable_scope("prelu"):
        alphas = tf.Variable(
            tf.constant(0.1, shape=[features]), name=name + "_prelu"
        )
        # relu(x) + alpha * min(0, x), written via (x - |x|) / 2.
        output = tf.nn.relu(input) + tf.multiply(alphas, (input - tf.abs(input))) * 0.5
    return output
def _convolutional_block(
    self,
    name,
    input,
    kernel_size,
    input_feature_num,
    output_feature_num,
    use_batch_norm=False,
    dropout_rate=1.0,
    dropout=None,
):
    """Conv + PReLU (+ optional BN and dropout) block.

    Appends the activation to self.H and the kernel/bias to
    self.Weights/self.Biases, and returns the activation.
    """
    with tf.variable_scope(name):
        shape_of_weight = [
            kernel_size,
            kernel_size,
            input_feature_num,
            output_feature_num,
        ]
        w = self._weight(shape=shape_of_weight, name="conv_W")
        shape_of_bias = [output_feature_num]
        b = self._bias(shape=shape_of_bias, name="conv_B")
        z = self._conv2d(
            input, w, stride=1, bias=b, use_batch_norm=use_batch_norm, name=name
        )
        a = self._prelu(z, output_feature_num, name=name)
        if dropout_rate < 1.0:
            a = tf.nn.dropout(a, dropout, name="dropout")
        self.H.append(a)
        # TensorBoard summaries for weights, activations and biases.
        add_summaries("weight", name, w, save_stddev=True, save_mean=True)
        add_summaries("output", name, a, save_stddev=True, save_mean=True)
        add_summaries("bias", name, b, save_stddev=True, save_mean=True)
        # # Save image
        # shapes = w.get_shape().as_list()
        # weights = tf.reshape(w, [shapes[0], shapes[1], shapes[2] * shapes[3]])
        # weights_transposed = tf.transpose(weights, [2, 0, 1])
        # weights_transposed = tf.reshape(
        #     weights_transposed, [shapes[2] * shapes[3], shapes[0], shapes[1], 1]
        # )
        # tf.summary.image("weights", weights_transposed, max_outputs=6)
        self.Weights.append(w)
        self.Biases.append(b)
    return a
def _pixel_shuffler(
    self, name, input, kernel_size, scale, input_feature_num, output_feature_num
):
    """Sub-pixel upscaling: conv to scale^2 * channels, then depth_to_space."""
    with tf.variable_scope(name):
        self._convolutional_block(
            name + "_CNN",
            input,
            kernel_size,
            input_feature_num=input_feature_num,
            output_feature_num=scale * scale * output_feature_num,
            use_batch_norm=False,
        )
        self.H.append(tf.depth_to_space(self.H[-1], scale))
def placeholders(self, input_channel, output_channel):
    """Create the graph placeholders.

    x: low-res input, y: ground truth, x2: bicubic-upscaled input that the
    network output is added to (residual learning), plus learning-rate,
    dropout keep-rate and is_training scalars.
    """
    x = tf.placeholder(
        tf.float32, shape=[None, None, None, input_channel], name="x"
    )
    y = tf.placeholder(
        tf.float32, shape=[None, None, None, output_channel], name="y"
    )
    x2 = tf.placeholder(
        tf.float32, shape=[None, None, None, output_channel], name="x2"
    )
    learning_rate = tf.placeholder(tf.float32, shape=[], name="LearningRate")
    dropout = tf.placeholder(tf.float32, shape=[], name="dropout_keep_rate")
    is_training = tf.placeholder(tf.bool, name="is_training")
    return x, y, x2, learning_rate, dropout, is_training
def _calc_filters(self, first, last, layers, decay):
return [
int((first - last) * (1 - pow(i / float(layers - 1), 1.0 / decay)) + last)
for i in range(layers)
]
def forward(self, input, x2, dropout):
    """Build the DCSCN graph: feature extraction -> reconstruction -> upsample.

    Returns y_hat = upsampled residual + x2 (the bicubic-upscaled input),
    i.e. the network learns only the residual detail.
    """
    # building feature extraction layers
    total_output_feature_num = 0
    with tf.name_scope("X_"):
        # Input statistics for TensorBoard.
        mean_var = tf.reduce_mean(input)
        stddev_var = tf.sqrt(tf.reduce_mean(tf.square(input - mean_var)))
        tf.summary.scalar("output/mean", mean_var)
        tf.summary.scalar("output/stddev", stddev_var)
    filters = self._calc_filters(
        self.filters, self.min_filters, self.layers, self.filters_decay_gamma
    )
    input_filter = self.input_channel
    for i, filter in enumerate(filters):
        self._convolutional_block(
            "CNN%d" % (i + 1),
            input,
            kernel_size=3,
            input_feature_num=input_filter,
            output_feature_num=filter,
            use_batch_norm=self.batch_norm,
            dropout_rate=self.dropout_rate,
            dropout=dropout,
        )
        input_filter = filter
        input = self.H[-1]
        total_output_feature_num += filter
    with tf.variable_scope("Concat"):
        # Skip connections: concatenate every extraction layer's output.
        self.H_concat = tf.concat(self.H, 3, name="H_concat")
    # building reconstruction layers
    self._convolutional_block(
        "A1",
        self.H_concat,
        kernel_size=1,
        input_feature_num=total_output_feature_num,
        output_feature_num=64,
        dropout_rate=self.dropout_rate,
        dropout=dropout,
    )
    self._convolutional_block(
        "B1",
        self.H_concat,
        kernel_size=1,
        input_feature_num=total_output_feature_num,
        output_feature_num=32,
        dropout_rate=self.dropout_rate,
        dropout=dropout,
    )
    self._convolutional_block(
        "B2",
        self.H[-1],
        kernel_size=3,
        input_feature_num=32,
        output_feature_num=32,
        dropout_rate=self.dropout_rate,
        dropout=dropout,
    )
    # Concatenate B2's output with A1's (H[-3] is A1 at this point).
    self.H.append(tf.concat([self.H[-1], self.H[-3]], 3, name="Concat2"))
    # building upsampling layer
    pixel_shuffler_channel = 64 + 32
    self._pixel_shuffler(
        "Up-PS",
        self.H[-1],
        kernel_size=3,
        scale=self.scale,
        input_feature_num=pixel_shuffler_channel,
        output_feature_num=pixel_shuffler_channel,
    )
    self._convolutional_block(
        "R-CNN0",
        self.H[-1],
        kernel_size=3,
        input_feature_num=pixel_shuffler_channel,
        output_feature_num=self.output_channel,
    )
    y_hat = tf.add(self.H[-1], x2, name="output")
    with tf.name_scope("Y_"):
        # Output statistics for TensorBoard.
        mean = tf.reduce_mean(y_hat)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(y_hat - mean)))
        tf.summary.scalar("output/mean", mean)
        tf.summary.scalar("output/stddev", stddev)
        tf.summary.histogram("output", y_hat)
    return y_hat
def loss(self, y_hat, y):
    """Return (total loss, image loss, mse) for prediction y_hat vs target y.

    Total loss = MSE + l2_decay * sum of L2 norms of all conv weights.

    Bug fix: the original computed `self.l2_decay + tf.add_n(...)`, ADDING
    the decay coefficient to the raw L2 sum instead of scaling it, which
    makes the regularization term ~30000x too strong; weight decay must
    multiply the L2 term.
    """
    diff = tf.subtract(y_hat, y, "diff")
    mse = tf.reduce_mean(tf.square(diff, name="diff_square"), name="mse")
    image_loss = tf.identity(mse, name="image_loss")
    l2_norm_losses = [tf.nn.l2_loss(w) for w in self.Weights]
    l2_norm_loss = self.l2_decay * tf.add_n(l2_norm_losses)
    loss = image_loss + l2_norm_loss
    tf.summary.scalar("Loss", loss)
    tf.summary.scalar("L2WeightDecayLoss", l2_norm_loss)
    return loss, image_loss, mse
def optimizer(self, loss, learning_rate):
    """Build the Adam training op, with optional gradient clipping.

    When batch norm is enabled, the BN moving-average update ops are added
    as control dependencies so they run with each training step.
    """
    beta1 = 0.9
    beta2 = 0.999
    if self.batch_norm:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            optimizer = tf.train.AdamOptimizer(
                learning_rate, beta1=beta1, beta2=beta2, epsilon=1e-8
            )
    else:
        optimizer = tf.train.AdamOptimizer(
            learning_rate, beta1=beta1, beta2=beta2, epsilon=1e-8
        )
    trainables = tf.trainable_variables()
    grads = tf.gradients(loss, trainables)
    if self.clipping_norm > 0:
        # Global-norm clipping across all gradients before applying them.
        clipped_grads, _ = tf.clip_by_global_norm(
            grads, clip_norm=self.clipping_norm
        )
        grad_var_pairs = zip(clipped_grads, trainables)
        training_optimizer = optimizer.apply_gradients(grad_var_pairs)
    else:
        training_optimizer = optimizer.minimize(loss)
    # Save weights
    # for i in range(len(grads)):
    #     var = grads[i]
    #     mean_var = tf.reduce_mean(var)
    #     stddev_var = tf.sqrt(tf.reduce_mean(tf.square(var - mean_var)))
    #     # tf.summary.scalar("{}/mean".format(var.name), var)
    #     # tf.summary.scalar("{}/stddev".format(grads[i].name), stddev_var)
    #     # tf.summary.histogram(grads[i].name, var)
    return training_optimizer
    def train(self, output_path, validation_dataset=None):
        """Run the training loop and save the final checkpoint.

        Trains on 48x48 patches from the "bsd200" dataset for 8000*100 steps,
        writing TensorBoard summaries every 100 steps and PSNR/SSIM
        validation metrics every 8000 steps.

        :param output_path: checkpoint path prefix passed to ``self.save``
        :param validation_dataset: optional validation-set name; when given,
            PSNR/SSIM are computed over its files periodically
        """
        loss, image_loss, mse = self.loss(self.y_hat, self.y)
        training = self.optimizer(loss, self.learning_rate)
        loader = dataset.Loader(
            "bsd200", scale=self.scale, image_size=48, batch_size=self.batch_size
        )
        summary = tf.summary.merge_all()
        with tf.Session() as sess:
            # One log directory per run, keyed by start time.
            log_dir = datetime.now().strftime("%Y%m%d%H%M%S")
            writer = tf.summary.FileWriter("logs/{}".format(log_dir), graph=sess.graph)
            sess.run(tf.global_variables_initializer())
            for i in range(8000 * 100):
                input_images, upscaled_images, original_images = loader.feed()
                feed_dict = {
                    self.x: input_images,
                    self.x2: upscaled_images,
                    self.y: original_images,
                    self.learning_rate: self.initial_learning_rate,
                    self.dropout: self.dropout_rate,
                    self.is_training: 1,
                }
                _, s_loss, s_mse = sess.run([training, loss, mse], feed_dict=feed_dict)
                print("Step: {}, loss: {}, mse: {}".format(i, s_loss, s_mse))
                if i % 100 == 0:
                    # NOTE(review): this second sess.run re-evaluates the graph
                    # on the same batch just to fetch summaries (no weight
                    # update, but an extra forward pass).
                    summarized, _ = sess.run([summary, loss], feed_dict=feed_dict)
                    writer.add_summary(summarized, i)
                    # Learning rate
                    # (constant in this loop; logged manually for dashboards)
                    lr_summary = tf.Summary(
                        value=[
                            tf.Summary.Value(
                                tag="LR", simple_value=self.initial_learning_rate
                            )
                        ]
                    )
                    writer.add_summary(lr_summary, i)
                if i % 8000 == 0:
                    # Metrics
                    if validation_dataset is not None:
                        validation_files = get_validation_files(validation_dataset)
                        psnr, ssim = self.calc_metrics(validation_files)
                        # NOTE(review): "SSSIM" is a typo in this log label
                        # (runtime string, deliberately left unchanged here).
                        print("PSNR: {}, SSSIM: {}".format(psnr, ssim))
                        psnr_summary = tf.Summary(
                            value=[tf.Summary.Value(tag="PSNR", simple_value=psnr)]
                        )
                        ssim_summary = tf.Summary(
                            value=[tf.Summary.Value(tag="SSIM", simple_value=ssim)]
                        )
                        writer.add_summary(psnr_summary, i)
                        writer.add_summary(ssim_summary, i)
            writer.close()
            # Save model
            output_dir = os.path.dirname(os.path.join(os.getcwd(), output_path))
            os.makedirs(output_dir, exist_ok=True)
            self.save(sess, output_path)
    def run(self, input_image, input_bicubic_image):
        """Run one forward pass and return the super-resolved image.

        :param input_image: low-resolution input, HxW or HxWxC ndarray
        :param input_bicubic_image: the same image upscaled by ``self.scale``
            (fed to the network's second input / skip path)
        :return: network output image for the single batch element
        """
        h, w = input_image.shape[:2]
        # Grayscale inputs arrive as 2-D arrays; treat them as one channel.
        ch = input_image.shape[2] if len(input_image.shape) > 2 else 1
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            # Restore model
            if self.is_use_restore:
                restore_path = os.path.join(
                    os.getcwd(), self.restore_model_path + ".ckpt"
                )
                saver = tf.train.Saver()
                saver.restore(sess, restore_path)
            feed_dict = {
                self.x: input_image.reshape(1, h, w, ch),
                self.x2: input_bicubic_image.reshape(
                    1,
                    self.scale * input_image.shape[0],
                    self.scale * input_image.shape[1],
                    ch,
                ),
                self.learning_rate: self.initial_learning_rate,
                # 1.0 presumably disables dropout at inference (keep-prob
                # semantics) -- TODO confirm against the dropout layer.
                self.dropout: 1.0,
                self.is_training: 0,
            }
            y_hat = sess.run([self.y_hat], feed_dict=feed_dict)
            # First (only) fetch, first batch element.
            output = y_hat[0][0]
            return output
    def inference(self, input_image, output_dir, save_images=False):
        """Super-resolve an RGB image, running only the Y channel through the net.

        The luma (Y) channel is super-resolved by the network while Cb/Cr are
        taken from a bicubically upscaled copy, then recombined into RGB.

        :param input_image: RGB input image
        :param output_dir: directory used when ``save_images`` is True
        :param save_images: also write original/bicubic/intermediate images
        :return: the super-resolved RGB image
        """
        # Create scaled image
        # NOTE(review): the chroma path uses a hard-coded factor 2 while the
        # luma path uses self.scale -- confirm this is intended for scale != 2.
        scaled_image = resize_image(input_image, 2)
        # Create y and scaled y image
        input_y_image = convert_rgb_to_y(input_image)
        scaled_y_image = resize_image(input_y_image, self.scale)
        output_y_image = self.run(input_y_image, scaled_y_image)
        # Create result image: network luma + upscaled chroma.
        scaled_ycbcr_image = convert_rgb_to_ycbcr(scaled_image)
        result_image = convert_y_and_cbcr_to_rgb(
            output_y_image, scaled_ycbcr_image[:, :, 1:3]
        )
        if save_images:
            save_image(input_image, "{}/original.jpg".format(output_dir))
            save_image(scaled_image, "{}/bicubic.jpg".format(output_dir))
            save_image(
                scaled_y_image, "{}/bicubic_y.jpg".format(output_dir), is_rgb=False
            )
            save_image(
                output_y_image, "{}/result_y.jpg".format(output_dir), is_rgb=False
            )
            save_image(result_image, "{}/result.jpg".format(output_dir))
        return result_image
    def evaluate(self, filepath):
        """Compute PSNR/SSIM for one image via downscale-then-superresolve.

        The image is aligned to the scale factor, its Y channel downscaled by
        1/scale, super-resolved back, and compared against the original Y
        channel, excluding a border of ``self.scale`` pixels.

        :param filepath: path to the ground-truth image
        :return: tuple ``(psnr, ssim)``
        """
        input_image = align_image(load_image(filepath), self.scale)
        input_y_image = resize_image(convert_rgb_to_y(input_image), 1 / self.scale)
        input_scaled_y_image = resize_image(input_y_image, self.scale)
        output_y_image = self.run(input_y_image, input_scaled_y_image)
        ground_truth_y_image = convert_rgb_to_y(input_image)
        return calc_psnr_and_ssim(
            ground_truth_y_image, output_y_image, border=self.scale
        )
    def save(self, sess, name=""):
        """Write a checkpoint of the current session to ``<name>.ckpt``.

        :param sess: active tf.Session holding the variables to save
        :param name: checkpoint path prefix
        """
        filename = "{}.ckpt".format(name)
        # max_to_keep=None keeps every checkpoint instead of the default 5.
        saver = tf.train.Saver(max_to_keep=None)
        saver.save(sess, filename)
def calc_metrics(self, files):
psnrs = 0
ssims = 0
for file in files:
psnr, ssim = self.evaluate(file)
psnrs += psnr
ssims += ssim
psnr /= len(files)
ssim = ssims / len(files)
return psnr, ssim
    def metrics(self, output, labels):
        """Build streaming MSE/PSNR/SSIM metric tensors for evaluation.

        :param output: network output, laid out per ``self.data_format``
        :param labels: ground-truth images (NHWC)
        :return: tuple ``(results, updates_op)``; ``results`` maps metric
            names to value tensors, ``updates_op`` groups all update ops
        """
        # Convert NCHW output to NHWC so it matches `labels` for the MSE.
        output_transposed = output if self.data_format == 'NHWC' else tf.transpose(output, perm=[0, 2, 3, 1])
        # NOTE(review): debug tf.Print ops left in the graph; they dump the
        # whole output tensor on every evaluation and should be removed.
        output = tf.Print(output, [tf.shape(output)], message="shape of output:", summarize=1000)
        output = tf.Print(output, [output], message="value of output:", summarize=1000)
        # NOTE(review): psnr/ssim below use the *untransposed* `output`; for
        # NCHW data_format that layout mismatches `labels` -- confirm whether
        # they should use output_transposed like the MSE does.
        results = {}
        updates = []
        with tf.name_scope('metrics_cals'):
            mean_squared_error, mean_squared_error_update = tf.metrics.mean_squared_error(
                labels,
                output_transposed,
            )
            results["mean_squared_error"] = mean_squared_error
            updates.append(mean_squared_error_update)
            psnr_array = tf.image.psnr(labels, output, max_val=1.0)
            psnr, psnr_update = tf.metrics.mean(psnr_array)
            results["psnr"] = psnr
            updates.append(psnr_update)
            ssim_array = tf.image.ssim(labels, output, max_val=1.0)
            ssim, ssim_update = tf.metrics.mean(ssim_array)
            results["ssim"] = ssim
            updates.append(ssim_update)
        # merge all updates
        updates_op = tf.group(*updates)
        return results, updates_op
|
15,010 | 0285dba2fc481a158794acbd29e6c3d9a8b5bc61 | '''
Created on Sep 6, 2013
https://projecteuler.net/problem=40
An irrational decimal fraction is created by concatenating
the positive integers:
0.123456789101112131415161718192021...
It can be seen that the 12th digit of the fractional part
is 1.
If dn represents the nth digit of the fractional part, find
the value of the following expression.
d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000
@author: Cawb07
'''
import time
start = time.time()
i = 1
s = "."
while len(s) < 1000001:
s += str(i)
i += 1
print int(s[1])*int(s[10])*int(s[100])*int(s[1000])*\
int(s[10000])*int(s[100000])*int(s[1000000])
elapsed = time.time() - start
print "The elapsed time is %s seconds." % (elapsed)
|
15,011 | 633c895d8cffbf6673f5e3533ebb049c129849f4 | from django.urls import include, path, re_path
from django.contrib import admin
from rest_framework import routers
from api import views
router = routers.DefaultRouter()
router.register(r'users', views.UserApiViewSet)
router.register(r'agents', views.AgentApiViewSet)
router.register(r'events', views.EventApiViewSet)
urlpatterns = [
path('', include(router.urls)),
path('admin/', admin.site.urls),
path('environments/', views.EnvironmentListOnlyAPIView.as_view()),
path('levels/', views.LevelListOnlyAPIView.as_view()),
path('auth/', include('rest_auth.urls')),
]
|
15,012 | 4ac877966812cb5454660e3d652f396209578b5a | from . import teachers
|
15,013 | 2013747af44d8fd3a55fac7f3d77c46fd48163c0 | # Generated by Django 3.2 on 2021-07-15 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('c_name_1', models.CharField(max_length=200)),
('c_num_1', models.CharField(max_length=200)),
('c_email_1', models.CharField(max_length=200)),
('c_name_2', models.CharField(max_length=200)),
('c_num_2', models.CharField(max_length=200)),
('c_email_2', models.CharField(max_length=200)),
('c_name_3', models.CharField(max_length=200)),
('c_num_3', models.CharField(max_length=200)),
('c_email_3', models.CharField(max_length=200)),
('c_name_4', models.CharField(max_length=200)),
('c_num_4', models.CharField(max_length=200)),
('c_email_4', models.CharField(max_length=200)),
('c_name_5', models.CharField(max_length=200)),
('c_num_5', models.CharField(max_length=200)),
('c_email_5', models.CharField(max_length=200)),
],
),
]
|
15,014 | 469f19e83bdccb194d0e9f2b42cd5f5ffa060d9a |
from sql2doc.db import DB
import numpy as np
from sql2doc.pen import MyPen
from configparser import ConfigParser
cf = ConfigParser()
cf.read('conf/conf.ini')
def getTablesNameComment(db_obj, schema_name):
    """Return the name and comment of every table in a schema.

    :param db_obj: database connection wrapper exposing ``executeSql``
    :param schema_name: name of the schema (database) to inspect
    :return: tuple ``([name1, name2, ...], [comment1, comment2, ...])``
    """
    sql = 'select ' \
          'table_name,' \
          'table_rows,' \
          'table_comment ' \
          'from ' \
          'information_schema.`TABLES` ' \
          'where ' \
          'table_schema = %s '
    rows = db_obj.executeSql(sql=sql, args=schema_name, returnDict=True)
    names = [row['table_name'] for row in rows]
    comments = [row['table_comment'] for row in rows]
    return names, comments
def getColumnsInfo(db_obj, schema_name, table_name):
    """Return per-column metadata (ordinal, name, type, comment) for a table.

    :param db_obj: database connection wrapper exposing ``executeSql``
    :param schema_name: schema (database) name
    :param table_name: table to describe
    :return: tuple ``(index_array, [name, ...], [type, ...], [comment, ...])``
        where ``index_array`` is a 1-based numpy int array of ordinals
    """
    sql = 'select ' \
          'table_name,' \
          'column_name, ' \
          'column_type, ' \
          'column_comment ' \
          'from ' \
          'information_schema.`COLUMNS` ' \
          'where ' \
          'table_schema = %s and table_name = %s '
    args = (schema_name, table_name)
    names, types, comments = [], [], []
    for row in db_obj.executeSql(sql, args, True):
        names.append(row['column_name'])
        types.append(row['column_type'])
        comments.append(row['column_comment'])
    # 1-based ordinal number for each column.
    indexes = np.linspace(1, len(names), len(names), dtype=int)
    return indexes, names, types, comments
def getTableInfo():
    """Collect the full schema description of the configured database.

    Reads the target schema name from conf/conf.ini ([mysql] db_name), then
    gathers table names/comments and per-table column details.

    :return: dict with keys ``table_name``/``table_comment`` (lists) and
        ``column_index``/``column_name``/``column_type``/``column_comment``
        (lists of per-table lists, aligned with ``table_name``)
    """
    table_info = {}
    db_obj = DB()
    # Read the schema name from the config file.
    items = dict(cf.items('mysql'))
    schema_name = items['db_name']
    print("你需要导出的是:%s 库" %(schema_name))
    table_names, comments = getTablesNameComment(db_obj, schema_name)
    table_info['table_name'] = table_names
    table_info['table_comment'] = comments
    # One entry per table, each a list over that table's columns.
    column_index = []
    column_name = []
    column_type = []
    column_comment = []
    for table_name in table_info['table_name']:
        columns_index, columns_name, columns_type, columns_comment = getColumnsInfo(db_obj, schema_name, table_name)
        column_index.append(columns_index)
        column_name.append(columns_name)
        column_type.append(columns_type)
        column_comment.append(columns_comment)
    table_info['column_index'] = column_index
    table_info['column_name'] = column_name
    table_info['column_type'] = column_type
    table_info['column_comment'] = column_comment
    return table_info
    pass
def write2word(table_info):
    """Render *table_info* into a Word document via MyPen.

    :param table_info: schema description as returned by ``getTableInfo``
    """
    # `file_path` is a module global set in the __main__ block.
    pen = MyPen(file_path)
    pen.draw(table_info)
    pass
def main():
    """Program entry point: gather schema info and write the Word document."""
    table_info = getTableInfo()
    write2word(table_info)
    pass


if __name__ == '__main__':
    # Output document name; read by write2word via a module global.
    file_path = 'cs.doc'
    main()
15,015 | 7316dfc1f296b285583896250afed048c61e633c | import pytest
from brain_brew.build_tasks.build_task_generic import BuildTaskGeneric
from tests.representation.configuration.test_global_config import global_config
class TestSplitTags:
@pytest.mark.parametrize("str_to_split, expected_result", [
("tags1, tags2", ["tags1", "tags2"]),
("tags1 tags2", ["tags1", "tags2"]),
("tags1; tags2", ["tags1", "tags2"]),
("tags1 tags2", ["tags1", "tags2"]),
("tags1, tags2, tags3, tags4, tags5, tags6, tags7, tags8, tags9",
["tags1", "tags2", "tags3", "tags4", "tags5", "tags6", "tags7", "tags8", "tags9"]),
("tags1, tags2; tags3 tags4 tags5, tags6; tags7 tags8, tags9",
["tags1", "tags2", "tags3", "tags4", "tags5", "tags6", "tags7", "tags8", "tags9"]),
("tags1,tags2", ["tags1", "tags2"]),
("tags1;tags2", ["tags1", "tags2"]),
("tags1, tags2", ["tags1", "tags2"]),
("tags1; tags2", ["tags1", "tags2"]),
])
def test_runs(self, str_to_split, expected_result):
assert BuildTaskGeneric.split_tags(str_to_split) == expected_result
class TestJoinTags:
@pytest.mark.parametrize("join_with, expected_result", [
(", ", "test, test1, test2")
])
def test_joins(self, global_config, join_with, expected_result):
list_to_join = ["test", "test1", "test2"]
global_config.flags.join_values_with = join_with
assert BuildTaskGeneric.join_tags(list_to_join) == expected_result
|
15,016 | eac688240d68853292a227265bcb968585e56890 | # https://en.wikipedia.org/wiki/Martingale_(betting_system)
import random
import time
def bet_to_win():
    """Simulate betting until the first win; return the number of bets made."""
    attempts = 1
    outcome = random.choice(['win', 'lose'])
    while outcome == 'lose':
        attempts += 1
        outcome = random.choice(['win', 'lose'])
    return attempts
def main():
    """Run the martingale simulation forever, printing running statistics.

    Each completed round increments `earning` by one unit; `max_count`
    tracks the longest losing streak seen, and the printed "max stake" is
    the 2**(max_count-1) bet that streak would have required.
    """
    max_count = 0
    earning = 0
    while True:  # runs until interrupted (Ctrl-C)
        count = bet_to_win()
        if max_count < count:
            max_count = count
        earning = earning + 1
        print('earning: {}, count: {}, max count: {}, max stake: {}'.format(
            earning, count, max_count, pow(2, max_count - 1)))
        time.sleep(0.01)  # throttle the output
|
15,017 | 9a2bec767d6c1bd44391a440f8b209c8a1598f19 | from setuptools import setup, find_packages
setup(
name="gitconfigs",
version="0.1",
packages=find_packages(),
scripts=[],
install_requires=[
'click>=7.0'
],
author="Emmanuel Bavoux",
author_email="emmanuel.bavoux@gmail.com",
description="Some description")
|
15,018 | dc60ba9575b9fa68fef5442bee8656e3b95e94a4 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# code by Mr_Java
# email:***@qianxin.com
import re
import time
import os
import json
import threading
import time
import sys
if sys.getdefaultencoding() != 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8')
path = 'D:\\csv-to-json\\'
all = []
for fpathe, dirs, fs in os.walk(path):
for f in fs:
x = os.path.join(fpathe, f)
xx = x.strip('\n')
# print xx
if '.csv' in xx:
# print xx
# xxx=xx.replace('\\','\\\\')
all.append(xx)
for i in all:
fp = open(i,'r')
filename = i[-10:].split('.')[0]
code = i[-10:].split('.')[0]
for line in fp.readlines()[1:]:
line = line.strip('\n')+','+code
# print line
insert_data = line.split(',')
# insert(insert_arr = insert_data)
json_data = '{"TranID":'+insert_data[0]+',"Time":"'+insert_data[1]+'","Price":"'+insert_data[2]+'","Volume":'+insert_data[3]+',"SaleOrderVolume":'+insert_data[4]+',"BuyOrderVolume":'+insert_data[5]+',' \
'"Type":"'+insert_data[6]+'","SaleOrderID":'+insert_data[7]+',"SaleOrderPrice":"'+insert_data[8]+'","BuyOrderID":'+insert_data[9]+',"BuyOrderPrice":"'+insert_data[10]+'","code":"'+insert_data[11]+'"}'
print json_data
# fpp_demod = open('demo.json','a')
# fpp_demod.write('%s\n'%str(json_data))
fpp = open('E:\\mysql\\14\\'+filename+'.txt','a')
fpp.write('%s\n'%str(line))
|
15,019 | 17ac627c2850e2bc357efae6c486da9ebe6dde3c | incio = None
final = None
lista = None
def read_line():
try:
# read for Python 2.x
return raw_input()
except NameError:
# read for Python 3.x
return input()
lista = read_line().split(" ")
inicio = int(lista[0])
final = int(lista[1])
if inicio >= 0 and final <= 2:
print("nova")
elif final > inicio and final <= 96:
print("crescente")
elif inicio >= final and final <= 96:
print("minguante")
else:
print("cheia") |
15,020 | 80633fe01d7b9cee9659dfb98c1c1315d4af9743 | # Smash script
import code # code.interact(local=locals())
from sklearn.datasets import load_iris
iris = load_iris()
from sklearn import neighbors
X, y = iris.data, iris.target
code.interact(local=locals())
# classify as whichever one it is closest to
knn = neighbors.KNeighborsClassifier(n_neighbors=1)
knn.fit(X, y)
# What kind of iris has 3cm x 5cm sepal and 4cm x 2cm petal?
print iris.target_names[knn.predict([[3, 5, 4, 2]])]
'''
Cross Validation
'''
from sklearn import cross_validation
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.4, random_state=0)
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
predicted = knn.predict(X_test)
from sklearn import metrics
print metrics.classification_report(y_test, predicted)
print metrics.confusion_matrix(y_test, predicted)
print metrics.f1_score(y_test, predicted)
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(knn, iris.data, iris.target, cv=5)
df = pd.read_csv('/Users/bcutrell/Desktop/clinton_data.csv')
def get_xy(x, y):
r = y + ' ~ ' + " + ".join(x)
return dmatrices(r, data=df, return_type='dataframe')
def run_regre(x, y):
''' used to smash possible regressions '''
x = list(x)
if len(x) > 1:
r = y + ' ~ ' + " + ".join(x)
elif len(x) == 0:
return None
else:
r = y + ' ~ ' + x[0]
y, X = dmatrices(r, data=df, return_type='dataframe')
model = LinearRegression()
model = model.fit(X,y)
return model.score(X,y)
for L in range(0, len(x)+1):
for subset in itertools.combinations(x, L):
new_regr = run_regre(x, y)
print subset, new_regr
model = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0], store_cv_values=True)
model.fit(x_test, y_test)
s1 = model.score(X,y)
print model.score(X,y)
model.fit(x_train, y_train)
headers = ['CountyName',
'PercentvotingforClintonin1992',
'MedianAge',
'MeanSavings',
'PerCapitaIncome',
'PercentinPoverty',
'PercentVeterans',
'PercentFemale',
'PopulationDensity',
'PercentinNursingHomes',
'CrimeIndex']
y_attr = 'PercentvotingforClintonin1992'
x_attrs = ['MedianAge',
'PerCapitaIncome',
'PercentFemale',
'PopulationDensity']
df_1 = pd.DataFrame(df.row.str.split(',',1).tolist(), columns = ['County','CountyName'])
scoring = make_scorer(mean_squared_error, greater_is_better=False)
cv = KFold(X.shape[0], 10)
model = linear_model.RidgeCV(alphas=[0.1, 1.0, 10.0], cv=cv, scoring=scoring)
model.fit(x,y)
print model.coef_
print model.alpha_
print model.score(X,y)
def rename_columns(name, c):
if not c == 'Name' or c == 'Year':
return name + '_' + c.lower()
else:
return c.lower()
tbrady.rename(columns=lambda x: rename_columns('brady', x), inplace=True)
pmanning.rename(columns=lambda x: rename_columns('manning', x), inplace=True)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10,10))
tbrady[x_attrs].pct_change().plot(ax=axes[0]).set_title('Brady')
pmanning[x_attrs].pct_change().plot(ax=axes[1]).set_title('Manning')
|
15,021 | 2cd4a29bca0b07d87f6b79a99ad68ed85201f900 | def isAlienSorted(words, order):
"""
:type words: List[str]
:type order: str
:rtype: bool
"""
d = {}
for i,c in enumerate(order):
d[c] = i
for i in range(0, len(words)-1):
j = i+1
x = words[i]
y = words[j]
for k in range(min(len(x),len(y))):
if x[k] !=y[k]:
if d[x[k]] > d[y[k]]:
return False
break
else:
if len(x) > len(y):
return False
return True
# Ad-hoc driver: the first `words` assignment is immediately overwritten.
words = ["kuvp","q"]
words = ["apple","app"]
order = "abcdefghijklmnopqrstuvwxyz"
#order = "ngxlkthsjuoqcpavbfdermiywz"
print isAlienSorted(words, order)  # Python 2 print statement
|
15,022 | f7a5f42daf97bb94e4f04c65b27cfab3c7a8997d | # -*- coding:utf-8 -*-
def f1(a,b,c=0,*args,**kw):
    """Demo of positional args, a default, *args and **kw collection."""
    print('a=',a,'b=',b,'c=',c,'args=',args,'kw=',kw)

# 1,2 -> a,b; 3 -> c; 4,5 collected into args; kw stays empty.
f1(1,2,3,4,5)
|
15,023 | 34d10bfad3e99ffee070b74206aae2f7eca106f8 | # viết hoa chữ cái đầu chuỗi
user_str = 'trần tấn Dũng'
cap = user_str.capitalize() # chỉ có chữ Tran viết hoa, các chữ cái còn lại không viết hoa
print('cap: ',cap)
# viết hoa tất cả các chữ
up = user_str.upper()
print('upper: ', up)
# viết thường tất cả chuỗi
low = user_str.lower()
print('lower: ', low)
# hoán đổi chữ viết hoa thành chữ thường, chữ thường thành chữ viết hoa
swap = user_str.swapcase()
print('swapcase: ', swap)
# chuẩn hóa chuỗi, viết hoa các chữ cái đầu tiên
tit = user_str.title()
print('title: ', tit)
# căn giữa, trái, phải
cen = user_str.center(50,'-')
print(cen)
left = user_str.ljust(50,'*')
print(left)
# encode -> mã hóa
encode = user_str.encode()
print(encode)
# thay thế chuỗi
rep = user_str.replace('n','ng')
print(rep)
# loại bỏ ký tự đầu/cuối ~ trim
strip = user_str.strip()
|
15,024 | 5ba5c9796e3ed80722fd0f219a4beace6e9941b0 | from flask import Flask, request
import requests
import ujson
import os.path
import urllib
app = Flask(__name__)
@app.route('/')
def index():
    """Landing endpoint; identifies this service."""
    return "Admin API"
@app.route('/admin')
def admin():
    """Grant superadmin access based on the `username` cookie.

    Looks the user up in the permissions service and allows access only if
    the `superadmin` role is present.

    Returns 401 when the role is missing and 500 when the permissions
    service reports an error.
    """
    username = request.cookies.get("username")
    if not username:
        return {"Error": "Specify username in Cookie"}
    # BUG FIX: `urllib.quote` exists only on Python 2; this module returns
    # dicts from view functions (Flask >= 1.1, Python 3 only), so it must use
    # urllib.parse.quote instead. basename() strips path components before
    # the value is embedded in the URL.
    from urllib.parse import quote
    username = quote(os.path.basename(username))
    url = "http://permissions:5000/permissions/{}".format(username)
    resp = requests.request(method="GET", url=url)
    ret = ujson.loads(resp.text)
    if resp.status_code == 200:
        if "superadmin" in ret["roles"]:
            return {"OK": "Superadmin Access granted"}
        else:
            e = u"Access denied. User has following roles: {}".format(ret["roles"])
            return {"Error": e}, 401
    else:
        return {"Error": ret["Error"]}, 500
|
15,025 | 6981c21421dcb278d77298202f804634ffd42555 | import base58
import calendar
from datetime import datetime
def tb(l):
    """Pack a sequence of small ints (0-255) into bytes, one byte each."""
    return b''.join(value.to_bytes(1, 'big') for value in l)
base58_encodings = [
# Encoded | Decoded |
# prefix | len | prefix | len | Data type
(b"B", 51, tb([1, 52]), 32, u"block hash"),
(b"o", 51, tb([5, 116]), 32, u"operation hash"),
(b"Lo", 52, tb([133, 233]), 32, u"operation list hash"),
(b"LLo", 53, tb([29, 159, 109]), 32, u"operation list list hash"),
(b"P", 51, tb([2, 170]), 32, u"protocol hash"),
(b"Co", 52, tb([79, 199]), 32, u"context hash"),
(b"tz1", 36, tb([6, 161, 159]), 20, u"ed25519 public key hash"),
(b"tz2", 36, tb([6, 161, 161]), 20, u"secp256k1 public key hash"),
(b"tz3", 36, tb([6, 161, 164]), 20, u"p256 public key hash"),
(b"KT1", 36, tb([2, 90, 121]), 20, u"Originated address"),
(b"id", 30, tb([153, 103]), 16, u"cryptobox public key hash"),
(b'expr', 54, tb([13, 44, 64, 27]), 32, u'script expression'),
(b"edsk", 54, tb([13, 15, 58, 7]), 32, u"ed25519 seed"),
(b"edpk", 54, tb([13, 15, 37, 217]), 32, u"ed25519 public key"),
(b"spsk", 54, tb([17, 162, 224, 201]), 32, u"secp256k1 secret key"),
(b"p2sk", 54, tb([16, 81, 238, 189]), 32, u"p256 secret key"),
(b"edesk", 88, tb([7, 90, 60, 179, 41]), 56, u"ed25519 encrypted seed"),
(b"spesk", 88, tb([9, 237, 241, 174, 150]), 56, u"secp256k1 encrypted secret key"),
(b"p2esk", 88, tb([9, 48, 57, 115, 171]), 56, u"p256_encrypted_secret_key"),
(b"sppk", 55, tb([3, 254, 226, 86]), 33, u"secp256k1 public key"),
(b"p2pk", 55, tb([3, 178, 139, 127]), 33, u"p256 public key"),
(b"SSp", 53, tb([38, 248, 136]), 33, u"secp256k1 scalar"),
(b"GSp", 53, tb([5, 92, 0]), 33, u"secp256k1 element"),
(b"edsk", 98, tb([43, 246, 78, 7]), 64, u"ed25519 secret key"),
(b"edsig", 99, tb([9, 245, 205, 134, 18]), 64, u"ed25519 signature"),
(b"spsig", 99, tb([13, 115, 101, 19, 63]), 64, u"secp256k1 signature"),
(b"p2sig", 98, tb([54, 240, 44, 52]), 64, u"p256 signature"),
(b"sig", 96, tb([4, 130, 43]), 64, u"generic signature"),
(b'Net', 15, tb([87, 82, 0]), 4, u"chain id"),
]
operation_tags = {
'endorsement': 0,
'seed_nonce_revelation': 1,
'double_endorsement_evidence': 2,
'double_baking_evidence': 3,
'account_activation': 4,
'proposal': 5,
'ballot': 6,
'reveal': 7,
'transaction': 8,
'origination': 9,
'delegation': 10
}
def scrub_input(v) -> bytes:
    """Normalize *v* to bytes.

    Hex strings (optionally ``0x``-prefixed) are decoded from hex; any other
    str is ASCII-encoded; bytes pass through unchanged.

    :raises TypeError: if *v* is neither str nor bytes
    """
    if isinstance(v, bytes):
        return v
    if isinstance(v, str):
        try:
            int(v, 16)
        except ValueError:
            # Not a hex literal: treat it as plain ASCII text.
            return v.encode('ascii')
        hex_digits = v[2:] if v.startswith('0x') else v
        return bytes.fromhex(hex_digits)
    raise TypeError(
        "a bytes-like object is required (also str), not '%s'" %
        type(v).__name__)
def base58_decode(v: bytes) -> bytes:
""" Decode data using Base58 with checksum + validate binary prefix against known kinds and cut in the end.
:param v: Array of bytes (use string.encode())
:returns: bytes
"""
try:
prefix_len = next(
len(encoding[2])
for encoding in base58_encodings
if len(v) == encoding[1] and v.startswith(encoding[0])
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58decode_check(v)[prefix_len:]
def base58_encode(v: bytes, prefix: bytes) -> bytes:
""" Encode data using Base58 with checksum and add an according binary prefix in the end.
:param v: Array of bytes
:param prefix: Human-readable prefix (use b'') e.g. b'tz', b'KT', etc
:returns: bytes (use string.decode())
"""
try:
encoding = next(
encoding
for encoding in base58_encodings
if len(v) == encoding[3] and prefix == encoding[0]
)
except StopIteration:
raise ValueError('Invalid encoding, prefix or length mismatch.')
return base58.b58encode_check(encoding[2] + v)
def _validate(v, prefixes: list):
v = scrub_input(v)
if any(map(lambda x: v.startswith(x), prefixes)):
base58_decode(v)
else:
raise ValueError('Unknown prefix.')
def validate_pkh(v):
""" Ensure parameter is a public key hash (starts with b'tz1', b'tz2', b'tz3')
:param v: string or bytes
:raises ValueError: if parameter is not a public key hash
"""
return _validate(v, prefixes=[b'tz1', b'tz2', b'tz3'])
def validate_sig(v):
""" Ensure parameter is a signature (starts with b'edsig', b'spsig', b'p2sig', b'sig')
:param v: string or bytes
:raises ValueError: if parameter is not a signature
"""
return _validate(v, prefixes=[b'edsig', b'spsig', b'p2sig', b'sig'])
def is_pkh(v) -> bool:
""" Check if value is a public key hash.
"""
try:
validate_pkh(v)
except (ValueError, TypeError):
return False
return True
def is_sig(v) -> bool:
""" Check if value is a signature.
"""
try:
validate_sig(v)
except (ValueError, TypeError):
return False
return True
def is_bh(v) -> bool:
""" Check if value is a block hash.
"""
try:
_validate(v, prefixes=[b'B'])
except (ValueError, TypeError):
return False
return True
def is_ogh(v) -> bool:
""" Check if value is an operation group hash.
"""
try:
_validate(v, prefixes=[b'o'])
except (ValueError, TypeError):
return False
return True
def is_kt(v) -> bool:
""" Check if value is a KT address.
"""
try:
_validate(v, prefixes=[b'KT1'])
except (ValueError, TypeError):
return False
return True
def is_key(v) -> bool:
""" Check if value is a public key.
"""
try:
_validate(v, prefixes=[b"edsk", b"edpk", b"spsk", b"p2sk", b"sppk", b"p2pk"])
except (ValueError, TypeError):
return False
return True
def is_chain_id(v) -> bool:
""" Check if value is a chain id.
"""
try:
_validate(v, prefixes=[b'Net'])
except (ValueError, TypeError):
return False
return True
def forge_nat(value) -> bytes:
    """Encode a non-negative integer using LEB128 (Zarith natural) encoding.

    :param int value: the value to encode
    :returns: encoded value
    :rtype: bytes
    :raises ValueError: if value is negative
    """
    if value < 0:
        raise ValueError('Value cannot be negative.')
    out = bytearray()
    while True:
        septet = value & 0x7f
        value >>= 7
        if value:
            # Continuation bit set: more septets follow.
            out.append(septet | 0x80)
        else:
            out.append(septet)
            break
    return bytes(out)
def forge_public_key(value) -> bytes:
""" Encode public key into bytes.
:param value: public key in in base58 form
"""
prefix = value[:4]
res = base58.b58decode_check(value)[4:]
if prefix == 'edpk':
return b'\x00' + res
elif prefix == 'sppk':
return b'\x01' + res
elif prefix == 'p2pk':
return b'\x02' + res
raise ValueError(f'Unrecognized key type: #{prefix}')
def parse_public_key(data: bytes) -> str:
""" Decode public key from byte form.
:param data: encoded public key.
:returns: base58 encoded public key
"""
key_prefix = {
b'\x00': b'edpk',
b'\x01': b'sppk',
b'\x02': b'p2pk'
}
return base58_encode(data[1:], key_prefix[data[:1]]).decode()
def parse_chain_id(data: bytes):
""" Decode chain id from byte form.
:param data: encoded chain id.
:returns: base58 encoded chain id
"""
return base58_encode(data, b'Net').decode()
def parse_signature(data: bytes):
""" Decode signature from byte form.
:param data: encoded signature.
:returns: base58 encoded signature (generic)
"""
return base58_encode(data, b'sig').decode()
def forge_address(value: str, tz_only=False) -> bytes:
    """Encode a base58 address or key hash into its binary form.

    tz1/tz2/tz3 addresses get a two-byte 0x00 0x0N tag; KT1 contracts get a
    0x01 tag plus a trailing 0x00 padding byte.

    :param value: base58 encoded address or key_hash
    :param tz_only: True indicates that it's a key_hash; the leading tag
        byte is dropped, yielding the more compact form
    :raises ValueError: if the prefix is not tz1/tz2/tz3/KT1
    """
    prefix = value[:3]
    # Drop the 3-byte binary prefix left by the base58 check decoding.
    address = base58.b58decode_check(value)[3:]
    if prefix == 'tz1':
        res = b'\x00\x00' + address
    elif prefix == 'tz2':
        res = b'\x00\x01' + address
    elif prefix == 'tz3':
        res = b'\x00\x02' + address
    elif prefix == 'KT1':
        res = b'\x01' + address + b'\x00'
    else:
        raise ValueError(value)
    return res[1:] if tz_only else res
def parse_address(data: bytes):
    """Decode a binary address or key_hash back to base58.

    Handles the three tz tags, the KT1 form (0x01 tag + trailing 0x00) and,
    in the final branch, the compact key_hash form produced by
    ``forge_address(..., tz_only=True)`` (single curve byte, no 0x00 tag).

    :param data: encoded address or key_hash
    :returns: base58 encoded address
    """
    tz_prefixes = {
        b'\x00\x00': b'tz1',
        b'\x00\x01': b'tz2',
        b'\x00\x02': b'tz3'
    }
    for bin_prefix, tz_prefix in tz_prefixes.items():
        if data.startswith(bin_prefix):
            return base58_encode(data[2:], tz_prefix).decode()
    if data.startswith(b'\x01') and data.endswith(b'\x00'):
        return base58_encode(data[1:-1], b'KT1').decode()
    else:
        # Compact key_hash: prepend the implicit 0x00 tag to pick the curve.
        return base58_encode(data[1:], tz_prefixes[b'\x00' + data[:1]]).decode()
def parse_contract(data: bytes):
""" Decode contract (address + optional entrypoint) from bytes
:param data: encoded contract
:returns: base58 encoded address and entrypoint (if exists) separated by `%`
"""
res = parse_address(data[:22])
if len(data) > 22:
res += f'%{data[22:].decode()}'
return res
def forge_bool(value: bool) -> bytes:
    """Encode a boolean as one byte: 0xff for True, 0x00 for False."""
    if value:
        return b'\xff'
    return b'\x00'
def forge_array(data, len_bytes=4) -> bytes:
    """Prefix *data* with its length as a big-endian integer.

    :param data: payload bytes
    :param len_bytes: width of the length prefix in bytes
    """
    length_prefix = len(data).to_bytes(len_bytes, 'big')
    return length_prefix + data
def parse_array(data, len_bytes=4) -> tuple:
    """Decode a length-prefixed array of bytes.

    :param data: encoded array (length prefix + body, trailing data allowed)
    :param len_bytes: number of bytes used for the length prefix
    :returns: Tuple[body bytes, total number of bytes consumed]
    """
    # NOTE: these asserts vanish under `python -O`; validation then silently
    # degrades to slicing whatever data is present.
    assert len(data) >= len_bytes, f'not enough bytes to parse array length, wanted {len_bytes}'
    length = int.from_bytes(data[:len_bytes], 'big')
    assert len(data) >= len_bytes + length, f'not enough bytes to parse array body, wanted {length}'
    return data[len_bytes:len_bytes+length], len_bytes+length
def forge_base58(value: str) -> bytes:
""" Encode base58 string into bytes.
:param value: base58 encoded value (with checksum)
"""
return base58_decode(value.encode())
def forge_timestamp(value) -> int:
    """Convert an ISO-8601 UTC timestamp string to a unix timestamp.

    (The previous docstring claimed bytes out / int in; the code takes a
    string like '2019-01-01T00:00:00Z' and returns seconds since the epoch.)

    :param value: timestamp string in '%Y-%m-%dT%H:%M:%SZ' format
    :returns: unix timestamp in seconds (int)
    """
    assert isinstance(value, str)
    dt = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
    return calendar.timegm(dt.utctimetuple())
def forge_contract(value) -> bytes:
""" Encode a value of contract type (address + optional entrypoint) into bytes.
:param value: 'tz12345' or 'tz12345%default'
"""
parts = value.split('%')
address, entrypoint = (parts[0], parts[1]) if len(parts) == 2 else (parts[0], 'default')
res = forge_address(address)
if entrypoint != 'default':
res += entrypoint.encode()
return res
|
15,026 | 4cba6e32808fcd54911d75d7a1bd949bb0cce654 | #Change the color of the points to 'red'.
# Change the marker color to red
plt.scatter(cellphone.x, cellphone.y,
color='red')
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()
#Change the marker shape to square.
# Change the marker shape to square
plt.scatter(cellphone.x, cellphone.y,
color='red',
marker='s')
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()
#Change the transparency of the scatterplot to 0.1
# Change the transparency to 0.1
plt.scatter(cellphone.x, cellphone.y,
color='red',
marker='s',
alpha=0.1)
# Add labels
plt.ylabel('Latitude')
plt.xlabel('Longitude')
# Display the plot
plt.show()
|
15,027 | d9f8eefdf0141b6a2b10180fc41f897257cdc2e5 | import datetime
now = datetime.datetime.now()
ano = int(input('Informe o ano do atleta: '))
idade = now.year - ano
if idade < 9:
print('Idade do atleta: {}\nCategoria Mirim'.format(idade))
elif idade <= 14:
print('Idade do atleta: {}\nCategoria Infantil'.format(idade))
elif idade <= 19:
print('Idade do atleta: {}\nCategoria Junior'.format(idade))
elif idade <= 20:
print('Idade do atleta: {}\nCategoria Sênior'.format(idade))
else:
print('Idade do atleta: {}\nCategoria Master'.format(idade))
|
15,028 | 40c52079cffd29b3e5ad5dee782db361623fa023 | import requests
def singleton(cls):
    """Class decorator: always return the same instance of *cls*.

    The first call constructs the instance; subsequent calls ignore their
    arguments and return the cached object.
    """
    cache = {}

    def wrapper(*args, **kwargs):
        if cls not in cache:
            cache[cls] = cls(*args, **kwargs)
        return cache[cls]

    return wrapper
@singleton
class VaultSession:
    """Process-wide holder for a single shared requests.Session."""
    def __init__(self):
        # One HTTP session reused everywhere (connection pooling).
        self.session = requests.Session()
15,029 | 5a5b14bfd874a8b9acb831075195cbaaeebed5d8 | import math, collections
import wikipedia
from util import getWordCountWiki, dot
# Given two article, return a word spectrum. Input must be a Counter.
def getPairUniqueness(article1, article2, smooth=1, returntype=collections.Counter):
    """Smoothed frequency ratio for each word across two word-count mappings.

    For every word appearing in either article, computes
    ``(article1[w] + smooth) / (article2[w] + smooth)``. Inputs must be
    Counters. Returns an empty mapping when both inputs compare equal.
    """
    result = returntype()
    if article1 == article2:
        return result
    # Union of vocabularies, preserving article1-then-article2 order.
    vocabulary = list(article1) + [w for w in article2 if w not in article1]
    for word in vocabulary:
        result[word] = float(article1[word] + smooth) / float(article2[word] + smooth)
    return result
def getLogPairUniqueness(article1, article2, smooth=1, returntype=collections.Counter):
    """Log of the smoothed count ratio for every word in either article.

    Empty result when the two articles compare equal.
    """
    result = returntype()
    if article1 == article2:
        return result
    vocabulary = set(article1)
    vocabulary.update(article2)
    for word in vocabulary:
        result[word] = math.log(float(article1[word] + smooth) / float(article2[word] + smooth))
    return result
#####################################################################################
# Compare two article and rank other words in the article onto a spectrum
# This functions is pretty magical. Can be very useful ! ! !
def getWikiPairUniqueness(title1, title2, smooth=1, returntype=collections.Counter):
    """Log word-count ratio spectrum between two Wikipedia articles, by title."""
    return getLogPairUniqueness(getWordCountWiki(title1), getWordCountWiki(title2), smooth, returntype)
def normalizedCompare(title1, title2, smooth=1, returntype=collections.Counter):
    """Log pair-uniqueness of two Wikipedia titles, rescaled linearly onto [-1, 1]."""
    if title1==title2:
        return returntype()
    maxi, mini = float('-inf'), float('inf')
    result = getWikiPairUniqueness(title1, title2, smooth, returntype)
    # First pass: find the extreme scores.
    for words in result:
        maxi = max(maxi, result[words])
        mini = min(mini, result[words])
    # Second pass: map [mini, maxi] onto [-1, 1].
    for words in result:
        result[words] = 2 * float(result[words] - mini) / (maxi - mini) - 1
    return result
def getRelativeLogUniqueness(article, key, smooth=1, returntype=collections.Counter):
    """Smoothed log weight of each word, normalised by the log weight of *key*."""
    result = returntype()
    log_smooth = math.log(float(smooth))
    for word in article:
        numerator = math.log(float(article[word] + smooth)) - log_smooth
        denominator = math.log(float(article[key] + smooth)) - log_smooth
        result[word] = numerator / denominator
    return result
def getLogUniqueness(article, smooth=1, returntype=collections.Counter):
    """Smoothed log weight of every word: log(count + smooth) - log(smooth)."""
    result = returntype()
    log_smooth = math.log(float(smooth))
    for word in article:
        result[word] = math.log(float(article[word] + smooth)) - log_smooth
    return result
# !!!!!! Danger !!!!!! Not usable with >1 word long title
def normalizedLogUniqueness(title, smooth=1, returntype=collections.Counter):
    """Log uniqueness of each word in article *title*, relative to the title
    word itself — hence only usable with single-word titles.
    """
    # Bug fix: the original called the undefined name `getLogUnisonUniqueness`,
    # raising NameError at call time. The argument list matches
    # getRelativeLogUniqueness(article, key, smooth, returntype) exactly.
    return getRelativeLogUniqueness(getWordCountWiki(title), title.lower(), smooth, returntype)
def compareArticleWithBase(title, listOfTitle, weight=1, smooth=1, returntype=collections.Counter):
    """Average the normalised comparison of *title* against every title in
    *listOfTitle*; *weight* rescales the average (weight/len per comparison)."""
    result = returntype()
    N = float(len(listOfTitle)) / weight
    for title2 in listOfTitle:
        cmp = normalizedCompare(title, title2, smooth, returntype)
        for w in cmp:
            result[w] += cmp[w]
    # Normalise accumulated scores by the (weighted) number of comparisons.
    for w in result:
        result[w] /= N
    return result
def expofilter(count, factor, threshold=0):
    """In place: drop entries below *threshold*, replace the rest with exp(factor * value)."""
    # Snapshot the keys first — we mutate `count` while walking it.
    for word in list(count):
        value = count[word]
        if value < threshold:
            del count[word]
        else:
            count[word] = math.exp(factor * value)
def linfilter(count, threshold=0):
    """In place: remove every entry whose value is below *threshold*."""
    # Snapshot the keys first — we mutate `count` while walking it.
    for word in list(count):
        if count[word] < threshold:
            del count[word]
def getRelativeCount(title, listOfTitle, factor=1, threshold=0, weight=1, smooth=1, returntype=collections.Counter):
    """Compare *title* with a base list of titles, then exponentiate the scores
    (dropping those below *threshold*)."""
    result = compareArticleWithBase(title, listOfTitle, weight, smooth, returntype)
    expofilter(result, factor, threshold)
    return result
################################################################################
def oddOneOut1(title1, title2, title3, factor=1, threshold=0, weight=1, smooth=1, returntype=collections.Counter):
    """Print an 'odd factor' per title: the dot product of the *other* two
    titles' relative-count spectra (low overlap => that title is the odd one).
    Python 2 only — note the print statements below."""
    namelist = [title1, title2, title3]
    cmp1 = getRelativeCount(namelist[0], namelist, factor, threshold, weight, smooth, returntype)
    cmp2 = getRelativeCount(namelist[1], namelist, factor, threshold, weight, smooth, returntype)
    cmp3 = getRelativeCount(namelist[2], namelist, factor, threshold, weight, smooth, returntype)
    common12 = dot(cmp1,cmp2)
    common13 = dot(cmp1,cmp3)
    common23 = dot(cmp2,cmp3)
    print title1, "has odd factor of", common23
    print title2, "has odd factor of", common13
    print title3, "has odd factor of", common12
# perform dot analysis, but pre filter
def getOddity2(namelist, threshold=0):
    """Oddity score of namelist[0]: dot product of the other two titles'
    comparisons against it, after dropping scores below *threshold*."""
    cmp12 = normalizedCompare(namelist[1], namelist[0])
    cmp13 = normalizedCompare(namelist[2], namelist[0])
    linfilter(cmp12, threshold)
    linfilter(cmp13, threshold)
    # print cmp12
    # print cmp13
    return dot(cmp12, cmp13)
def oddOneOut2(namelist, threshold=0):
    """Return the index (0-2) of the odd one out among three titles."""
    result = []
    # Rotate so each title takes its turn as the candidate odd one.
    for i in range(3):
        result.append(getOddity2([namelist[i], namelist[(i+1)%3], namelist[(i+2)%3]], threshold))
    return result.index(max(result))
|
15,030 | f103273dbd355e1fa7c0e5ffa138f951592cb391 | class Solution:
def isPalindrome(self, s: str) -> bool:
lst = [i.lower() for i in s if i.isalnum() == True]
new = lst.copy()
midpoint = int(len(lst)/2)
for i in range(midpoint):
temp = new[i]
new[i] = new[-i-1]
new[-i-1] = temp
return lst == new
|
15,031 | 193939b7f064b8d304653a91af006930a2cddfe0 | import subprocess
import glob
import pandas as pd
import regex
from sklearn.model_selection import train_test_split
# import TensorFlow as tf
import spacy
import pickle
nlp = spacy.load('ja_ginza')
doc = nlp('銀座でランチをご一緒しましょう。')
for sent in doc.sents:
for token in sent:
print(token.i, token.orth_, token.lemma_, token.pos_, token.tag_, token.dep_, token.head.i)
print('EOS')
train_paths = glob.glob('../data/train/*')
test_paths = glob.glob('../data/test/*')
dfs = []
for path in train_paths:
df = pd.read_json(path, orient='records', lines=True)
print(df)
dfs.append(df)
break
train_df = pd.concat(dfs)
import subprocess
from glob import glob
import pandas as pd
import regex
import spacy
from sklearn.model_selection import train_test_split
nlp = spacy.load('ja_ginza')
train_paths = glob('../data/input/train/*')
test_paths = glob('../data/input/test/*')
dfs = []
for path in train_paths:
df = pd.read_json(path, orient='records', lines=True)
print(df)
dfs.append(df)
train_df = pd.concat(dfs)
exit()
dfs = []
for path in test_paths:
df = pd.read_json(path, orient='records', lines=True)
dfs.append(df)
test_df = pd.concat(dfs)
# train, valの分割は、裁判種別と、ラベルの数の多いPERSON, ORGFACPOS, LOCATIONの数が同等程度に分かれるようにすることとする
# Derive helper columns shared by the train and test frames.
for df in [train_df, test_df]:
    # Numeric file id from filenames like 'x0123_hanrei.txt'.
    # NOTE(review): str.rstrip strips a trailing *character set*, not the exact
    # suffix '_hanrei.txt' — safe only while no filename ends with extra
    # characters from that set; confirm against the data.
    df['file_id'] = df['meta'].apply(lambda x: x['filename'].rstrip('_hanrei.txt')[1:]).map(int)
    df['category'] = df['meta'].apply(lambda x: x['category'])
    # Stratify train/val by court type; the rare categories 労働事件裁判例 and
    # 高裁判例 are folded into その他 ('other') so each stratum has enough rows.
    df['stratify'] = df['category'].apply(
        lambda x: 'その他' if x in ['労働事件裁判例', '高裁判例'] else x)
    df.drop(['meta', 'annotation_approver'], axis=1, inplace=True)
    df.sort_values('file_id', inplace=True)
    df.reset_index(drop=True, inplace=True)
# Bug fix: removed the stray statement `PetFinder.my`, which raised NameError
# as soon as the script reached this point.
def count_tag(labels):
    """Count how many labels there are of each tag type.

    Each label is a [start, end, tag] triple; returns {tag: count}.
    """
    counts = {}
    for label in labels:
        tag = label[2]
        counts[tag] = counts.get(tag, 0) + 1
    return counts
train_df['total_nlabel'] = train_df['labels'].apply(lambda x: len(x))
train_df['num_label'] = train_df['labels'].apply(count_tag)
tags = ['PERSON', 'ORGFACPOS', 'LOCATION', 'TIMEX', 'MISC']
tmp_df = train_df['num_label'].apply(pd.Series)[tags]
train_df = pd.concat([train_df, tmp_df], axis=1)
del train_df['num_label'], tmp_df
# 1レコードあたりのPERSON, ORGFACPOS, LOCATIONの数が同等程度に分かれる乱数シードを探索
min_ratios = []
min_diff = 10 ** 5
min_seed = 0
for seed in range(100):
train_ch_df, val_df = train_test_split(train_df, test_size=0.25, random_state=seed, stratify=train_df['stratify'])
ratios = []
for tag in ['PERSON', 'ORGFACPOS', 'LOCATION']:
val_ntag_per_record = val_df[tag].sum() / val_df.shape[0]
train_ntag_per_record = train_ch_df[tag].sum() / train_ch_df.shape[0]
ratios.append(val_ntag_per_record / train_ntag_per_record)
diff = sum([abs(1 - ratio) for ratio in ratios])
if diff < min_diff:
min_ratios = ratios
min_diff = diff
min_seed = seed
print(min_ratios, min_diff, min_seed)
train_ch_df, val_df = train_test_split(train_df, test_size=0.25, random_state=min_seed, stratify=train_df['stratify'])
def format_iob(text, labels):
    """Tokenise *text* with the module-level spaCy `nlp` and re-assign the
    character-span *labels* per token in IOB2 format.

    Returns a list of [token_text, 'B'|'I'|'O', tag] rows.
    NOTE(review): appends a sentinel to the caller's *labels* list in place —
    confirm that mutation is acceptable at the call sites.
    """
    doc = nlp(text)
    output = [['', 'O', '']]  # dummy first row: B- vs I- is decided from the previous row's tag
    INF = 10 ** 9
    labels.append([INF, INF, ''])  # sentinel so we can advance past the last real label
    label_idx = 0
    label = labels[label_idx]
    for token in doc:
        # Once token.idx passes the current label's end, move to the next label.
        if label[1] <= token.idx:
            label_idx += 1
            label = labels[label_idx]
        # Token inside [start, end): tag it; B- if the tag changed, else I-.
        if label[0] <= token.idx < label[1]:
            if output[-1][2] != label[2]:
                output.append([token.text, 'B', label[2]])
            else:
                output.append([token.text, 'I', label[2]])
        else:
            output.append([token.text, 'O', ''])
    return output[1:]  # drop the dummy row
tagged_tokens = []
texts = train_ch_df.text.values
labels_list = train_ch_df.labels.values
file_ids = train_ch_df.file_id.values
for text, labels in zip(texts, labels_list):
output = format_iob(text, labels)
output = '\n'.join([f'{l[0]} {l[1]}-{l[2]}' if l[1] != 'O' else f'{l[0]} {l[1]}' for l in output])
tagged_tokens.append(output)
tagged_tokens = '\n\n'.join(tagged_tokens)
with open('../data/input/train.txt', mode='w') as f:
f.write(tagged_tokens)
tagged_tokens = []
texts = val_df.text.values
labels_list = val_df.labels.values
file_ids = val_df.file_id.values
for text, labels in zip(texts, labels_list):
output = format_iob(text, labels)
output = '\n'.join([f'{l[0]} {l[1]}-{l[2]}' if l[1] != 'O' else f'{l[0]} {l[1]}' for l in output])
tagged_tokens.append(output)
tagged_tokens = '\n\n'.join(tagged_tokens)
with open('../data/input/dev.txt', mode='w') as f:
f.write(tagged_tokens)
test_df['labels'] = [[]] * test_df.shape[0]
tagged_tokens = []
texts = test_df.text.values
labels_list = test_df.labels.values
file_ids = test_df.file_id.values
for text, labels in zip(texts, labels_list):
output = format_iob(text, labels)
output = '\n'.join([f'{l[0]} {l[1]}-{l[2]}' if l[1] != 'O' else f'{l[0]} {l[1]}' for l in output])
tagged_tokens.append(output)
tagged_tokens = '\n\n'.join(tagged_tokens)
with open('../data/test_out.txt', mode='w') as f:
f.write(tagged_tokens) |
15,032 | 5eb50c15b17d266d85e3ef5d840c8b60a511eef9 | import math
def verif_triangle(a, b, c):
    """Print 'Yes' when the three stick lengths can form a (possibly
    degenerate) triangle — every pair must reach at least the third stick —
    and 'No' otherwise."""
    fits = a + b >= c and a + c >= b and b + c >= a
    print('Yes' if fits else 'No')
def input_triangle():
    """Read three stick lengths from stdin and run the triangle check."""
    a = input('Informe o tamanho do 1º graveto:\n')
    b = input('Informe o tamanho do 2º graveto:\n')
    c = input('Informe o tamanho do 3º graveto:\n')
    verif_triangle(int(a),int(b),int(c))
input_triangle()
|
15,033 | 40c359ef13e40cc777e014cb3fd40a7b005fa4b6 | from abc import ABCMeta, abstractmethod
class IModel:
    """Abstract interface for the movie-review data layer.

    NOTE(review): `__metaclass__ = ABCMeta` is Python 2 syntax and is ignored
    by Python 3 (which needs `class IModel(metaclass=ABCMeta)`), and no method
    carries @abstractmethod — so instantiation is not actually prevented.
    Confirm whether enforcement is intended.
    """
    __metaclass__=ABCMeta
    def fetchall(self, databaseName):
        """
        Gets all Moviereview datastore entries along with the sentiment analysis performed on the comments
        """
        pass
    def addreview(self, name, year, genre, rating, review, reviewer):
        """
        Inserts a movie review into Moviereview datastore together with sentiment analysis performed on the comments
        """
        pass
    def fetchTranslation(self, language):
        """
        Gets all Moviereviews translated into choosen language
        """
        pass
15,034 | c59955f286d7d5ccbecfc6d3dcb08ecffeef565c | # Detection and Recognition using ZED Camera
import cv2, sys, os, math
import numpy as np
import pyzed.sl as sl
camera_settings = sl.CAMERA_SETTINGS.CAMERA_SETTINGS_BRIGHTNESS
str_camera_settings = "BRIGHTNESS"
step_camera_settings = 1
# Create a Camera object
zed = sl.Camera()
# Create a InitParameters object and set configuration parameters
init_params = sl.InitParameters()
init_params.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_PERFORMANCE # Use PERFORMANCE depth mode
init_params.coordinate_units = sl.UNIT.UNIT_METER # Use meter units (for depth measurements)
cam = sl.Camera()
status = cam.open(init_params)
mat = sl.Mat()
runtime = sl.RuntimeParameters()
def dataset():
    """Capture up to 99 face crops from the camera and store them under
    datasets/<entered name>/ for later recogniser training.

    NOTE(review): relies on the module-level `webcam` defined *after* this
    function; there it is an sl.Camera, but `.read()` below is the
    cv2.VideoCapture API (see the commented-out line) — confirm which camera
    object is intended.
    """
    haar_file = 'haarcascade_frontalface_default.xml'
    # Faces are stored in this folder
    datasets = 'datasets'
    # Every person is saved as a separate folder
    sub_data = input('Enter Name')
    path = os.path.join(datasets, sub_data)
    if not os.path.isdir(path):
        os.mkdir(path)
    # Defining fixed size for image
    (width, height) = (130, 100)
    face_cascade = cv2.CascadeClassifier(haar_file)
    # webcam = cv2.VideoCapture(0)
    # Capturing 100 images for dataset
    count = 1
    while count < 100:
        (_, jkl) = webcam.read()
        im = jkl[0:720, 0:514]  # crop frame to the region of interest
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
            face = gray[y:y + h, x:x + w]
            face_resize = cv2.resize(face, (width, height))
            cv2.imwrite('% s/% s.png' % (path, count), face_resize)
        count += 1
haar_file = 'haarcascade_frontalface_default.xml'
datasets = 'datasets'
o = 0
count = 0
# Creating Fisher Recognizer
print('Recognizing Face Please Be in sufficient Lights...')
# Creating a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
for (subdirs, dirs, files) in os.walk(datasets):
for subdir in dirs:
names[id] = subdir
subjectpath = os.path.join(datasets, subdir)
for filename in os.listdir(subjectpath):
path = subjectpath + '/' + filename
lable = id
images.append(cv2.imread(path, 0))
lables.append(int(lable))
id += 1
(width, height) = (130, 100)
# Creating a Numpy array using the lists of images and names
(images, lables) = [np.array(lis) for lis in [images, lables]]
# Training model using LBPH
model = cv2.face.LBPHFaceRecognizer_create()
model.train(images, lables)
# Use Fisher Recognizer on video feed
face_cascade = cv2.CascadeClassifier(haar_file)
init = sl.InitParameters()
webcam = sl.Camera()
runtime_parameters = sl.RuntimeParameters()
runtime_parameters.sensing_mode = sl.SENSING_MODE.SENSING_MODE_STANDARD # Use STANDARD sensing mode
# Capture 50 images and depth, then stop
i = 0
image = sl.Mat()
depth = sl.Mat()
point_cloud = sl.Mat()
while True:
err = cam.grab(runtime)
if err == sl.ERROR_CODE.SUCCESS:
cam.retrieve_image(mat, sl.VIEW.VIEW_RIGHT)
im = mat.get_data()
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(im, (x, y), (x + w, y + h), (255, 0, 0), 2)
face = gray[y:y + h, x:x + w]
face_resize = cv2.resize(face, (width, height))
# Try to recognize the face
prediction = model.predict(face_resize)
cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 3)
# A new image is available if grab() returns SUCCESS
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# Retrieve left image
zed.retrieve_image(image, sl.VIEW.VIEW_LEFT)
# Retrieve depth map. Depth is aligned on the left image
zed.retrieve_measure(depth, sl.MEASURE.MEASURE_DEPTH)
# Retrieve colored point cloud. Point cloud is aligned on the left image.
zed.retrieve_measure(point_cloud, sl.MEASURE.MEASURE_XYZRGBA)
# Get and print distance value in mm at the center of the image
# We measure the distance camera - object using Euclidean distance
x = round(image.get_width() / 2)
y = round(image.get_height() / 2)
err, point_cloud_value = point_cloud.get_value(x, y)
distance = math.sqrt(point_cloud_value[0] * point_cloud_value[0] +
point_cloud_value[1] * point_cloud_value[1] +
point_cloud_value[2] * point_cloud_value[2])
if not np.isnan(distance) and not np.isinf(distance):
distance = round(distance)
print("Distance to Camera at ({0}, {1}): {2} mm\n".format(x, y, distance))
# Increment the loop
i = i + 1
else:
print("Can't estimate distance at this position, move the camera\n")
sys.stdout.flush()
if prediction[1] < 90:
cv2.putText(im, '% s - %.0f' %
(names[prediction[0]], prediction[1]), (x - 10, y - 10),
cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
else:
cv2.putText(im, 'not recognized',
(x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
count = count + 1
if count == 50:
if o == 0:
print('Add person? (Y/N)')
resp = input('Enter Response')
if resp == 'y':
dataset()
o = 1
cv2.imshow('OpenCV', im)
# key = cv2.waitKey(10)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release Capture
webcam.release()
cv2.destroyAllWindows()
# Close the camera
zed.close() |
15,035 | e8eb8dac73f62516c25b5381258abbc9102d3dda | # -*- coding: utf-8 -*-
n = int(raw_input())
p = set(filter(lambda x: x, map(int, raw_input().split(' '))[1:]))
q = set(filter(lambda x: x, map(int, raw_input().split(' '))[1:]))
if len(p | q) >= n:
print('I become the guy.')
else:
print('Oh, my keyboard!')
|
15,036 | 41e810a2e103a6372aa0c10bddab5de3f97815d5 | # SMART MICROWAVE PYTHON CODE __VERSION__2.0.0 Date-07/07/2020 Owner - Udyogyantra Tecchnologies
# #======================================================================================================================================
import time
import redis
import json
import oven
import led
from threading import Thread
x = led.led()
x.configure()
MW = oven.Butler()
MW.configure()
cache = redis.Redis()
cache.set("PrevState", "Self_Check")
cache.set("CurrentState", "Self_Check")
# STATES=1['Self_Check','Emergency','Idle','HeatingOn','#TempAlreadyHigh','HeatingCompleted','HeatingPaused',
# 'InternetError','DoorOpen','SensorError']
class State(object): # The base template state which all others will inherit from
    def __init__(self, FSM):
        # Back-reference to the owning finite state machine.
        self.FSM = FSM
        self.stopTime = 0
        self.startTime = 0
    def Enter(self):
        # Record when the state was entered.
        self.startTime = time.time()
    def Execute(self):
        """Default safety behaviour: make sure the magnetron is OFF.

        Tries MW.stop() up to twice, waiting up to 3 s between attempts, and
        escalates: door opened while waiting -> DoorOpen; magnetron still on
        after both attempts -> ManualOverride.
        NOTE(review): a brand-new SMART_MW_Oven (with its own FSM, starting in
        'Idle') is created here instead of using self.FSM — confirm intent.
        """
        r = SMART_MW_Oven()
        for counter in range(2):
            pt = time.time()
            if cache.get('Magnetron').decode() == '0':
                break
            else:
                MW.stop()
            # Busy-wait up to 3 seconds, watching for the door opening.
            while time.time() - pt <= 3.0:
                if cache.get('Door').decode() == '1':
                    r.FSM.ToTransition("toDoorOpen")
                    r.Execute()
                    break
            if counter == 1 and cache.get('Magnetron').decode() == '1':
                r.FSM.ToTransition("toManualOverride")
                r.Execute()
    def Exit(self):
        pass
class Self_Check(State):
def __init__(self, FSM):
super().__init__(FSM)
def Enter(self):
super().Enter()
cache.set("CurrentState", "Self_Check")
def Execute(self):
super().Execute()
x.self_check()
events = json.loads(cache.get("InputStatusAndReading"))
r = SMART_MW_Oven()
if events["Internet"] == 1 and events["TempSensor"] == 200:
r.FSM.ToTransition("toIdle")
elif events["Internet"] == 0:
r.FSM.ToTransition("toInternetError")
else:
r.FSM.ToTransition("toSensorError")
r.Execute()
def Exit(self):
cache.set("PrevState", "Self_Check")
class ManualOverride(State):
    """State entered when the magnetron reading contradicts what software
    commanded; blinks the LED on a worker thread until the state is left."""
    def __init__(self, FSM):
        super().__init__(FSM)
    def Enter(self):
        super().Enter()
        cache.set("CurrentState", "ManualOverride")
    def Execute(self):
        global t
        cache.set("LedOn", '1')
        # LED pattern runs on its own thread so the FSM loop is not blocked.
        t = Thread(target=x.manual_override)
        t.start()
    def Exit(self):
        cache.set("PrevState", "ManualOverride")
        global t
        # Bug fix: Thread.isAlive() was removed in Python 3.9 — use is_alive().
        if t.is_alive():
            cache.set("LedOn", '0')
            t.join()
class Idle(State):
    """Waiting state: nothing to heat; State.Execute keeps the magnetron off."""
    def __init__(self, FSM):
        super().__init__(FSM)
    def Enter(self):
        super().Enter()
        cache.set("CurrentState", "Idle")
    def Execute(self):
        super().Execute()  # base behaviour: verify magnetron is off
        x.idle()  # idle LED pattern
    def Exit(self):
        cache.set("PrevState", "Idle")
class HeatingOn(State):
def __init__(self, FSM):
super(HeatingOn, self).__init__(FSM)
def Enter(self):
super(HeatingOn, self).Enter()
cache.set("CurrentState", "HeatingOn")
def Execute(self):
r = SMART_MW_Oven()
x.heating_on()
for counter in range(2):
pt = time.time()
if cache.get('Magnetron').decode() == '1':
break
else:
MW.start()
while time.time() - pt <= 3.0:
if cache.get('Door').decode() == '1':
r.FSM.ToTransition("toDoorOpen")
r.Execute()
break
if counter == 1 and cache.get('Magnetron').decode() == '0':
r.FSM.ToTransition("toManualOverride")
r.Execute()
def Exit(self):
cache.set("PrevState", "HeatingOn")
class HeatingCompleted(State):
    """Heating cycle finished: stop the magnetron and show the 'done' LED pattern."""
    def __init__(self, FSM):
        super(HeatingCompleted, self).__init__(FSM)
    def Enter(self):
        super(HeatingCompleted, self).Enter()
        x.heating_completed()
        cache.set("CurrentState", "HeatingCompleted")
        cache.set("Action", "Stop")
        MW.stop()
    def Execute(self):
        super().Execute()  # base behaviour re-verifies the magnetron is off
    def Exit(self):
        cache.set("PrevState", "HeatingCompleted")
class HeatingPaused(State):
def __init__(self, FSM):
super(HeatingPaused, self).__init__(FSM)
def Enter(self):
super(HeatingPaused, self).Enter()
cache.set("CurrentState", "HeatingPaused")
def Execute(self):
r = SMART_MW_Oven()
x.heating_paused()
for counter in range(2):
pt = time.time()
if cache.get('Magnetron').decode() == '0':
break
else:
MW.pause()
while time.time() - pt <= 3.0:
if cache.get('Door').decode() == '1':
r.FSM.ToTransition("toDoorOpen")
r.Execute()
break
if (counter == 1 and cache.get('Magnetron').decode() == '1'):
r.FSM.ToTransition("toManualOverride")
r.Execute()
def Exit(self):
cache.set("PrevState", "HeatingPaused")
class InternetError(State):
    """Error state: no internet connectivity. Stops any heating and blinks the
    LED on a worker thread until the state is left."""
    def __init__(self, FSM):
        super(InternetError, self).__init__(FSM)
    def Enter(self):
        super(InternetError, self).Enter()
        cache.set("CurrentState", "InternetError")
        # Bug fix: cache values decode to *strings*; the original compared the
        # decoded value with the int 1 (always False), so the magnetron was
        # never stopped on entering this state.
        if cache.get("Magnetron").decode() == '1':
            MW.stop()
    def Execute(self):
        super().Execute()
        global t
        cache.set("LedOn", '1')
        # LED pattern runs on its own thread so the FSM loop is not blocked.
        t = Thread(target=x.internet_error)
        t.start()
    def Exit(self):
        cache.set("PrevState", "InternetError")
        global t
        # Bug fix: Thread.isAlive() was removed in Python 3.9 — use is_alive().
        if t.is_alive():
            cache.set("LedOn", '0')
            t.join()
class DoorOpen(State):
    """Door opened: pause heating if it was running; show the door LED pattern."""
    def __init__(self, FSM):
        super(DoorOpen, self).__init__(FSM)
    def Enter(self):
        super(DoorOpen, self).Enter()
        cache.set("CurrentState", "DoorOpen")
        # Only pause if heating was in progress (or already paused).
        if (cache.get("PrevState").decode() == "HeatingOn" or cache.get("PrevState").decode() == "HeatingPaused"):
            MW.pause()
    def Execute(self):
        super().Execute()
        x.door_open()
    def Exit(self):
        cache.set("PrevState", "DoorOpen")
class SensorError(State):
    """Error state: temperature-sensor fault. Stops any heating and blinks the
    LED on a worker thread until the state is left."""
    def __init__(self, FSM):
        super(SensorError, self).__init__(FSM)
    def Enter(self):
        super(SensorError, self).Enter()
        cache.set("CurrentState", "SensorError")
        # Bug fix: cache values decode to *strings*; comparing with the int 1
        # was always False, so the magnetron was never stopped here.
        if cache.get("Magnetron").decode() == '1':
            MW.stop()
    def Execute(self):
        super().Execute()
        global t
        cache.set("LedOn", '1')
        # LED pattern runs on its own thread so the FSM loop is not blocked.
        t = Thread(target=x.sensor_error)
        t.start()
    def Exit(self):
        cache.set("PrevState", "SensorError")
        global t
        # Bug fix: Thread.isAlive() was removed in Python 3.9 — use is_alive().
        if t.is_alive():
            cache.set("LedOn", '0')
            t.join()
class Transition(object):
    """Value object naming the destination state of a transition."""

    def __init__(self, toState):
        # Destination state name, resolved later by FSM.SetState.
        self.toState = toState

    def Execute(self):
        # Hook for transition side effects; the base transition does nothing.
        pass
# ==================================================================================================================
# FINITE STATE MACHINES
class FSM(object): # Holds the states and transitions available executes current states main functions and transitions
    def __init__(self, character):
        self.char = character  # owning character (the SMART_MW_Oven)
        self.states = {}       # state name -> State instance
        self.transitions = {}  # transition name -> Transition instance
        self.curState = None
        self.prevState = None # USE TO PREVENT LOOPING 2 STATES FOREVER
        self.trans = None      # pending transition, consumed by Execute()
    def AddTransition(self, transName, transition):
        self.transitions[transName] = transition
    def AddState(self, stateName, state):
        self.states[stateName] = state
    def SetState(self, stateName):
        self.prevState = self.curState
        self.curState = self.states[stateName]
    def ToTransition(self, toTrans):
        # Queue a transition; it takes effect on the next Execute() call.
        self.trans = self.transitions[toTrans]
        print(toTrans)
    def Execute(self):
        # If a transition is pending: exit the old state, run the transition
        # hook, switch state, enter the new state — then run the (possibly
        # new) current state's Execute.
        if self.trans:
            self.curState.Exit()
            self.trans.Execute()
            self.SetState(self.trans.toState)
            self.curState.Enter()
            self.trans = None
        self.curState.Execute()
# print(Transition("Self_Check"))#self.curState.Enter()
##=================================================================================================================================================
## IMPLEMENTATION
Char = type("Char", (object,), {})
class SMART_MW_Oven(Char): # #Base character which will be holding the Finite State Machine, which will hold the
# states and
# transitions. '''
def __init__(self):
self.FSM = FSM(self)
## STATES
self.FSM.AddState("Idle", Idle(self.FSM))
self.FSM.AddState("HeatingOn", HeatingOn(self.FSM))
self.FSM.AddState("Self_Check", Self_Check(self.FSM))
self.FSM.AddState("HeatingCompleted", HeatingCompleted(self.FSM))
self.FSM.AddState("HeatingPaused", HeatingPaused(self.FSM))
self.FSM.AddState("InternetError", InternetError(self.FSM))
self.FSM.AddState("DoorOpen", DoorOpen(self.FSM))
self.FSM.AddState("SensorError", SensorError(self.FSM))
self.FSM.AddState("ManualOverride", ManualOverride(self.FSM))
## TRANSITIONS
self.FSM.AddTransition("toIdle", Transition("Idle"))
self.FSM.AddTransition("toHeatingOn", Transition("HeatingOn"))
self.FSM.AddTransition("toSelf_Check", Transition("Self_Check"))
self.FSM.AddTransition("toHeatingCompleted", Transition("HeatingCompleted"))
self.FSM.AddTransition("toHeatingPaused", Transition("HeatingPaused"))
self.FSM.AddTransition("toInternetError", Transition("InternetError"))
self.FSM.AddTransition("toDoorOpen", Transition("DoorOpen"))
self.FSM.AddTransition("toSensorError", Transition("SensorError"))
self.FSM.AddTransition("toManualOverride", Transition("ManualOverride"))
self.FSM.SetState("Idle")
def Execute(self):
self.FSM.Execute()
|
15,037 | ff1d06e3090807133476603013a39a4f5cbcd0d7 | # -*- coding: utf-8 -*-
# !/usr/bin/env
# !/Library/Frameworks/Python.framework/Versions/3.8/bin/python3
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def createlollipopgraph(d):
    """Draw and save ('produzione.png') a horizontal lollipop chart of the
    four production-priority percentages.

    *d* maps the Italian priority labels to percentage strings such as '75%'.
    """
    # Strip the trailing '%' and convert each value to int.
    cap_prod = int(d['Aumento capacità produttiva'][:-1])
    eff_prod = int(d['Aumento efficienza produttiva'][:-1])
    div_serv = int(d['Diversificazione Servizi'][:-1])
    div_prod = int(d['Diversificazione Prodotti'][:-1])
    # Bug fix: the original paired div_serv with the 'Prodotti' label and
    # div_prod with 'Servizi'; values are now aligned with their labels.
    # (Also removed an unused random placeholder DataFrame and a debug print.)
    df = pd.DataFrame({
        'group': ['Aumento Cap.\n Produttiva', 'Aumento\n Efficienza prod.',
                  'Divers.\nProdotti', 'Divers.\nServizi'],
        'values': [cap_prod, eff_prod, div_prod, div_serv]
    })
    # Reorder it following the values:
    ordered_df = df.sort_values(by='values')
    my_range = range(1, len(df.index) + 1)
    # seaborn is imported only for its default styling.
    sns.set()
    plt.hlines(y=my_range, xmin=0, xmax=ordered_df['values'], color='skyblue')
    plt.plot(ordered_df['values'], my_range, "o")
    # Add titles and Axis names
    plt.yticks(my_range, ordered_df['group'])
    plt.title('Priorità: Produzione Aziendale', loc='center')
    plt.xlabel('Value of the variable')
    plt.ylabel('Priorities')
    # Name of the file must be the same as imported in create_docs.py
    plt.savefig('produzione.png', bbox_inches="tight")
    plt.clf()
15,038 | fb5d17fdf1810c266320797c563d187127c075f8 | import math
## Solution
# Approach: repeatedly divide the number by the first prime, then the second,
# and so on until what remains is itself prime. To avoid re-checking primality,
# every prime found is cached so later lookups take O(1).
prime_list = {}  # memo of confirmed primes: {n: True}


def is_prime(n):
    """Return True if *n* is prime; confirmed primes are memoised in prime_list.

    Bug fix: the original fell through its empty trial-division range for
    small n and reported 1 (and any n whose sqrt rounds below 2) as prime.
    """
    if n < 2:
        return False
    if n in prime_list:
        return True
    # Quick screen against the first few primes.
    for p in (2, 3, 5, 7):
        if n == p:
            prime_list[n] = True
            return True
        if n % p == 0:
            return False
    # Trial division by odd candidates up to sqrt(n); divisibility by
    # 2, 3, 5 and 7 was already excluded, so start at 11.
    for i in range(11, int(math.sqrt(n)) + 1, 2):
        if n % i == 0:
            return False
    prime_list[n] = True
    return True
def prime_factor(n):
    """Return the largest prime factor of *n* (n >= 2) via trial division.

    Bug fix: the original returned `max(primes)` while `primes` could still be
    empty, crashing with ValueError for n = 2 and for prime powers such as
    4, 8 or 9. This version divides out each factor completely with integer
    arithmetic (the original's float division also risked precision loss).
    """
    largest = None
    m = n  # remaining unfactored part of n
    i = 2
    while i * i <= m:
        if m % i == 0:
            # i is necessarily prime here: all smaller factors were removed.
            largest = i
            while m % i == 0:
                m //= i
        i += 1
    if m > 1:
        # The remainder is a prime strictly larger than any factor found so far.
        largest = m
    return largest
print(prime_factor(600851475143))
|
15,039 | 49d94c18b11172eaa3d44547ef00a48c07acd274 | #Autor: Javier Urrutia
#Email: javier.urrutia.r@ug.uchile.cl
#Excepcion de archivo modificado
class ArchivoModificado(Exception):
    """Raised when the movements file no longer matches the expected format."""
    pass
# Clase para manejar archivo con movimientos
# Class that manages a source file containing movement definitions.
class Movimientos:
    """Indexes and reads robot 'movement' blocks embedded in a C-like source file."""
    # Build a Movimientos object bound to the given file path and index it.
    def __init__(self, archivo):
        assert type(archivo) == str
        self.__archivo = archivo
        self.listaMovi = []  # list of (tag, byte offset) per movement found
        self.scan()
    # scan: locate every movement marker (': //M..') and record its offset.
    def scan(self):
        try:
            f = open(self.__archivo)
            self.listaMovi=[]
            pos = 0
            for line in f:
                ind = line.find(": //M")
                if ind > 0:
                    # Keep the 3 characters after ': ' plus the line's offset.
                    self.listaMovi.append((line[ind + 4:ind + 7], pos))
                # NOTE(review): assumes each line occupies len(line)+1 bytes on
                # disk (e.g. CRLF endings) — confirm for the target files.
                pos += (len(line) + 1)
            f.close()
        except MemoryError:
            print("Memoria llena")
            return
    # leerMov: read movement number `num` back from the file.
    def leerMov(self, num):
        assert type(num) == int
        movimientos = []
        f = open(self.__archivo)
        f.seek(self.listaMovi[num][1])
        header = f.readline()
        try:
            # One-letter command after "case '" and the description after '-'.
            comando=header[header.find("case '")+6]
            descripcion = header[header.find('-') + 2:-1]
            f.readline()
            f.readline()
            linea = f.readline()
            # Each '{...}' line is one pose: a comma-separated list of ints.
            while '{' in linea:
                pose = linea.strip('\t\n,{} ').split(',')
                pose = [int(val) for val in pose]
                movimientos.append(pose)
                linea = f.readline()
            # Extract the start/end motors from the closing function call.
            funcion = f.readline().split(',')
            mot_inicio = funcion[1].strip('\" ')
            mot_final = funcion[2].strip('\n\" ;)')
        except (IndexError, ValueError):
            f.close()
            raise ArchivoModificado("El archivo original ha sido modificado o no tiene el formato correcto")
        f.close()
        return movimientos, comando, self.listaMovi[num][0][1:3], descripcion, mot_inicio, mot_final
    # escribirMov: write a movement to the file (not implemented yet).
    def escribirMov(self, Mov):
        return
|
15,040 | 91a87294d8d47857be2648b14f46097aecc5037c | import csv
filename = raw_input("Please enter the csv file you'd like to use (i.e. names.csv)>>")
names = []
while filename != "END":
with open(filename, 'rb') as f:
reader = csv.reader(f)
for row in reader:
if row[0] not in names:
names.append(row[0])
filename = raw_input("Please enter the csv file you'd like to use (i.e. names.csv)>>")
with open('final.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',')
for name in names:
spamwriter.writerow([name])
|
15,041 | 60445717b81d3974c750ab4df5100785ab06d1f6 | version https://git-lfs.github.com/spec/v1
oid sha256:55a046e8eea9526243fe920528bbf26ddc331203ef81d022026b0b156a5433df
size 40004
|
15,042 | 9192924e84cf553079634d4c651bb29d1d76f60e | """
CP1404/CP5632 Practical - Suggested Solution
A program that allows user to look up hexadecimal colour codes like those at http://www.color-hex.com/color-names.html
"""
# Colour-name -> hex-code lookup table. Keys are upper-case because main()
# upper-cases the user's input before looking it up.
# Bug fix: "ALICEBlUE" contained a lower-case 'l', so entering "aliceblue"
# could never match; the key is corrected to "ALICEBLUE".
NAME_TO_CODE = {"ALICEBLUE": "#f0f8ff",
                "ANTIQUEWHITE": "#faebd7",
                "BEIGE": "#f5f5dc",
                "BLACK": "#000000",
                "BLANCHEDALMOND": "#ffebcd",
                "BLUEVIOLET": "#8a2be2",
                "BROWN": "#a52a2a",
                "BURLYWOOD": "#deb887",
                "CADETBLUE": "#5f9ea0",
                "CHOCOLATE": "#d2691e",
                "CORAL": "#ff7f50",
                "CORNFLOWERBLUE": "#6495ed",
                "DARKGOLDENROD": "#b8860b",
                "DARKGREEN": "#006400"}
print(NAME_TO_CODE)
def main():
    """Repeatedly prompt for a colour name and print its hex code; an empty
    line quits."""
    color_name = input("Enter the name of color: ").strip().upper()  # strip white spaces. lowercase inputs also work
    # Width of the longest key, for aligned output.
    max_key_length = max([len(key) for key in NAME_TO_CODE.keys()])
    while color_name != "":
        if color_name in NAME_TO_CODE:
            print("{:{}} is {}".format(color_name, max_key_length, NAME_TO_CODE[color_name],))
        else:
            print("Invalid color name")
        color_name = input("Enter the name of color: ").strip().upper()
if __name__ == '__main__':
    main()
15,043 | b7e44e22d89d25460a3e4e7b52f33e8c05af1013 | # Generated by Django 3.2.4 on 2021-06-15 18:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tweets', '0008_auto_20210614_1233'),
]
operations = [
migrations.AddField(
model_name='tweet',
name='parent',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tweets.tweet'),
),
migrations.AlterField(
model_name='tweet',
name='likes',
field=models.ManyToManyField(blank=True, related_name='tweet_user', through='tweets.TweetLike', to=settings.AUTH_USER_MODEL),
),
]
|
15,044 | f7222ed495c7432dc89e50448708fcf974766b63 | # Generated by Django 2.0.4 on 2018-07-22 09:04
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('compounds', '0016_auto_20180722_1053'),
]
operations = [
migrations.RenameField(
model_name='userodorantsource',
old_name='compound',
new_name='user_compound',
),
migrations.RemoveField(
model_name='userodorantsource',
name='user',
),
]
|
15,045 | 02cb55916ac5d0ef94b8ee1a544658b831f1254b | import json
"""crear json con listas"""
ls=[1,2,3]
ls2=[[1,2],[3,4],[4,5]]
"""escribir archivo json"""
with open("test3.json", "w") as fl:
json.dump(ls2, fl,indent=4)
"""leer un json"""
with open("test3.json", "r") as fl:
ls=json.load(fl) ##convierte el json a un diccionario
print(ls)
print(type(ls))
for i in range(0,len(ls)):
print(ls[i])
ls.append([4,5])
print(ls)
# ls.append(5)
# print()
# for i in range(0,len(ls)):
# print(ls[i]) |
15,046 | 598525118fdfe5db05660e29f3817d4d3773863e | import facebook
import pprint
graph = facebook.GraphAPI('AAACEdEose0cBAJoNl0ruByNwBIAHxUNCZB01eKCkAb6jco8WSQJxfkyXWMYGTkDYrg4yIKK75VPqlEXeU7Ywbmk15Fdce1t3oRjSkAiba9o29MxRe')
friends = [item['uid2'] for item in graph.fql('SELECT uid2 FROM friend WHERE uid1 = me()')['data']]
for friend in friends:
try:
print friend
info = graph.get_object(friend)
likes = graph.get_connections(friend, 'likes')['data']
print '#'*10
print info['name']
pprint.pprint(likes)
except Exception:
continue
#print likes
"""
for item in likes:
fql = 'SELECT uid FROM page_fan WHERE page_id = %s AND uid IN %s' %(item['id'], tuple(friends))
d = graph.fql(fql)['data']
print '%s\t%s\t%s' %(item['name'], item['category'], d)
"""
|
15,047 | 3b2cee38802147e940a8ac9cb990434ad7e82d57 | import numpy as np
class periodicmap_spliter:
    """Slice bookkeeping for splitting a periodic 2D map into buffered chunks."""

    def __init__(self):
        pass

    @staticmethod
    def get_slices_chk_N(N, LD_res, HD_res, buffers, inverse=False):
        """
        This lengthy, irritating piece of code returns the slice idcs for subcube (i,j)
        in a decomposition of an original map of shape (2**HD_res[0],2**HD_res[1]) with chunks of 2**LD_res per sides,
        together with buffers[0],[1] buffers pixel on each side, fixed by the periodicity condition of the HD map.
        Nothing very subtle for the map interior, but the edges and corners require a little bit of care.
        E.g. this is how this can be used to extract a LD cube:
        cub = np.zeros((2**LD_res[0]+ 2*buffers[0],2**LD_res[1] + 2*buffers[1]))
        sLDs,sHDs = spliter.get_slices_chk_N(N,LD_res,HD_res,buffers)
        for sLD,sHD in zip(sLDs,sHDs): cub[sLD] = map[sHD].
        Similary to patch the pieces together to a big cube. In that case you need only the first elements
        of the sLDs, sHDs arrays. For instance
        newmap = np.zeros((2**HD_res[0],2**HD_res[1]))
        for i in xrange(0,2 ** (HD_res[0] - LD_res[0])):
            for j in xrange(0,2 ** (HD_res[1] - LD_res[1])):
                sLDs,sHDs = spliter.get_slices_chk_ij(i,j,LD_res,HD_res,buffers,inverse = True)
                newmap[sHDs[0]] = cubes[i*2 ** (HD_res[1] - LD_res[1]) + j][sLDs[0]]
        does patch together the cubes sequence to build the bigger cube.

        :param N: flat chunk index; decomposed below as i = N // Nchks_1, j = N % Nchks_1
        :param LD_res: (log2 height, log2 width) of one chunk
        :param HD_res: (log2 height, log2 width) of the full map
        :param buffers: (b0, b1) buffer pixels added on each side of a chunk
        :param inverse: if True, return only the center->HD-position mapping
        :return: (list of LD slice pairs, list of HD slice pairs), parallel lists

        NOTE(review): the docstring example mentions `get_slices_chk_ij` and
        `xrange` (Python 2) — neither exists here; the method to call is this
        one with the flat index N.
        """
        assert len(LD_res) == 2 and len(HD_res) == 2
        if np.all(LD_res == HD_res):
            # Degenerate case: one chunk covering the whole map, no buffers.
            assert N == 0, N
            # NOTE(review): tuple comparison — passing buffers as a list
            # [0, 0] would fail this assert.
            assert buffers == (0, 0), buffers
            sl0_LD = slice(0, 2 ** LD_res[0])  # Center of buffered cube
            sl1_LD = slice(0, 2 ** LD_res[1])
            sl0_HD = slice(0, 2 ** HD_res[0])  # Center of buffered cube
            sl1_HD = slice(0, 2 ** HD_res[1])
            ret_LD = [(sl0_LD, sl1_LD)]
            ret_HD = [(sl0_HD, sl1_HD)]
            return ret_LD, ret_HD
        assert np.all(LD_res < HD_res)
        assert len(buffers) == 2
        assert buffers[0] < 2 ** LD_res[0] and buffers[1] < 2 ** LD_res[1]
        N0 = 2 ** LD_res[0]  # shape of small cube, buffers excl.
        N1 = 2 ** LD_res[1]
        N0H = 2 ** HD_res[0]  # shape of large cube
        N1H = 2 ** HD_res[1]
        Nchks_0 = 2 ** (HD_res[0] - LD_res[0])  # number of chunks per axis
        Nchks_1 = 2 ** (HD_res[1] - LD_res[1])
        assert N < Nchks_1 * Nchks_0, N
        b0 = buffers[0]
        b1 = buffers[1]
        ret_LD = []
        ret_HD = []
        # Decompose the flat chunk index into 2D chunk coordinates (i, j).
        j = N % Nchks_1
        i = N // Nchks_1  # in 0, ..., Nchks_0 -1
        if inverse:
            # We want the inverse mapping only
            sl0_LD = slice(b0, N0 + b0)  # Center of buffered cube
            sl1_LD = slice(b1, N1 + b1)
            sl0_HD = slice(i * N0, (i + 1) * N0)  # slices of HD cube
            sl1_HD = slice(j * N1, (j + 1) * N1)  # slices of HD cube
            ret_LD.append((sl0_LD, sl1_LD))
            ret_HD.append((sl0_HD, sl1_HD))
            return ret_LD, ret_HD
        if 0 < i < Nchks_0 - 1:
            # i in the interior :
            sl0_LD = slice(0, N0 + 2 * b0)  # Slices of LD cube
            sl0_HD = slice(i * N0 - b0, (i + 1) * N0 + b0)  # slices of HD cube
            if 0 < j < Nchks_1 - 1:
                # We are in the interior, no big deal
                sl1_LD = slice(0, N1 + 2 * b1)
                sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            elif j == 0:
                # Left edge: the left buffer wraps around to the right side
                # of the HD map (periodicity) — two pieces returned.
                sl1_LD = slice(b1, N1 + 2 * b1)
                sl1_HD = slice(0, N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl1_LD = slice(0, b1)
                sl1_HD = slice(2 ** HD_res[1] - b1, 2 ** HD_res[1])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            else:
                # Right edge: the right buffer wraps to the left of the HD map.
                assert j == Nchks_1 - 1
                sl1_LD = slice(0, N1 + b1)
                sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl1_LD = slice(N1 + b1, N1 + 2 * b1)
                sl1_HD = slice(0, b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
        elif i == 0:
            # Top row: the top buffer wraps to the bottom of the HD map.
            # Bulk 0 slices
            sl0_LD = slice(b0, N0 + 2 * b0)
            sl0_HD = slice(0, N0 + b0)
            if j == 0:
                # Upper left corner. Two tweaks.
                # Bulk :
                sl1_LD = slice(b1, N1 + 2 * b1)
                sl1_HD = slice(0, N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                # Top strip wrapped from the bottom of the HD map:
                sl1_LD = slice(b1, N1 + 2 * b1)
                sl1_HD = slice(0, N1 + b1)
                sl0_LD = slice(0, b0)
                sl0_HD = slice(N0H - b0, N0H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                # Left strip wrapped from the right of the HD map:
                sl0_LD = slice(b0, N0 + 2 * b0)
                sl0_HD = slice(0, N0 + b0)
                sl1_LD = slice(0, b1)
                sl1_HD = slice(N1H - b1, N1H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                # Corner square wrapped from the opposite corner:
                sl0_LD = slice(0, b0)
                sl1_LD = slice(0, b1)
                sl0_HD = slice(N0H - b0, N0H)
                sl1_HD = slice(N1H - b1, N1H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            elif j == Nchks_1 - 1:
                # upper right corner
                # Bulk :
                sl1_LD = slice(0, N1 + b1)
                sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(0, b0)
                sl0_HD = slice(N0H - b0, N0H)
                sl1_LD = slice(0, N1 + b1)
                sl1_HD = slice(N1H - N1 - b1, N1H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl1_LD = slice(N1 + b1, N1 + 2 * b1)
                sl1_HD = slice(0, b1)
                sl0_LD = slice(b0, N0 + 2 * b0)
                sl0_HD = slice(0, b0 + N0)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                # Last little square is missing :
                sl0_LD = slice(0, b0)
                sl0_HD = slice(N0H - b0, N0H)
                sl1_LD = slice(N1 + b1, N1 + 2 * b1)
                sl1_HD = slice(0, b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            else:
                # Top edge, interior column.
                assert 0 < j < Nchks_1 - 1
                sl1_LD = slice(0, N1 + 2 * b1)
                sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(0, b0)
                sl0_HD = slice(2 ** HD_res[0] - b0, 2 ** HD_res[0])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
        elif i == Nchks_0 - 1:
            # Bottom row: the bottom buffer wraps to the top of the HD map.
            sl0_LD = slice(0, N0 + b0)
            sl0_HD = slice(2 ** HD_res[0] - N0 - b0, 2 ** HD_res[0])
            if j == 0:
                # lower left corner. Two tweaks.
                # Bulk :
                sl1_LD = slice(b1, N1 + 2 * b1)
                sl1_HD = slice(0, N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl1_LD = slice(0, b1)
                sl1_HD = slice(2 ** HD_res[1] - b1, 2 ** HD_res[1])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(N0 + b0, N0 + 2 * b0)
                sl0_HD = slice(0, b0)
                sl1_LD = slice(b1, N1 + 2 * b1)
                sl1_HD = slice(0, N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(N0 + b0, N0 + 2 * b0)
                sl1_LD = slice(0, b1)
                sl0_HD = slice(0, b0)
                sl1_HD = slice(N1H - b1, N1H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            elif j == Nchks_1 - 1:
                # Lower right corner
                # Bulk :
                sl1_LD = slice(0, N1 + b1)
                sl1_HD = slice(2 ** HD_res[1] - N1 - b1, 2 ** HD_res[1])
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl1_LD = slice(N1 + b1, N1 + 2 * b1)
                sl1_HD = slice(0, b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(N0 + b0, N0 + 2 * b0)
                sl0_HD = slice(0, b0)
                sl1_LD = slice(0, N1 + b1)
                sl1_HD = slice(N1H - N1 - b1, N1H)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(N0 + b0, N0 + 2 * b0)
                sl1_LD = slice(N1 + b1, N1 + 2 * b1)
                sl0_HD = slice(0, b0)
                sl1_HD = slice(0, b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
            else:
                # Bottom edge, interior column.
                assert 0 < j < Nchks_1 - 1
                sl1_LD = slice(0, N1 + 2 * b1)
                sl1_HD = slice(j * N1 - b1, (j + 1) * N1 + b1)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                sl0_LD = slice(N0 + b0, N0 + 2 * b0)
                sl0_HD = slice(0, b0)
                ret_LD.append((sl0_LD, sl1_LD))
                ret_HD.append((sl0_HD, sl1_HD))
                return ret_LD, ret_HD
|
15,048 | cf38d7116e187b2dffd3801e7b122ca575f569da | # Setup file for package urlencode
from setuptools import setup
# Packaging metadata for the Quark-generated `urlencode` package
# (ships the urlencode module plus its urlencode_md companion package).
setup(name="urlencode",
      version="0.0.1",
      install_requires=["quark==0.0.1"],
      py_modules=['urlencode'],
      packages=['urlencode', 'urlencode_md'])
|
15,049 | f6f625c22005e5e4c56d7ed3e464985308a9d074 | from django.db import models
class Todo(models.Model):
    """A single to-do entry with a done flag and a creation timestamp."""

    # NOTE(review): null=None is unusual — Django treats it as falsy (same as
    # null=False); null=False was probably intended on both fields. Confirm
    # before changing, as altering it generates a migration.
    text = models.CharField(max_length=255, null=None)
    is_done = models.BooleanField(default=False, null=None)
    created_at = models.DateTimeField(auto_now_add=True)

    @classmethod
    def create(cls, **params):
        """Create, save and return a Todo built from keyword field values."""
        return cls.objects.create(**params)

    @classmethod
    def get_list_all(cls):
        """Return every Todo row as a concrete list."""
        return list(cls.objects.all())

    @classmethod
    def get_by_id(cls, id):
        """Return the Todo with primary key *id*, or None when it does not exist."""
        try:
            return cls.objects.get(id=id)
        except cls.DoesNotExist:
            return None
def test():
    """Placeholder; always returns 12."""
    return 12


def test1():
    """Placeholder; always returns 4."""
    return 4


def test2():
    """Placeholder; always returns 6."""
    return 6


def test3():
    """Placeholder; always returns 6 (duplicate of test2)."""
    return 6


def test4():
    """Placeholder; always returns 7."""
    return 7


def test5():
    """Placeholder; always returns 7 (duplicate of test4)."""
    return 7
|
15,050 | bab1597162db15947ee6b3b3fe18da75476a7665 | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
# Drive practiceselenium.com's checkout form end to end with dummy data.
# webdriver-manager downloads a chromedriver matching the installed Chrome.
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get("http://www.practiceselenium.com/")
# Open the sidebar menu, then click the purchase button.
elem = driver.find_element_by_class_name('editor_sidebarmore')
elem.click()
# NOTE(review): implicitly_wait sets a session-wide default for element
# lookups; calling it after every click is redundant (once is enough).
driver.implicitly_wait(5)
elem = driver.find_element_by_class_name('wsb-element-button')
elem.click()
driver.implicitly_wait(5)
# Fill the checkout form with placeholder customer and card details.
elem = driver.find_element_by_id('email')
elem.send_keys("test@ing.com")
elem = driver.find_element_by_id('name')
elem.send_keys("Jane Doe")
elem = driver.find_element_by_id('address')
elem.send_keys("42 Walloby Street")
sel = Select(driver.find_element_by_id('card_type'))
sel.select_by_visible_text('Visa')
elem = driver.find_element_by_id('card_number')
elem.send_keys("123123123")
elem = driver.find_element_by_id('cardholder_name')
elem.send_keys("Jane Doe")
elem = driver.find_element_by_id('verification_code')
elem.send_keys("1111")
|
15,051 | e053c75ece06f3405196612cbff62fd340f11416 | #!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Top Composer: Top storage client
Stores the distributions computed by the Top Composer and assigns them an UUID
:author: Thomas Calmant
:license: Apache Software License 2.0
:version: 3.0.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard
import logging
import threading
# iPOPO Decorators
from pelix.ipopo.decorators import ComponentFactory, Requires, Instantiate, \
BindField, Validate, Invalidate
# Composer
import cohorte.composer
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__=cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory()
@Requires('_composer', cohorte.composer.SERVICE_COMPOSER_TOP)
@Requires('_status', cohorte.composer.SERVICE_STATUS_TOP)
@Requires('_stores', cohorte.composer.SERVICE_TOP_STORAGE,
          aggregate=True, optional=True)
@Instantiate('cohorte-composer-top-storage-handler')
class TopStorageHandler(object):
    """
    Stores the state of all compositions handled by this top composer.

    Keeps every bound Top Storage service in sync with the Top Status:
    on bind/validate it reloads unknown distributions from the store and
    writes the current ones back; afterwards it mirrors status events
    (distribution_added / distribution_removed) into all stores.
    """
    def __init__(self):
        """
        Sets up members
        """
        # Top Composer (injected)
        self._composer = None
        # Top status (injected)
        self._status = None
        # Top storage services (injected, aggregate, optional)
        self._stores = []
        # Thread safety: guards the sync between stores and status
        self.__lock = threading.RLock()

    @BindField('_stores', if_valid=True)
    def _bind_store(self, field, svc, svc_ref):
        """
        New Top storage service bound

        :param field: Name of the injected field ('_stores')
        :param svc: The newly bound Top Storage service
        :param svc_ref: Reference of the bound service
        """
        with self.__lock:
            # Read what it has to give
            self.handle_store(svc)
            # Store what we started
            self.store_all(svc)

    @Validate
    def _validate(self, context):
        """
        Component validated
        """
        with self.__lock:
            if self._stores:
                # Handle already bound stores
                for store in self._stores:
                    # Read stored information
                    self.handle_store(store)
                    # Store the current information
                    self.store_all(store)
            # Register to status modifications
            self._status.add_listener(self)

    @Invalidate
    def _invalidate(self, context):
        """
        Component invalidated
        """
        self._status.remove_listener(self)

    def distribution_added(self, uid, name, distribution):
        """
        Called by the Top Status when a distribution has been stored

        :param uid: UID of the new composition
        :param name: Name of the new composition
        :param distribution: Computed distribution
        """
        with self.__lock:
            content = {'name': name, 'distribution': distribution}
            for storage in self._stores:
                try:
                    storage.store(uid, content)
                except Exception as ex:
                    # Best-effort: a failing store must not block the others
                    _logger.error("Error storing distribution: %s", ex)

    def distribution_removed(self, uid):
        """
        Called by the Top Status when a distribution has been removed

        :param uid: UID of the removed composition
        """
        with self.__lock:
            # NOTE(review): unlike distribution_added, a failing remove() is
            # not caught here, so one bad store aborts the loop — confirm
            # whether that is intentional.
            for store in self._stores:
                store.remove(uid)

    def handle_store(self, store):
        """
        Handles a new Top Storage service: reloads every composition the
        store knows about that the status does not.

        :param store: A Top Storage service
        """
        # Get storage UIDs
        uids = set(store.get_uids())
        # Remove known UIDs
        uids.difference_update(self._status.list())
        for uid in uids:
            # Reload all stored compositions
            stored = store.load(uid)
            name = stored['name']
            distribution = stored['distribution']
            try:
                _logger.debug("Reloading %s - %s...", name, uid)
                self._composer.reload_distribution(name, distribution, uid)
            except KeyError:
                # Already known distribution
                pass

    def store_all(self, store):
        """
        Stores the content of the status to the given top storage

        :param store: A top storage service
        """
        for uid in self._status.list():
            distribution = self._status.get(uid)
            name = self._status.get_name(uid)
            # Store data
            store.store(uid, {'name': name, 'distribution': distribution})
|
15,052 | c764cb5ef114bd5b3e3a4a76689c119fea30bedd | # Vishok Srikanth
# early version testing how to download NSF search results
import os
import bs4
import sqlite3
# How to Set Up Linux Prerequisites:
# install selenium, pyvirtualdisplay
# sudo pip3 install selenium pyvirtualdisplay
# install xvfb and chromium
# sudo apt-get updtae
# sudo apt-get install xvfb chromium-browser
# install older version of chromedriver manually:
# Linux:https://chromedriver.storage.googleapis.com/index.html?path=2.26/
# Download and extract the appropriate archive somewhere.
# Open a terminal wherever the executable is (or use its full filepath)
# sudo mv chromedriver /usr/bin/chromedriver
from JS_browser import JS_browser
link_to_visit = "https://www.nsf.gov/awardsearch/advancedSearchResult?PIId&PIFirstName&PILastName&PIOrganization&PIState&PIZip&PICountry&ProgOrganization&ProgEleCode&BooleanElement=All&ProgRefCode&BooleanRef=All&Program&ProgOfficer&Keyword=retina%20connectomics&AwardNumberOperator&AwardAmount&AwardInstrument&ActiveAwards=true&OriginalAwardDateOperator&StartDateOperator&ExpDateOperator"
def download_nsf_search(url):
    """Download the NSF award search results at *url* as an XML file.

    Drives a headless browser to the given search-results page, clicks the
    "Export as XML" link, and moves the downloaded file from the browser's
    default save location into the project directory.

    :param url: Full NSF advanced-search results URL to export.
    """
    # NOTE(review): the trailing '/' on this XPath looks accidental —
    # confirm it still matches the export anchor.
    xml_download_element = '//a[@title="Export as XML"]/'
    default_save_path = "/home/student/Downloads/Awards.xml"
    output_path = "/home/student/cs122_MVR/Awards.xml"
    # Bug fix: previously the `url` argument was ignored and the module-level
    # `link_to_visit` constant was always visited instead.
    browser = JS_browser(start_link=url)
    try:
        browser.download(xml_download_element, default_save_path, output_path)
    finally:
        # Always tear down the browser/virtual display, even if the
        # download fails, so xvfb/chromium processes are not leaked.
        browser.cleanup()
15,053 | 3f7b7bfcd683cb7007c82d56117c21bbb2b44578 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
# Bind the declarative models to the SQLite catalog database and open one
# module-level session that the helper functions below share.
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
def getCategories():
    """Return the name of every Category in the catalog, in query order."""
    names = []
    for category in session.query(Category).all():
        names.append(category.name)
    return names
|
15,054 | ae188a599f9d27fc15b6f7f86c18e2e640a068ef | #!/usr/bin/python
#coding=utf-8
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import sys, re
from simplified_scrapy.core.utils import printInfo
from simplified_scrapy.core.dictex import Dict
class XmlDictConfig(Dict):
    """Dict built from an XML element: attributes become keys, child elements
    become lists of child dicts keyed by tag.

    NOTE(review): relies on getDic()/ele2arr(), which are commented out below —
    presumably provided by the Dict base class (simplified_scrapy.core.dictex);
    confirm before refactoring.
    """
    def __init__(self, parent_element):
        # Copy the element's own attributes onto this dict.
        if parent_element.items():
            self.update(Dict(parent_element.items()))
        # self['text'] = parent_element.text
        flag = False  # becomes True if the element has any children
        for element in parent_element:
            flag = True
            # One list per child tag, appended to in document order.
            if (not self.get(element.tag)):
                self.update({element.tag: []})
            dic = self.getDic(element)
            self[element.tag].append(dic)
            count = len(element)
            if (count > 0):
                # Child has children of its own: recurse via ele2arr.
                self.ele2arr(dic, element)
        if (not flag):
            # Leaf element: record its tag only.
            self.update({'tag': parent_element.tag})  #, 'text':parent_element.text})

    # def getDic(self,element):
    #     if element.items():
    #         dic = dict(element.items())
    #         dic['text'] = element.text
    #     else:
    #         dic={'text':element.text}
    #     return dic

    # def ele2arr(self,dic,elements):
    #     if(not dic.get("children")):
    #         dic["children"] = {}
    #     if(not dic["text"]):
    #         dic["text"]=""
    #     for element in elements:
    #         if( not dic["children"].get(element.tag)):
    #             dic["children"].update({element.tag:[]})
    #         if(element.text):
    #             dic["text"] = dic["text"]+element.text
    #         if(element.tail):
    #             dic["text"] = dic["text"]+element.tail
    #         dic["children"][element.tag].append(self.getDic(element))

    # def __getattr__(self,attr):
    #     if(self.get(attr)):
    #         return self.get(attr)
    #     else:
    #         return None

    # def __getitem__(self,attr):
    #     if(self.get(attr)):
    #         return self.get(attr)
    #     else:
    #         return None
def convert2Dic(html):
    """Parse the first tag of an HTML/XML fragment into a Dict of its
    attributes (plus its tag name under 'tag').

    Strategy: extract the text between the first '<' and '>', normalise
    quoting so every attribute value is wrapped in double quotes, then split
    on '"' and pair up attribute names with values. If any step fails, fall
    back to parsing the fragment with ElementTree and wrapping it in
    XmlDictConfig. Returns None when both attempts fail.
    """
    try:
        start = html.find('<')
        end = html.find('>')
        # Keep only the tag's interior; drop a self-closing '/'.
        html = html[start + 1:end].strip('/').strip()
        # Collapse whitespace (incl. non-breaking spaces) and normalise
        # single quotes / '= "' spacing into canonical ="..." form.
        html = re.sub('(\\s| )+', ' ', html, 0)
        html = re.sub('(\')+', '"', html, 0)
        html = re.sub('(=\s*")+', '="', html, 0)
        lstC = []  #list(html)
        N = len(html)
        i = 0
        first = False  # True while inside a quoted value
        flag = False   # True while inside an unquoted value we are re-quoting
        # Rewrite unquoted attribute values (attr=value) as attr="value".
        # NOTE(review): html[i + 1] can index past the end when '=' is the
        # last character — that IndexError is swallowed by the fallback path.
        while i < N:
            if html[i] == '"':
                lstC.append(html[i])
                first = not first
            elif not first and html[i] == '=' and html[i + 1] != '"':
                lstC.append(html[i])
                lstC.append('"')
                flag = True
            elif not first and flag and html[i] == ' ':
                flag = False
                lstC.append('"')
                lstC.append(html[i])
            else:
                lstC.append(html[i])
            i += 1
        html = ''.join(lstC)
        # Alternating split: even pieces hold attribute names, odd pieces
        # hold the quoted values.
        paras = html.split('"')
        dic = Dict()
        lastP = None  # attribute name awaiting its value
        first = True
        for para in paras:
            if (first):
                # First piece carries the tag name and possibly the first
                # attribute name.
                first = False
                tmp = para.split()
                dic['tag'] = tmp[0]
                if (len(tmp) > 1):
                    lastP = tmp[1].strip().strip('=').strip()
                continue
            if (lastP):
                if (not dic[lastP]):
                    dic[lastP] = para
                else:
                    # Repeated attribute: concatenate values with a space.
                    dic[lastP] += ' '
                    dic[lastP] += para
                lastP = None
            elif para:
                if (para.find('=') > 0):
                    lastP = para.strip().strip('=').strip()
                else:
                    # Bare (valueless) attribute.
                    dic[para] = ''
        return dic
    except Exception as err:
        printInfo(err)
    # Fallback: let ElementTree parse the fragment, synthesising a closing
    # tag when the fragment has none.
    try:
        tag = ''
        if (html.find('</') < 0 and html.find('/>') < 0):
            start = html.find('<')
            end = html.find(' ', start + 1)
            tag = '</' + html[start + 1:end] + '>'
        tree = ET.XML(html + tag)
        return XmlDictConfig(tree)
    except Exception as err:
        printInfo(err)
    return None
|
15,055 | eee5aef969da745d00423d0c002e359bdcdfc182 | """
Routes Configuration File
"""
from system.core.router import routes
# Route table: maps "<HTTP method> + <URL pattern>" to "Controller#action".
#=============
#Default Route
#=============
routes['default_controller'] = 'Pages'
#=============
#Access Routes
#=============
routes['GET']['/login'] = 'Pages#login'
routes['GET']['/logout'] = 'Auths#reset'
routes['POST']['/processreg'] = 'Users#process_reg'
routes['POST']['/processlog'] = 'Auths#process_log'
#=============
#Public Routes
#=============
routes['GET']['/home'] = 'Pages#index'
routes['GET']['/about'] = 'Pages#about'
routes['GET']['/contact'] = 'Pages#contact'
routes['GET']['/roster'] = 'Pages#roster'
routes['GET']['/roster/artist/<int:id>'] = 'Pages#artist'
routes['GET']['/services'] = 'Pages#services'
routes['GET']['/blog'] = 'Pages#blog'
routes['GET']['/blog/<int:id>'] = 'Pages#blogPost'
#=============
#Private Routes
#=============
routes['GET']['/dashboard'] = 'Pages#dashboard'
#=============
#Misc Routes
#=============
routes['POST']['/sendemail'] = 'Contacts#index'
#=============
#Staff Routes
#=============
#------------------------------------
#Serve Views
#------------------------------------
routes['GET']['/users/new'] = 'Users#newStaff'
#------------------------------------
#CRUD Routes
#------------------------------------
routes['POST']['/users'] = 'Users#create_user' #CREATE: Create a new user with data passed from a form
routes['GET']['/users'] = 'Users#index' #READ: Get all users in the DB now
routes['GET']['/users/<int:id>'] = 'Users#get_user' #READ: Get a single user by ID
routes['POST']['/users/<int:id>/edit'] = 'Users#update_user' #UPDATE: Update a single user by ID
routes['POST']['/users/<int:id>/delete'] = 'Users#delete_user' #DELETE: Delete a single user by ID
#vvvv *** Not Secure ***
# NOTE(review): state-changing admin promotion exposed over GET with no
# apparent auth — should be a POST behind an authorization check.
routes['GET']['/users/<int:id>/admin'] = 'Users#make_admin' #UPDATE: Make staff member an admin
#^^^^ *** Not Secure ***
#=============
#Artist Routes
#=============
#------------------------------------
#Serve Form Views
#------------------------------------
routes['GET']['/artists/new'] = 'Artists#newArtist'
routes['GET']['/artists/profile/detailed/<int:artist_id>'] = 'Artists#newDetailProfile'
routes['GET']['/artists/profile/detailed/<int:artist_id>/edit'] = 'Artists#get_detailed_profile'
routes['GET']['/artists/profile/digital/<int:artist_id>'] = 'Artists#newDigitalProfile'
routes['GET']['/artists/profile/digital/<int:artist_id>/edit'] = 'Artists#get_digital_profile'
routes['GET']['/artists/profile/full/<int:artist_id>'] = 'Artists#get_artist_full_profile' #READ: Get a single artist by artist_id
#------------------------------------
#Artist Basic Profile CRUD Routes
#------------------------------------
routes['GET']['/artists'] = 'Artists#get_roster' #READ: Get all artists in the DB now
routes['POST']['/artists'] = 'Artists#create_artist' #CREATE: Create a new artist with data passed from a form
routes['GET']['/artists/<int:id>'] = 'Artists#get_artist' #READ: Get a basic artist profile by ID
routes['POST']['/artists/<int:id>/edit'] = 'Artists#update_artist' #UPDATE: Update a basic artist profile by ID
routes['POST']['/artists/<int:id>/delete'] = 'Artists#delete_artist' #DELETE: Delete a artist and all associated profiles by ID
routes['POST']['/artists/<int:id>/deactivate'] = 'Artists#deactivate_artist' #UPDATE: Mark an artist inactive by ID
routes['POST']['/artists/<int:id>/activate'] = 'Artists#activate_artist' #UPDATE: Mark an artist active by ID
#------------------------------------
#Artist Detailed Profile CRUD Routes
#------------------------------------
routes['POST']['/artists/profile/detailed/<int:artist_id>'] = 'Artists#create_detailed_profile' #CREATE: Create a new artist profile with data passed from a form
routes['POST']['/artists/profile/detailed/<int:artist_id>/edit'] = 'Artists#update_detailed_profile' #UPDATE: Update a single artist profile by artist_id
routes['POST']['/artists/profile/detailed/<int:artist_id>/delete'] = 'Artists#delete_detailed_profile' #DELETE: Delete a single artist profile by artist_id
#------------------------------------
#Artist Digital Profile CRUD Routes
#------------------------------------
routes['POST']['/artists/profile/digital/<int:artist_id>'] = 'Artists#create_digital_profile' #CREATE: Create a new digital profile with data passed from a form
routes['POST']['/artists/profile/digital/<int:artist_id>/edit'] = 'Artists#update_digital_profile' #UPDATE: Update a single digital profile by artist_id
routes['POST']['/artists/profile/digital/<int:artist_id>/delete'] = 'Artists#delete_digital_profile' #DELETE: Delete a single digital profile by artist_id
#=============
#Blog Routes
#=============
#------------------------------------
#CRUD Routes
#------------------------------------
routes['GET']['/blogs'] = 'Blogs#manageBlogs' #READ: Get all blog post
routes['GET']['/blogs/<int:id>'] = 'Blogs#get_blog' #READ: Get a single blog by ID
routes['POST']['/blogs'] = 'Blogs#create_blog' #CREATE: Create a new blog with data passed from a form
routes['POST']['/blogs/<int:id>/edit'] = 'Blogs#update_blog' #UPDATE: Update blog post by ID
routes['POST']['/blogs/<int:id>/delete'] = 'Blogs#delete_blog' #DELETE: Delete a single blog by ID
routes['GET']['/newBlog'] = 'Blogs#newBlog'
#=============
#Press Routes
#=============
#------------------------------------
#CRUD Routes
#------------------------------------
routes['GET']['/press/new/<int:artist_id>'] = 'PressList#newPress'
routes['GET']['/press/<int:artist_id>'] = 'PressList#index' #READ: Get all press items for an artist by artist_id
routes['GET']['/press/<int:artist_id>/<int:id>'] = 'PressList#get_press' #READ: Get a single press item by artist_id and id
routes['POST']['/press/<int:artist_id>'] = 'PressList#create_press' #CREATE: Create a new press item with data passed from a form
routes['POST']['/press/<int:artist_id>/<int:id>/edit'] = 'PressList#update_press' #UPDATE: Update a single press item by artist_id and id
routes['POST']['/press/<int:artist_id>/<int:id>/delete'] = 'PressList#delete_press' #DELETE: Delete a single press item by artist_id and id
routes['POST']['/press/<int:artist_id>/delete'] = 'PressList#delete_all_press' #DELETE: Delete all press items for an artist by artist_id
#=============
#Album Routes
#=============
|
15,056 | ff7715d93b61518a74454d89bdcb288deb89f8ff | # -*- coding: utf-8 -*-
# ==============================================================================
# MIT License
#
# Copyright (c) 2019 Albert Moky
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
"""
Filter
~~~~~~
Filter for delivering message
"""
from typing import Optional
from dimp import ID
from dimp import Envelope, ReliableMessage
from dimp import Content, TextContent
from dimp import HandshakeCommand
from ..common import Facebook, Database
class Filter:
    """Delivery filter: rejects messages whose sender is blocked or whose
    session has not completed the handshake. Each check_* hook returns a
    Content response when the message must be rejected, or None to let it
    through.
    """
    def __init__(self, messenger):
        super().__init__()
        # messenger
        self.__messenger = messenger

    @property
    def messenger(self):  # ServerMessenger
        return self.__messenger

    @property
    def facebook(self) -> Facebook:
        return self.messenger.facebook

    @property
    def database(self) -> Database:
        return self.facebook.database

    def __name(self, identifier: ID) -> str:
        """Best-effort display name for an ID.

        NOTE(review): falls back to returning the ID object itself even
        though the annotation says str — callers only interpolate it into
        '%s' messages, so this works, but the annotation is inaccurate.
        """
        profile = self.facebook.profile(identifier)
        if profile is not None:
            name = profile.name
            if name is not None:
                return name
        return identifier

    #
    #   check
    #
    def __check_blocked(self, envelope: Envelope) -> Optional[Content]:
        """Return a rejection TextContent when the receiver (or group)
        blocks the sender; None otherwise."""
        sender = self.facebook.identifier(envelope.sender)
        receiver = self.facebook.identifier(envelope.receiver)
        group = self.facebook.identifier(envelope.group)
        # check block-list
        if self.database.is_blocked(sender=sender, receiver=receiver, group=group):
            nickname = self.__name(identifier=receiver)
            if group is None:
                text = 'Message is blocked by %s' % nickname
            else:
                grp_name = self.__name(identifier=group)
                text = 'Message is blocked by %s in group %s' % (nickname, grp_name)
            # response
            res = TextContent.new(text=text)
            res.group = group
            return res

    def __check_login(self, envelope: Envelope) -> Optional[Content]:
        """Return a handshake request when the current session is not yet
        valid; None when the client is properly logged in."""
        # check remote user
        user = self.messenger.remote_user
        # TODO: check neighbour stations
        # assert user is not None, 'check client for sending message after handshake accepted'
        if user is None:
            # FIXME: make sure the client sends message after handshake accepted
            sender = self.facebook.identifier(envelope.sender)
            session = self.messenger.current_session(identifier=sender)
            assert session is not None, 'failed to get session for sender: %s' % sender
            assert not session.valid, 'session error: %s' % session
        else:
            session = self.messenger.current_session(identifier=user.identifier)
            assert session is not None, 'failed to get session for user: %s' % user
        # check session valid
        if not session.valid:
            return HandshakeCommand.ask(session=session.session_key)

    #
    #   filters
    #
    # NOTE(review): the three hooks below are identical — they could share a
    # single helper; kept separate presumably so each message path can grow
    # its own rules later.
    def check_broadcast(self, msg: ReliableMessage) -> Optional[Content]:
        res = self.__check_login(envelope=msg.envelope)
        if res is not None:
            # session invalid
            return res
        res = self.__check_blocked(envelope=msg.envelope)
        if res is not None:
            # blocked
            return res

    def check_deliver(self, msg: ReliableMessage) -> Optional[Content]:
        res = self.__check_login(envelope=msg.envelope)
        if res is not None:
            # session invalid
            return res
        res = self.__check_blocked(envelope=msg.envelope)
        if res is not None:
            # blocked
            return res

    def check_forward(self, msg: ReliableMessage) -> Optional[Content]:
        res = self.__check_login(envelope=msg.envelope)
        if res is not None:
            # session invalid
            return res
        res = self.__check_blocked(envelope=msg.envelope)
        if res is not None:
            # blocked
            return res
|
15,057 | d38e64f5a85de753accb854ebdc3e32bb9c41bc9 | #############################################
# #
# Code for various different classification #
# algorithms to be used in conjunction #
# with the wavelength selection algorithms. #
# #
#############################################
# Bad practice, but the warnings were getting annoying
import warnings
warnings.filterwarnings("ignore")
#Other imports
from abc import ABCMeta, abstractmethod
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold
from sklearn.tree import DecisionTreeClassifier
class Classifier:
    """
    Abstract base for the classification algorithms used with the
    wavelength-selection code. Concrete subclasses implement `score`.

    Note: the Python-2-style `__metaclass__` below has no effect on
    Python 3, so abstractness is not actually enforced there.
    """
    __metaclass__ = ABCMeta

    def __init__(self, data):
        # data: sequence of (input_vector, label) pairs
        self.data = data
        self.model = None

    def _split_inputs_outputs(self, data):
        """Split (input, label) pairs into two parallel numpy arrays."""
        features = [point[0] for point in data]
        labels = [point[1] for point in data]
        return np.array(features), np.array(labels)

    @abstractmethod
    def score(self, test_data):
        """Classify a test data set and return the accuracy achieved."""
        pass
class knn(Classifier):
    """
    k-nearest neighbors classifier.

    NOTE(review): score() takes no test_data argument, so it does not match
    the abstract Classifier.score(test_data) signature (harmless on Python 3
    where the Py2-style metaclass is inert). score_one/score are duplicated
    verbatim in dtree — candidates for hoisting into Classifier.
    """
    def __init__(self, data, k=3):
        super(knn, self).__init__(data)
        self.k = k  # number of neighbours consulted per prediction
        self.model = None
        self.all_input, self.all_labels = self._split_inputs_outputs(self.data)

    def _fit(self, data):
        """
        Fit and return a KNeighborsClassifier on the given (input, label) pairs.
        """
        train_in, train_labels = self._split_inputs_outputs(data)
        clf = KNeighborsClassifier(n_neighbors=self.k)
        clf.fit(train_in, train_labels)
        return clf

    def score_one(self, test_data):
        """
        Return the accuracy attained by the fitted model on *test_data*
        (fraction of correct predictions; requires self.model to be set).
        """
        test_in, test_labels = self._split_inputs_outputs(test_data)
        correct = 0
        total = 0
        for i, test_input in enumerate(test_in):
            # Predict one sample at a time; reshape to a (1, n_features) row.
            prediction = self.model.predict(test_input.reshape(1, -1))
            if prediction[0] == test_labels[i]:
                correct += 1
            total += 1
        return float(correct) / total

    def score(self):
        """
        Use 10-fold CV to produce an average accuracy over self.data.
        Side effect: leaves self.model set to the last fold's model.
        """
        splits = 10
        score = 0
        kf = KFold(n_splits=splits, shuffle=True)
        kf.get_n_splits(self.data)
        for train_ind, test_ind in kf.split(self.data):
            train = [self.data[ind] for ind in train_ind]
            test = [self.data[ind] for ind in test_ind]
            self.model = self._fit(train)
            temp_score = self.score_one(test)
            score += temp_score
        return score / float(splits)
class dtree(Classifier):
    """
    Sklearn's decision tree implementation.

    NOTE(review): score_one/score are verbatim copies of knn's — candidates
    for hoisting into Classifier.
    """
    def __init__(self, data):
        super(dtree, self).__init__(data)
        self.model = None
        self.all_input, self.all_labels = self._split_inputs_outputs(self.data)

    def _fit(self, data):
        """
        Fit and return a DecisionTreeClassifier on the given (input, label) pairs.
        """
        train_in, train_labels = self._split_inputs_outputs(data)
        # A float min_samples_leaf is a fraction: each leaf keeps >= 5% of samples.
        clf = DecisionTreeClassifier(min_samples_leaf=0.05)
        clf.fit(train_in, train_labels)
        return clf

    def score_one(self, test_data):
        """
        Return the accuracy attained by the fitted model on *test_data*
        (fraction of correct predictions; requires self.model to be set).
        """
        test_in, test_labels = self._split_inputs_outputs(test_data)
        correct = 0
        total = 0
        for i, test_input in enumerate(test_in):
            prediction = self.model.predict(test_input.reshape(1, -1))
            if prediction[0] == test_labels[i]:
                correct += 1
            total += 1
        return float(correct) / total

    def score(self):
        """
        Use 10-fold CV to produce an average accuracy over self.data.
        Side effect: leaves self.model set to the last fold's model.
        """
        splits = 10
        score = 0
        kf = KFold(n_splits=splits, shuffle=True)
        kf.get_n_splits(self.data)
        for train_ind, test_ind in kf.split(self.data):
            train = [self.data[ind] for ind in train_ind]
            test = [self.data[ind] for ind in test_ind]
            self.model = self._fit(train)
            temp_score = self.score_one(test)
            score += temp_score
        return score / float(splits)
class Logistic(Classifier):
    """
    Logistic regression classifier.

    Fix from review: ``__init__`` previously called ``self._fit(inputs, labels)``
    where ``labels`` was undefined (NameError), while ``_fit`` accepted no
    arguments and read undefined globals ``inputs``/``labels``. ``_fit`` now
    takes the split-out arrays explicitly.
    """
    def __init__(self, data, inputs=None, outputs=None):
        # ``inputs``/``outputs`` are kept for interface compatibility but
        # the split is always recomputed from ``data``.
        super(Logistic, self).__init__(data)
        self.inputs, self.labels = self._split_inputs_outputs(data)
        self.model = self._fit(self.inputs, self.labels)
    def _fit(self, inputs, labels):
        """
        Fit the logistic regression model on ``inputs``/``labels``
        and return the trained estimator.
        """
        clf = LogisticRegression()
        clf.fit(inputs, labels)
        return clf
    def score(self, test_data):
        """
        Return the mean accuracy obtained on the
        test data set.
        """
        ins, outs = self._split_inputs_outputs(test_data)
        return self.model.score(ins, outs)
class FFNN(Classifier):
    """
    Feed-forward neural network classifier: a Keras Sequential MLP with
    two 15-unit ReLU hidden layers and a softmax output layer.
    """
    def __init__(self, data):
        super(FFNN, self).__init__(data)
        self.inputs, self.labels = self._split_inputs_outputs(data)
    def train(self):
        """
        Train the neural network using Adam optimizer.

        Builds the model from scratch on every call and stores it on
        ``self.model``; ``score`` therefore requires ``train`` first.
        """
        input_size = len(self.inputs[0])
        output_size = len(set(self.labels))
        hidden_size_1 = 15
        hidden_size_2 = 15
        # One hot encode the labels
        encoder = LabelEncoder()
        encoder.fit(self.labels)
        enc_labels = encoder.transform(self.labels)
        enc_labels = np_utils.to_categorical(enc_labels)
        # Create the MLP
        model = Sequential()
        model.add(Dense(hidden_size_1, activation='relu', input_dim=input_size))
        model.add(Dense(hidden_size_2, activation='relu'))
        model.add(Dense(output_size, activation='softmax'))
        # Compile model with optimizer and loss function
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        # Train the model
        model.fit(self.inputs, enc_labels, steps_per_epoch=1000, epochs=20, verbose=2)
        self.model = model
    def score(self, test_data):
        """
        Return the accuracy attained by the neural network
        on the test set.

        NOTE(review): re-fitting LabelEncoder on the test labels assumes the
        test set contains the same label classes (hence the same sorted
        encoding order) as training — confirm this holds for small folds.
        """
        ins, outs = self._split_inputs_outputs(test_data)
        # One hot encode the input/labels
        encoder = LabelEncoder()
        encoder.fit(outs)
        enc_labels = encoder.transform(outs)
        enc_labels = np_utils.to_categorical(enc_labels)
        _, score = self.model.evaluate(ins, enc_labels, verbose=2)
        return score
# Sanity check: knn counts as a Classifier subclass via inheritance, so the
# explicit ABC register() call below is redundant and stays disabled.
assert issubclass(knn, Classifier)
#Classifier.register(knn)
|
15,058 | 0911dcd6ceeaaa3ea94a626d40cbd98b46bed210 | from graphene_sqlalchemy import SQLAlchemyObjectType
from models import ModelTeacher, ModelStudent
import graphene
class TeacherAttribute:
    """Shared graphene field declarations mixed into Teacher-based types."""
    # id = graphene.ID(description="row ID")
    name = graphene.String(description="Name of the Teacher")
    password = graphene.String(description="Password for Login")
class Teacher(SQLAlchemyObjectType, TeacherAttribute):
    """Teacher node: Relay-compatible GraphQL type backed by ModelTeacher."""
    class Meta:
        model = ModelTeacher
        interfaces = (graphene.relay.Node, )
class StudentAttribute:
    """Shared graphene field declarations mixed into Student-based types."""
    admno = graphene.String(description="Student's Admission Number")
    section = graphene.String(description="Section of Student")
    name = graphene.String(description="Name of Student")
    password = graphene.String(description="Password of student")
    teacher_id = graphene.ID(description="Teacher ID")
class Student(SQLAlchemyObjectType, StudentAttribute):
    """Student node: Relay-compatible GraphQL type backed by ModelStudent."""
    class Meta:
        model = ModelStudent
        interfaces = (graphene.relay.Node, )
# query {
# teacherList {
# edges{
# node{
# id
# name
# }
# }
# }
# } |
15,059 | 238ae47cb3201b1be7a77c1825924af0c7bb4c38 | from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.linalg import Vectors
from pyspark import SparkContext
sc = SparkContext('local')
# Two labeled training points (label, dense feature vector).
denseVec1 = LabeledPoint(1.0, Vectors.dense([3.0,5.0,1.0]))
denseVec2 = LabeledPoint(0.0, Vectors.dense([2.0, 0.0, 1.0]))
vectors = [denseVec1, denseVec2]
points = sc.parallelize(vectors)
print(points)
# Set the decision-tree parameters and train the model.
numClasses = 3
categoricalFeaturesInfo = {}
impurity = "gini"
maxDepth = 5
maxBins = 32
decisionTreeModel = DecisionTree.trainClassifier(points, numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins)
print("决策树模型:" + decisionTreeModel.toDebugString())
# Feed a new, unseen sample into the trained model.
denseVec = Vectors.dense(3.5, 2.0, 3.0)
predict = decisionTreeModel.predict(denseVec)
# NOTE(review): "%s" is not interpolated here — print receives two
# arguments, so the literal "predict: %s" is shown (matches the sample
# output below); left unchanged in this doc-only pass.
print("predict: %s\n", predict)
# Sample run output (kept verbatim):
"""
决策树模型:DecisionTreeModel classifier of depth 1 with 3 nodes
If (feature 0 <= 2.5)
Predict: 0.0
Else (feature 0 > 2.5)
Predict: 1.0
predict: %s
1.0
"""
15,060 | 12cbf77718427166a1252b5264f40f935557be97 | from django.core.exceptions import ImproperlyConfigured
settings = {
'ACTIVATION_URL': 'activate/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': True,
'SEND_CONFIRMATION_EMAIL': True,
'SET_PASSWORD_RETYPE': False,
'SET_USERNAME_RETYPE': False,
'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND': False,
'ROOT_VIEW_URLS_MAPPING': {},
'PASSWORD_VALIDATORS': [],
}
def get(key):
    """Return the configuration value registered under ``key``.

    Raises:
        ImproperlyConfigured: if ``key`` is not present in ``settings``;
            the original KeyError is chained for a clearer traceback.
    """
    try:
        return settings[key]
    except KeyError as err:
        raise ImproperlyConfigured('Missing settings: {}'.format(key)) from err
|
15,061 | 2784588463e4fcfe4da10617f2c7c45a37ebb27b | from Person.Person import Person
def main():
bob = Person('bob')
bob.eat()
dob = Student('dob','ubb')
dob.eat()
dob.sleep()
main() |
15,062 | 1ba5e521d88d6464490a596a24713f1c692a798c | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
#! /usr/bin/env python
#
# Example script that shows how to perform the registration
from __future__ import print_function, absolute_import
import elastix
import matplotlib.pyplot as plt
import numpy as np
import imageio
import os
import SimpleITK as sitk
import sys
from mpl_toolkits.mplot3d import Axes3D
# IMPORTANT: these paths may differ on your system, depending on where
# Elastix has been installed. Please set accordingly.
ELASTIX_PATH = os.path.join(r'C:\Elastix\elastix.exe')
TRANSFORMIX_PATH = os.path.join(r'C:\Elastix\transformix.exe')
if not os.path.exists(ELASTIX_PATH):
raise IOError('Elastix cannot be found, please set the correct ELASTIX_PATH.')
if not os.path.exists(TRANSFORMIX_PATH):
raise IOError('Transformix cannot be found, please set the correct TRANSFORMIX_PATH.')
image_folder = "TrainingData"
param_file = 'Par0001affine.txt'
result_dir = r'Results/test'
fixed_subject = "p102"
moving_subject = "p107"
fixed_subject_path = os.path.join(image_folder,fixed_subject)
moving_subject_path = os.path.join(image_folder,moving_subject)
fixed_img_path = os.path.join(fixed_subject_path,'mr_bffe.mhd')
fixed_seg_img_path = os.path.join(fixed_subject_path,'prostaat.mhd')
moving_img_path = os.path.join(moving_subject_path,'mr_bffe.mhd')
moving_seg_img_path = os.path.join(moving_subject_path,'prostaat.mhd')
fixed_img = sitk.GetArrayFromImage(sitk.ReadImage(fixed_img_path))
fixed_seg_img = sitk.GetArrayFromImage(sitk.ReadImage(fixed_seg_img_path))
moving_img = sitk.GetArrayFromImage(sitk.ReadImage(moving_img_path))
moving_seg_img = sitk.GetArrayFromImage(sitk.ReadImage(moving_seg_img_path))
slice_id = 60
fixed_img_slice = fixed_img[slice_id,:,:]
fixed_seg_img_slice = fixed_seg_img[slice_id,:,:]
moving_img_slice = moving_img[slice_id,:,:]
moving_seg_img_slice = moving_seg_img[slice_id,:,:]
# Define a new elastix object 'el' with the correct path to elastix
el = elastix.ElastixInterface(elastix_path=ELASTIX_PATH)
# Make a results directory if none exists
if not os.path.exists(result_dir):
os.mkdir(result_dir)
else:
if len(os.listdir(result_dir) ) != 0 and result_dir != r'Results/test':
inp = input("OK to overwrite? (y/n) ")
if inp != 'y':
sys.exit()
#fig, ax = plt.subplots(1, 3, figsize=(10, 10))
# Execute the registration. Make sure the paths below are correct, and
# that the results folder exists from where you are running this script
el.register(
fixed_image=fixed_img_path,
moving_image=moving_img_path,
parameters=[param_file],
output_dir=result_dir)
# Find the results
transform_path = os.path.join(result_dir, 'TransformParameters.0.txt')
result_path = os.path.join(result_dir, 'result.0.mhd')
#
# Open the logfile into the dictionary log
#fig = plt.figure()
#
#for i in range(4):
# log_path = os.path.join(result_dir, 'IterationInfo.0.R{}.txt'.format(i))
# log = elastix.logfile(log_path)
# # Plot the 'metric' against the iteration number 'itnr'
# plt.plot(log['itnr'], log['metric'])
#plt.legend(['Resolution {}'.format(i) for i in range(4)])
#
#
#fixed_image = sitk.GetArrayFromImage(sitk.ReadImage(fixed_image_path))
#moving_image = sitk.GetArrayFromImage(sitk.ReadImage(moving_image_path))
#moving_subject_img = sitk.GetArrayFromImage(sitk.ReadImage(result_path))
#moving_subject_img_slice = moving_subject_img[40,:,:]
#
## Load the fixed, moving, and result images
##fixed_image = imageio.imread(fixed_image_path)[:, :, 0]
##moving_image = imageio.imread(moving_image_path)[:, :, 0]
##transformed_moving_image = imageio.imread(result_path)
#
#
# Make a new transformix object tr with the CORRECT PATH to transformix
tr = elastix.TransformixInterface(parameters=transform_path,
transformix_path=TRANSFORMIX_PATH)
# Transform a new image with the transformation parameters
img_out = os.path.join(result_dir,'image')
seg_out = os.path.join(result_dir,'segmentation')
t_img_path = tr.transform_image(moving_img_path, output_dir=img_out)
t_seg_path = tr.transform_image(moving_seg_img_path, output_dir=seg_out)
t_img = sitk.GetArrayFromImage(sitk.ReadImage(t_img_path))
t_seg_img = sitk.GetArrayFromImage(sitk.ReadImage(t_seg_path))
t_img_slice = t_img[slice_id,:,:]
t_seg_img_slice = t_seg_img[slice_id,:,:]
#t_img = imageio.imread(t_img_path.replace('dcm', 'tiff'))[slice_id,:,:]
fig, ax = plt.subplots(2, 3, figsize=(10, 15))
ax[0,0].imshow(fixed_img_slice)#, cmap='gray')
ax[0,0].set_title('Fixed Image')
ax[1,0].imshow(fixed_seg_img_slice, cmap='gray')
ax[1,0].set_title('Segmentation')
ax[0,1].imshow(moving_img_slice)#, cmap='gray')
ax[0,1].set_title('Moving Image')
ax[1,1].imshow(moving_seg_img_slice, cmap='gray')
ax[1,1].set_title('Segmentation')
ax[0,2].imshow(t_img_slice)
ax[0,2].set_title('Transformed moving image')
ax[1,2].imshow(t_seg_img_slice, cmap='gray')
ax[1,2].set_title('Transformed segmentation')
pos_fixed = np.where(fixed_seg_img==1)
pos_moving = np.where(moving_seg_img==1)
pos_transformed = np.where(t_seg_img==1)
fig = plt.figure(figsize=plt.figaspect(0.33))
ax = fig.add_subplot(1,3,1, projection='3d')
ax.scatter(pos_fixed[0], pos_fixed[1], pos_fixed[2], c='black')
ax.set_title('Fixed')
ax = fig.add_subplot(1,3,2, projection='3d')
ax.scatter(pos_moving[0], pos_moving[1], pos_moving[2], c='black')
ax.set_title('Moving')
ax = fig.add_subplot(1,3,3, projection='3d')
ax.scatter(pos_transformed[0], pos_transformed[1], pos_transformed[2], c='black')
ax.set_title('Transformed')
plt.show()
#
## Get the Jacobian matrix
#jacobian_matrix_path = tr.jacobian_matrix(output_dir=result_dir)
#
## Get the Jacobian determinant
#jacobian_determinant_path = tr.jacobian_determinant(output_dir=result_dir)
#
## Get the full deformation field
#deformation_field_path = tr.deformation_field(output_dir=result_dir)
#
#jacobian_image = imageio.imread(jacobian_determinant_path.replace('dcm', 'tiff'))
#jacobian_binary = jacobian_image>0
#
## Add a plot of the Jacobian determinant (in this case, the file is a tiff file)
#ax[3].imshow(jacobian_binary,cmap='gray')
#ax[3].set_title('Jacobian\ndeterminant')
#
# Show the plots
#[x.set_axis_off() for x in ax]
|
15,063 | 7003d016330aa142444c972371336d485304c602 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 08:27:28 2020
@author: Ken
"""
data_in = [1989, 31, 0, 0, 0, 0, 0, 0, 0, 2021, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0] |
15,064 | 624974edf011ef4c36c32d7886add23c2953f9ab | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
# Read the number of test cases; for each, count how many elements of the
# input list can be matched into equal-value pairs and print that count.
n = int(input())
# n,t=list(map(int,input().split()))
# serial=input().split()
# a=list(map(int,input().split()))
for _ in range(n):
    # NOTE(review): this value is read but unused — presumably the declared
    # length of the following list.
    num = int(input())
    a = list(map(int, input().split()))
    # Each distinct value contributes floor(count/2) pairs, i.e.
    # count//2 * 2 pairable elements. (Renamed from ``sum``, which
    # shadowed the builtin, and the inner loop no longer reuses ``n``.)
    pairs = sum(a.count(v) // 2 for v in set(a))
    print(pairs * 2)
15,065 | 1fd79f2b1ccd83ba2bfe82558699b7e33eee034f | import requests
from bs4 import BeautifulSoup
from settings import COOKIE
import pickle
def bs_css_parse_movies(html, j):
    """Scrape one forum listing page of bbs.yingjiesheng.com.

    Args:
        html: raw HTML of the listing page.
        j: 1-based listing page number, embedded into the per-thread
           pagination URLs used as lookup keys.

    Returns:
        A list of dicts per thread row with keys ``url``, ``created_time``,
        ``last_time_reply`` and ``page`` (pagination label from the row's
        ``span.tps`` link, or 1 when the thread has a single page).

    NOTE(review): assumes every thread row carries exactly two ``<em>``
    timestamps (created / last reply) so ``time_list[2*i]`` pairs up with
    ``div_list[i]`` — confirm against the live page markup.
    """
    res_list = []
    soup = BeautifulSoup(html, "lxml")
    div_list = soup.select("tbody > tr > th > a")
    time_list = soup.select("tbody > tr > td.by > em")
    page_list = soup.select("tbody > tr > th > span.tps > a")
    page_dict = {}
    for page in page_list:
        # Rebuild the canonical thread URL so it matches the key looked up below.
        page_dict['http://bbs.yingjiesheng.com/thread-' + page["href"].split("-")[1] +
                  "-1-" + str(j) + ".html"] = page.text.strip()
    for i, each in enumerate(div_list):
        url = 'http:' + each["href"]
        res_list.append({'url': url,
                         'created_time': time_list[2 * i].text.strip(),
                         'last_time_reply': time_list[2 * i + 1].text.strip(),
                         'page': page_dict.get('http:' + each["href"], 1)})
    return res_list
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}
cookies = COOKIE
res = []
for i in range(1, 48):
url = 'http://bbs.yingjiesheng.com/forum-683-' + str(i) + '.html'
r = requests.get(url, cookies=cookies, headers=headers)
c = bs_css_parse_movies(r.content, i)
res += c
with open('urls.pk', 'wb') as f:
pickle.dump(res, f)
with open('urls.pk', 'rb') as f:
print(pickle.load(f))
|
15,066 | 5716d2c60efea6f9ede60c07870844edd25ec134 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import json
from flask import Flask
from flask import Markup
from flask import g, render_template, url_for, redirect, abort, request
from datetime import date, datetime
app = Flask(__name__)
app.debug = True
page = {
'title': 'Five years later, how does Hurricane Sandy compare to other major storms?',
'title_twitter': 'Comparing Hurricane Sandy to other major storms five years later'.decode('utf-8'),
'url': 'http://interactive.nydailynews.com/project/hurricane-sandy-comparison/',
'description': 'Compare Hurricane Sandy to Hurricanes Maria, Irma, Harvey, Irene, Katrina and Andrew.',
'author': '"Interactive Project", "Kelli R. Parker"',
'datestamp': '2017-10-26',
'keywords': 'Hurricane Sandy, Hurricane Maria, Hurricane Irma, Hurricane Harvey, Hurricane Katrina, Hurricane Irene, Hurricane Andrew',
'keywords_array': '"hurricane sandy","hurricane harvey","hurricanes","hurricane katrina","hurricane irma"',
'shareimg': 'hurricane-harvey-animation.gif',
'sharevideo': '',
'shareimg_static': 'hurricane-sandy-from-space.jpg',
'shareimgdesc': 'A photo of Hurricane Sandy from space.',
}
with app.app_context():
app.url_root = '/'
app.page = page
app.sitename = ''
@app.route('/')
def index():
response = {
'app': app,
}
return render_template('index.html', response=response)
@app.template_filter(name='last_update')
def last_update(blank):
""" Returns the current date. That means every time the project is deployed,
the datestamp will update.
Returns a formatted date object, ala "Friday Feb. 20"
"""
today = date.today()
return today.strftime('%A %B %d')
@app.template_filter(name='timestamp')
def timestamp(blank):
    """ What's the current date and time?

    Returns e.g. "Friday February 20, 3:05 PM". The ``blank`` argument is
    unused; Jinja filters receive the piped value there.
    NOTE(review): the ``%-I`` (non-padded hour) directive is a glibc
    extension and fails on Windows strftime — confirm the deploy target.
    """
    today = datetime.today()
    return today.strftime("%A %B %d, %-I:%M %p")
@app.template_filter(name='ordinal')
def ordinal_filter(value):
    """ Take a number such as 62 and return 62nd. 63, 63rd etc.

    Fixes from review: compare ints with ``==`` instead of ``is`` (small-int
    identity is a CPython implementation detail and warns on modern
    Pythons), and test the teens with ``value % 100`` so 111/112/113 get
    'th' — previously they fell through to 'st'/'nd'/'rd'.
    """
    digit = value % 10
    if 10 < value % 100 < 20:
        o = 'th'
    elif digit == 1:
        o = 'st'
    elif digit == 2:
        o = 'nd'
    elif digit == 3:
        o = 'rd'
    else:
        o = 'th'
    return '%d%s' % (value, o)
app.add_template_filter(ordinal_filter)
if __name__ == '__main__':
app.run()
|
15,067 | 921e9b21773e7ab8ae3dc9698fdf9b4c1fa51931 | from datetime import date
from datetime import time
from datetime import datetime
from datetime import timedelta
# construct a basic timedelta and print
print(timedelta(days=365, hours=5, minutes=1))
# print today's date
now = datetime.now()
print("Today is: ", str(now))
# print today's date one year from now
print("One year from now it will be: ", str(now + timedelta(days=365)))
# create a time delta that uses more that one argument
print("In 2 days and 3 weeks, it will be " + str(now+timedelta(days=2, weeks=3)))
# calculate the date 1 week ago
t = datetime.now() - timedelta(weeks=1)
s = t.strftime("%A %B %d, %Y")
print ("One week ago it was: ", s)
# when is the next april fools day
today = date.today()
afd = date(today.year, 4, 1)
if afd < today:
print("April fool's day already went by %d days ago" % ((today-afd).days))
afd = afd.replace(year = today.year+1)
time_to_afd = afd-today
print("It's just ", time_to_afd.days, "days until April Fool's Day") |
15,068 | e77ca30c538d3001723dbfebdbed8936561a2601 | from django.shortcuts import render
from django.http import HttpResponse
from .forms import UserRegisterForm
from django.shortcuts import redirect
# Create your views here.
def index(request):
    """Plain-text placeholder view for the app's root URL."""
    return HttpResponse("Hello, world. You're at the polls index.")
def team(request):
    """Render the static team page template."""
    return render(request, 'player/team.html')
def register(request):
    """Show the sign-up form on GET; on a valid POST create the user and
    redirect to the login page, otherwise re-render with the bound form
    so validation errors are shown."""
    if request.method != 'POST':
        blank_form = UserRegisterForm()
        return render(request, 'player/register.html', {'form': blank_form})
    bound_form = UserRegisterForm(request.POST)
    if bound_form.is_valid():
        bound_form.save()
        return redirect('login')
    return render(request, 'player/register.html', {'form': bound_form})
|
15,069 | c8464abd6ed18f88b13c1d6635e732ff477d3949 | # Generated by Django 3.2.4 on 2021-06-30 11:38
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: add ``contest_contents`` and ``project_contents``
    text columns to the ``project`` model.

    NOTE(review): ``default=django.utils.timezone.now`` on a TextField is
    the throwaway one-off default accepted at the makemigrations prompt
    (existing rows get str() of a datetime); confirm that backfill value
    was intended.
    """
    dependencies = [
        ('apply', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='contest_contents',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='project',
            name='project_contents',
            field=models.TextField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
15,070 | 7dfdec41fc858f88c14c4b7c9a29f8f4e9cff4f5 | class Solution:
def dailyTemperatures(self, temperatures):
n = len(temperatures)
stack = []
res = [0] * n
for i in range(n):
while stack and temperatures[i] > temperatures[stack[-1]]:
idx = stack.pop()
res[idx] = i - idx
stack.append(i)
return res
# O(n) = n!
# class Solution:
# def dailyTemperatures(self, temperatures):
# temperatures = deque(temperatures)
# res = []
# while temperatures:
# t = temperatures.popleft()
# if_larger = False
# for i, v in enumerate(temperatures):
# if v > t:
# res.append(i + 1)
# if_larger = True
# break
# if not if_larger:
# res.append(0)
# return res
# Quick manual check against the classic LeetCode 739 example
# (expected output: [1, 1, 4, 2, 1, 1, 0, 0]).
if __name__ == "__main__":
    temperatures = [73,74,75,71,69,72,76,73]
    sol = Solution()
    print(sol.dailyTemperatures(temperatures))
|
15,071 | 6b82f87651515457f8f2d0edf702fe34772cccc7 | import RPi.GPIO as GPIO
import time
import random
# Random-LED picker: flash all four LEDs once, cycle randomly through
# them 20 times, then blink the final pick five times.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(17,GPIO.OUT)
GPIO.setup(27,GPIO.OUT)
GPIO.setup(22,GPIO.OUT)
GPIO.setup(26, GPIO.OUT)
# Power-on self test: all four LEDs on for one second.
GPIO.output([17, 27, 22, 26], GPIO.HIGH)
time.sleep(1)
GPIO.output([17, 27, 22, 26], GPIO.LOW)
# 20 random picks: pins 17/27/22 get ~30% probability each, pin 26 ~10%.
for i in range(0,20):
    r = random.random()
    if r<0.3:
        led = 17
    elif r<0.6:
        led = 27
    elif r<0.9:
        led = 22
    else:
        led = 26
    print(led)
    GPIO.output(led, GPIO.HIGH)
    time.sleep(0.1)
    GPIO.output(led, GPIO.LOW)
# Announce the winner: ``led`` still holds the last pick from the loop above.
for i in range(5):
    GPIO.output(led, GPIO.HIGH)
    time.sleep(0.5)
    GPIO.output(led, GPIO.LOW)
    time.sleep(0.1)
#GPIO.output([17, 27, 22, 26], GPIO.LOW)
15,072 | 1c66747cbfc80206c95d30deaac85c7fca63204c | #!/usr/bin/env python
# coding: utf-8
# # House Price
# ## Import Libraries
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVC, SVR
from sklearn.metrics import mean_squared_error, mean_absolute_error
import xgboost as xgb
# In[2]:
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# ## Load Dataset
# In[3]:
df = pd.read_csv("houseprice.csv", sep=";", decimal=",")
df.head()
# ### Explore Dataset
# In[4]:
df.dtypes
# In[5]:
df.describe()
# In[6]:
df.corr()
# In[7]:
#Distribution of data
df.SalePrice.hist()
# In[202]:
#Search outliers
sns.pairplot(df)
# In[8]:
sns.set_theme(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
# Compute the correlation matrix
corr = df.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
# In[9]:
#How many NaN
df.isna().sum()
# In[10]:
df.shape
# In[11]:
df.head(1)
# ### Delete NaN
# In[46]:
df_cl = df.copy()
# In[47]:
df_cl = df_cl.dropna(subset=['MSZoning', 'Utilities', 'Exterior1st', 'Exterior2nd', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'KitchenQual', 'Functiol', 'GarageCars', 'GarageArea', 'SaleType'])
# In[48]:
df_cl.shape
# In[49]:
df_cl.isna().sum()
# In[50]:
df_cl = df_cl.drop(["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1)
# In[51]:
cols_nona = df_cl.columns[df_cl.notna().all()]
# In[109]:
df_nona = df_cl[cols_nona].copy()
# In[110]:
df_nona.isna().sum()
# In[111]:
#Type of variables
df_nona.dtypes
# ### Create dummy variables
# In[112]:
df_objects = df_nona.select_dtypes(include='object')
# In[113]:
for c in df_objects.columns.values:
dummy = pd.get_dummies(df_nona[c])
df_nona = pd.concat([df_nona, dummy], axis=1)
df_nona = df_nona.drop(c, axis=1)
# In[114]:
df_nona.dtypes
# In[127]:
len(dummy_columns)
# In[130]:
df_combine = df_nona.groupby(df_nona.columns, axis=1).sum()
# In[133]:
df_combine.head()
# ## Models
# In[134]:
df_combine = df_combine.drop("Id", axis=1)
# ### Standard Data
# In[147]:
std = StandardScaler()
df_scale = std.fit_transform(df_combine)
df_scale = pd.DataFrame(df_scale, columns=df_combine.columns.values)
# In[148]:
df_scale.shape
# In[149]:
df_scale.head()
# ### Train Test Split
# In[150]:
X = df_scale.drop("SalePrice", axis=1)
y = df_scale["SalePrice"]
# In[151]:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
# In[152]:
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.15, random_state=42)
# ### Linear Regression
# In[153]:
lr = LinearRegression(normalize=False, n_jobs=-1)
lr.fit(X_train, y_train)
lr.score(X_val, y_val)
# ### Select Variables
# In[154]:
print(X_train.shape)
print(X_train.columns.unique().shape)
# #### XGBoost
# In[156]:
xgbR = xgb.XGBRegressor()
xgbR.fit(X_train, y_train)
xgbR.score(X_val, y_val)
# In[164]:
fig, ax = plt.subplots(1,1, figsize=(20,50))
xgb.plot_importance(xgbR, ax=ax)
plt.show()
# In[174]:
len(xgbR.feature_importances_[np.where(xgbR.feature_importances_>0)])
# #### Best Variables
# In[185]:
selected_variables = X_train.columns.values[np.where(xgbR.feature_importances_>np.median(xgbR.feature_importances_))]
# In[186]:
selected_variables
# In[188]:
std = StandardScaler()
selected_variables = np.append(selected_variables, "SalePrice")
df_scale = std.fit_transform(df_combine[selected_variables])
df_scale = pd.DataFrame(df_scale, columns=selected_variables)
X = df_scale.drop("SalePrice", axis=1)
y = df_scale["SalePrice"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.15, random_state=42)
# ### SVR
# In[189]:
svr = SVR(kernel="linear")
svr.fit(X_train, y_train)
svr.score(X_val, y_val)
# In[190]:
kernel = ['linear', 'poly', 'rbf', 'sigmoid']
C = np.arange(0.1, 1.5, 0.1)
epsilon = [0.01,0.1,0.5]
res = []
# In[191]:
print("Number of iterations: ",(len(kernel)*len(C)*len(epsilon)))
# In[192]:
get_ipython().run_line_magic('time', '')
i=1
for k in kernel:
for c in C:
for e in epsilon:
print("Iteration: {} -- SVR with {} C={} Epsilon={}".format(i,k,c,e))
svr = SVR(kernel=k, C=c, epsilon=e)
svr.fit(X_train, y_train)
print("Score: ",svr.score(X_val, y_val))
y_pred = svr.predict(X_val)
res.append({"c":c, "e": e, "kernel":k, "acc":svr.score(X_val, y_val), "mae":mean_absolute_error(y_val, y_pred) ,"mse":mean_squared_error(y_val, y_pred)})
i+=1
# In[193]:
minMae = 1000000
minConf = 0
for i in res:
if i["mae"] < minMae:
minMae = i["mae"]
minConf = i
# In[194]:
minMae
# In[195]:
minConf
# In[196]:
svrFinal = SVR(C=minConf["c"], epsilon=minConf["e"], kernel=minConf["kernel"])
svrFinal.fit(X_train, y_train)
# In[197]:
y_pred = svrFinal.predict(X_test)
print("Test MAE: ", mean_absolute_error(y_test, y_pred))
print("Test MSE: ", mean_squared_error(y_test, y_pred))
# In[198]:
y_pred = svrFinal.predict(X)
y_pred = pd.DataFrame({"SalePrice":y_pred})
res = pd.concat([y_pred, X], axis=1)
res = std.inverse_transform(res)
# In[201]:
print("Test MAE: ", mean_absolute_error(df_combine["SalePrice"], y_pred))
print("Test MSE: ", mean_squared_error(df_combine["SalePrice"], y_pred))
# In[ ]:
|
15,073 | 33efa1d5dafcb0e7064a019265130fb92ebc1bb0 | #!/bin/env python27
from getwiki import GlycoMotifWiki, GlyTouCanMotif
w = GlycoMotifWiki()
|
15,074 | 8ef489d3e84864bbed3be6f88fbe65342afd1637 | #! /usr/bin/env python
# CLI structure
import argparse
import webbrowser
import subprocess
import sys
parser = argparse.ArgumentParser(description='Global CLI for multiple usage', prog='bipbop', usage='%(prog)s [argument]', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-t','--tool', metavar='', type=str,help='Choose the tool you want to use', choices=['excel', 'slack'])
parser.add_argument('-w','--web', metavar='', type=str, help='Choose the web app you want to open', choices=['datafy', 'jira', 'gmail'])
args = parser.parse_args()
# Define web routes URL's
if args.web == 'datafy':
print('Opening Datafy UI...')
webbrowser.open('https://app.datafy.cloud/')
if args.web == 'jira':
print('Opening JIRA...')
webbrowser.open('https://luminus.atlassian.net/jira/projects?selectedProjectType=software')
if args.web == 'gmail':
print('Opening Gmail...')
webbrowser.open('https://www.gmail.com')
# Define tools path to .exe
if args.tool == 'excel':
print('Opening Excel...')
subprocess.run("/mnt/c/Program Files (x86)/Microsoft Office/root/Office16/EXCEL.EXE")
if args.tool == 'slack':
print('Opening Slack...')
subprocess.run("/mnt/c/Users/kevec/AppData/Local/slack/slack.exe")
if len(sys.argv) < 2 :
test_input = input('What is your name: ')
print(f'Dear {test_input}, please get a look at the help below:')
parser.print_help()
|
15,075 | 50eb3b794a4e908d5a3e0195b452a249cf89f6de | def isPalin(n,rev=0):
if n==0:
return rev
rev =rev*10+(n%10)
rev= reverse(n//10,rev)
return rev
def isPalin(num):
    """Return 1 if ``num`` reads the same backwards (a palindrome), else 0."""
    return 1 if num == reverse(num) else 0
n=101
# NOTE(review): as originally written both functions above are named
# isPalin and no ``reverse`` exists, so these calls raise NameError —
# the first definition was evidently meant to be named ``reverse``.
print(reverse(n))
print(isPalin(n))
15,076 | de5f5e9eaea91f764e03242f66385e9b955fe4ef | import praw
import prawcore
import os
def reddit_login():
    """Authenticate via praw's OAuth flow (credentials from praw.ini,
    section 'mysettings') and return the logged-in redditor, or None
    when the stored credentials are rejected."""
    agent = 'PC:redditFavoriteGrab:v0.1 (by /u/Scien)'
    session = praw.Reddit('mysettings', user_agent=agent)
    try:
        return session.user.me()
    except prawcore.exceptions.Forbidden:
        print('\nIt seems your credentials are invalid. Please check whether your praw.ini file is properly setup.')
        return None
def main():
    """Back up the logged-in redditor's saved posts and comments to
    redditFavorites.txt (one line per item), refusing to overwrite an
    existing backup file."""
    if os.path.isfile('./redditFavorites.txt'):
        print('Please delete or move your current redditFavorites.txt to a safe place.')
        return # exit the script if file problems
    # NOTE(review): opened without a context manager — the handle leaks if
    # an exception fires before close(); consider ``with open(...)``.
    file = open('redditFavorites.txt','w')
    redditor = reddit_login()
    if redditor is None:
        print('\nStopping script...')
        return # exit the script if unable to log in to reddit
    print('Welcome /u/{}. I will help you backup your saved posts on reddit :)'.format(redditor))
    saved = redditor.saved(limit=None)
    saved_posts = []
    saved_comments = []
    for post in saved: # separate out posts and comments
        if isinstance(post, praw.models.Submission):
            saved_posts.append(post)
        elif isinstance(post, praw.models.Comment):
            saved_comments.append(post)
    for post in saved_posts:
        # There is probably a better way to handle encoding here. I was failing in win due to console encoding differences.
        file.write('[{0!a}] {1!a} - {2!a}\n'.format(post.shortlink, post.title, post.url))
    print('Done creating a list of posts...')
    for comment in saved_comments:
        comment_url = comment.link_url + comment.id
        file.write('[{0!a}] - Comment\n'.format(comment_url))
    print('Done creating a list of comments...')
    file.close()
if __name__ == '__main__':
main()
|
15,077 | 8539ef6278f56c1b1edcb288328ab1f25df4d369 | # message = 'It was a bright cold day in April, and the clocks were striking thirteen.'
# count = {}
# for character in message:
# count.setdefault(character, 0)
# count[character] = count[character] + 1
# print(count)
theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',
'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',
'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
def printBoard(board):
    """Render the 3x3 tic-tac-toe board to stdout: three '|'-separated
    rows ('top', 'mid', 'low'), divided by '-+-+-' rules."""
    for row_index, row in enumerate(('top', 'mid', 'low')):
        if row_index:
            print('-+-+-')
        print('|'.join(board[row + suffix] for suffix in ('-L', '-M', '-R')))
#printBoard(theBoard)
turn = 'X'
for i in range(9):
printBoard(theBoard)
print('Turn for ' + turn + '. Move on which space? (q to quit):')
move = input()
if move.lower() == 'q':
break
else:
theBoard[move] = turn
if turn == 'X':
turn = 'O'
else:
turn = 'X'
#Chapter 5 Questions
# 1. What does the code for an empty dictionary look like?
emptyDict = {}
# 2. What does a dictionary value with a key 'foo' and a value 42 look like?
myDict = {'foo': 42}
# 3. What is the main difference between a dictionary and a list?
# Dictionary is unordered, and has access by keys
# 4. What happens if you try to access spam['foo'] if spam is {'bar': 100}?
# KeyError
# 5. If a dictionary is stored in spam, what is the difference between the expressions 'cat' in spam and 'cat' in spam.keys()?
# They are different ways to reference keys in the dictionary
print()
spam = {'tall': '12oz', 'grande':'16oz', 'venti': '24oz', 'trenta': '31oz'}
for d, s in spam.items():
print(d + ' cup size is ' + s)
print()
# spam['cat'] is a key that will return the value associated with the key 'cat'
# spam.keys() will return the list of all keys
# 6. If a dictionary is stored in spam, what is the difference between the expressions 'cat' in spam and 'cat' in spam.values()?
# spam['cat'] is a value referenced by a key; spam.values() is a list of all values in the spam dictionary
# 7. What is a shortcut for the following code?
#if 'color' not in spam:
# spam['color'] = 'black'
# Answer:
# spam.setdefault('color', 'black')
# 8. What module and function can be used to “pretty print” dictionary values?
# Module: pprint, function: pprint: pprint.pprint(dict)
# For practice, write programs to do the following tasks.
# Fantasy Game Inventory
# You are creating a fantasy video game. The data structure to model the player’s inventory will be a dictionary where the keys
# are string values describing the item in the inventory and the value is an integer value detailing how many of that item the
# player has. For example, the dictionary value {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12} means the
# player has 1 rope, 6 torches, 42 gold coins, and so on.
# Write a function named displayInventory() that would take any possible “inventory” and display it like the following:
# Inventory:
# 12 arrow
# 42 gold coin
# 1 rope
# 6 torch
# 1 dagger
# Total number of items: 62
# Hint: You can use a for loop to loop through all the keys in a dictionary.
# inventory.py
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
more_stuff = {'backpack': 1, 'spellbook': 3, 'pointy hat': 1, 'robes': 2}
def displayInventory(inventory):
    """Print each item with its count, then the grand total of items held."""
    print("Inventory:")
    grand_total = sum(inventory.values())
    for name, amount in inventory.items():
        print(name + ' ' + str(amount))
    print("Total number of items: " + str(grand_total))
# Demonstrate displayInventory() on both sample inventories.
displayInventory(stuff)
print()
displayInventory(more_stuff)
# List to Dictionary Function for Fantasy Game Inventory
# Imagine that a vanquished dragon’s loot is represented as a list of strings like this:
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
# Write a function named addToInventory(inventory, addedItems), where the inventory parameter is a dictionary representing
# the player’s inventory (like in the previous project) and the addedItems parameter is a list like dragonLoot. The addToInventory()
# function should return a dictionary that represents the updated inventory. Note that the addedItems list can contain multiples
# of the same item. Your code could look something like this:
def addToInventory(inventory, addedItems):
    """Count each item in *addedItems* into *inventory* (mutated in place).

    inventory maps item name -> count; addedItems may contain duplicates,
    and every occurrence increments that item's count by one.  The same
    dictionary is returned for convenience.
    """
    for item in addedItems:
        # dict.get collapses the original if/else (new item vs. existing item).
        inventory[item] = inventory.get(item, 0) + 1
    return inventory
# Starter loot
inv = {'gold coin': 42, 'rope': 1}
# After raiding dragon's lair
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
# Add up the takings!  (addToInventory also mutates inv in place.)
inv = addToInventory(inv, dragonLoot)
# Show 'em what I got
print()
displayInventory(inv) |
15,078 | b31c04ca2c5b994c3fe69358df8bb967ac8cc0dd | import discord
from discord.ext import commands
import datetime
import itertools
from utils import get_member, log, error, success, EmbedColor, read_logs, is_admin
class User(commands.Cog):
    """User-facing informational commands: command list, help, presence
    statistics, ping, and server rules."""

    def __init__(self, bot):
        self.bot = bot

    @staticmethod
    def _format_args(arguments):
        """Render an arguments mapping as '<required>' / '[optional]' tokens.

        Extracted because the same formatting loop appeared three times in
        `_commands` and `help`.
        """
        args = []
        for argument, argument_info in arguments.items():
            if argument_info["required"]:
                args.append(f"<{argument}>")
            else:
                args.append(f"[{argument}]")
        return args

    @commands.command(name="commands")
    async def _commands(self, ctx):
        """Send an embed listing every command, grouped by category."""
        description = "A list of available commands. Some may be admin only.\nUse `help <command>` for more info about a command"
        embed = discord.Embed(title="Commands", description=description, color=EmbedColor.dark_green)
        for category, category_data in self.bot.data.command_metadata.items():
            value = []
            for command, command_info in category_data.items():
                args = self._format_args(command_info["arguments"])
                value.append(command + " " + " ".join(args))
            embed.add_field(name=category, value="\n".join(value), inline=False)
        await ctx.send(embed=embed)

    @commands.command()
    async def help(self, ctx, command_name=None):
        """Show generic usage help, or detailed help for *command_name*."""
        if command_name is None:
            description = "Usage:\n```help <command>```"
            description += "\nYou can also use the `commands` command to view the commands list"
            embed = discord.Embed(title="Help", description=description, color=EmbedColor.dark_green)
            await ctx.send(embed=embed)
            return
        # Look the command up across every category.
        command_info = None
        for category, category_data in self.bot.data.command_metadata.items():
            for command, _command_info in category_data.items():
                if command_name == command:
                    command_info = _command_info
        if not command_info:
            # Typo fix: 'Unknwn' -> 'Unknown'.
            await ctx.send(embed=await error(f"Unknown command '{command_name}'"))
            return
        description = command_info["description"]
        if command_info["requires_admin"]:
            description += "\nThis command is admin only"
        args = self._format_args(command_info["arguments"])
        description += "\nUsage:"
        description += "```" + command_name + " " + " ".join(args) + "```"
        embed = discord.Embed(title=command_name, description=description, color=EmbedColor.dark_green)
        # One embed field per argument, with its requirement and default.
        for argument, argument_info in command_info["arguments"].items():
            description = argument_info["description"]
            if argument_info["required"]:
                # Typo fix: 'requred' -> 'required'.
                description += "\nThis argument is required"
            else:
                description += "\nThis argument is optional"
            if argument_info["default"]:
                description += f". The default value is {argument_info['default']}"
            embed.add_field(name=argument, value=description, inline=False)
        await ctx.send(embed=embed)

    @commands.command()
    async def online(self, ctx):
        """Post a breakdown of member presence states for the main guild."""
        guild = self.bot.get_guild(self.bot.data.guilds["main"])
        member_count = guild.member_count
        status_count = [0, 0, 0, 0, 0]  # online, offline, idle, dnd, invisible
        status_list = list(discord.Status)
        for member in guild.members:
            status_count[status_list.index(member.status)] += 1
        stats = discord.Embed(color=EmbedColor.dark_green)
        # Custom status emoji IDs followed by each count; offline folds in invisible.
        stats.add_field(name=f"Total: {member_count}", value="\n".join([
            f"<:online:572884944813031434>{status_count[0]}",
            f"<:idle:572884943898673174>{status_count[2]}",
            f"<:do_not_disturb:572884944016113666>{status_count[3]}",
            f"<:offline:572884944343269378>{status_count[1] + status_count[4]}"]))
        await ctx.send(embed=stats)

    @commands.command()
    async def ping(self, ctx):
        """Reply with the bot's current websocket latency in milliseconds."""
        await ctx.send(embed=await success(f"Pong! ({round(self.bot.latency, 3) * 1000}ms)"))

    @commands.command()
    async def rule(self, ctx, number=None):
        """Display server rule *number* (1-based) as an embed."""
        async def _error():
            await ctx.send(embed=await error(f"Invalid rule number '{number}', must be 1-{len(self.bot.data.rules.keys())}"))
        if number is None:
            await _error()
            return
        try:
            number = int(number)
        except (ValueError, TypeError):
            await _error()
            return
        if not 0 < number < len(self.bot.data.rules.keys()) + 1:
            await _error()
            return
        rule = self.bot.data.rules[str(number)]
        title = f"Rule {number} - {rule['title']}"
        # u"\u200b" (zero-width space) keeps the embed valid when the rule
        # has no description lines.
        description = "\n".join(rule['description']) or u"\u200b"
        embed = discord.Embed(title=title, description=description, color=EmbedColor.dark_green)
        await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(User(bot)) |
15,079 | e5a04820f82c33ae59dc7d07469c5f839d2772e5 | import pyttsx3
import datetime
import speech_recognition as sr
import smtplib
import wikipedia
import webbrowser
import os
engine = pyttsx3.init('sapi5')  # 'sapi5' = Windows Speech API backend
newVoiceRate = 135  # words-per-minute speaking rate
engine.setProperty('rate',newVoiceRate)
voices= engine.getProperty('voices') #getting details of current voice
print(voices)
engine.setProperty('voice', voices[0].id)  # NOTE(review): assumes at least one installed voice
def speak(audio):
    """Speak *audio* aloud through the module-level pyttsx3 engine (blocking)."""
    engine.say(audio)
    # Without this call, speech is never flushed to the audio device.
    # Bug fix: the original called runAndWait() twice; the second call was
    # redundant (the first already blocks until the queue is drained).
    engine.runAndWait()
def wishMe():
    """Greet the user according to the current local time of day."""
    hour = datetime.datetime.now().hour
    if hour < 12:
        greeting = "Good Morning Boss."
    elif hour < 18:
        greeting = "Good Afternoon Boss. "
    else:
        greeting = "Good Evening Boss."
    speak(greeting)
    speak("I am Friday")
def take_command():
    """Record one utterance from the microphone and return it as text.

    Uses Google's online recognizer with Indian English ('en-in').  On any
    recognition failure the literal string "None" is returned, which callers
    then test with substring checks.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening.....")
        speak("Listening.....")
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing.....")
        speak("Recognizing.....")
        query = r.recognize_google(audio, language="en-in") #Using google for voice recognition.
        print(f"User Said: {query}\n") #User query will be printed.
    # NOTE(review): the broad Exception catch also hides network errors.
    except Exception as e:
        print("Say that again please...") # Say that again will be printed in case of improper voice
        speak("Say that again please...")
        return "None" # None string will be returned
    return query
def sendEmail(to, content):
    """Send *content* to address *to* through Gmail's SMTP server.

    Prompts the operator (voice plus stdin) for their own Gmail credentials.
    NOTE(review): the password is read in plain text on stdin; Gmail may also
    require an app password for this login to succeed -- confirm.
    """
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.ehlo()
    server.starttls()  # upgrade to TLS before sending credentials
    speak("Enter your email.")
    email = input("Enter your email: ")
    speak("Enter your password.")
    passwrd = input("Enter your password: ")
    server.login(email, passwrd)
    server.sendmail("prashantbhandari2007@gmail.com", to, content)
    server.close()
if __name__=="__main__":
    # Greet the user, then loop forever dispatching on keywords in the query.
    wishMe()
    speak("How may i help you Boss?")
    while True:
        query = take_command().lower()
        if "close" in query:
            print("Ok, Quiting.....")
            speak("Ok, Quiting.....")
            break
        elif "what is time" in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir, the time is {strTime}")
        elif 'wikipedia' in query:
            try:
                speak('Searching Wikipedia...')
                query = query.replace("wikipedia", "")
                results = wikipedia.summary(query, sentences=5)
                print("According to Wikipedia")
                speak("According to Wikipedia")
                print(results)
                speak(results)
            # NOTE(review): bare except hides network/disambiguation errors alike.
            except:
                print("Sorry no Information found.")
                speak("Sorry no Information found.")
        elif "open google" in query:
            print("opening google...")
            speak("opening google...")
            webbrowser.open("google.com")
        elif "open youtube" in query:
            print("youtube.com")
            webbrowser.open("youtube.com")
        elif "play song" in query:
            # `while True` + `finally: break` executes this body exactly once;
            # the loop exists only so `finally` can guarantee the break.
            while True:
                try:
                    print("Which song you want to hear?")
                    speak("Which song you want to hear?")
                    query2 = take_command().lower()
                    # NOTE(review): backslashes survive only because \p and \s
                    # are not escape sequences; a raw string would be safer.
                    music_dir = 'D:\prashant\songs'
                    songs = os.listdir(music_dir)
                    print(songs)
                    os.startfile(os.path.join(music_dir, query2+".mp3"))
                except:
                    print("Sorry no song found.")
                    speak("Sorry no song found.")
                finally:
                    break
        elif "play video" in query:
            # Same run-once pattern as "play song" above.
            while True:
                try:
                    print("Which video you want to see?")
                    speak("Which video you want to see?")
                    query2 = take_command().lower()
                    music_dir = 'D:\prashant\\videos'
                    songs = os.listdir(music_dir)
                    print(songs)
                    os.startfile(os.path.join(music_dir, query2+".mp4"))
                except:
                    print("Sorry no video found.")
                    speak("Sorry no video found.")
                finally:
                    break
        elif "send email" in query:
            try:
                print("To whom, you want to send mail?")
                speak("To whom, you want to send mail?")
                to = input("To: ")
                print("What should i say boss?")
                speak("What should i say boss?")
                content = input("Content: ")
                sendEmail(to, content)
                speak("sending.......")
                print("sending.......")
                print("Email send sucessfully")
                speak("Email send sucessfully")
            except:
                print("Email sending failed.")
speak("Email sending failed.") |
15,080 | 99c8787895003c8b76f0f603d099d4ce008bda17 | zbior = set()
# Read numbers typed by the user into the set `zbior` until they type "stop".
dodatnie = set()  # NOTE(review): never populated; the dead `for y in dodatnie`
# loop that iterated this empty set (and could never run) has been removed.
x = input("Podaj liczbę (wpisz <stop> jeżeli chcesz skończyć): ")
while x != "stop":
    zbior.add(int(x))
    # Bug fix: re-prompt inside the loop.  The original read input only once
    # before the loop, so any non-"stop" entry spun forever; its
    # `if x == "stop"` check after int() conversion could also never be true.
    x = input("Podaj liczbę (wpisz <stop> jeżeli chcesz skończyć): ")
|
15,081 | 19d64dc3d2bc2d7854dcc617d5c5402fbf7856e3 | import os
import subprocess
import sys
"""
This script speeds up the computation of the topology based attack.
Specifically it creates multiple subprocesses and assigns to it a subset of the attack data to conceal.
"""
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data', nargs='+', type=str, default=['BATADAL'])
args = parser.parse_args()
print(args.data)
dataset = args.data[0]

# Per-dataset attack intervals; each worker process conceals one interval
# for the listed PLCs.
if dataset == 'BATADAL':
    intervals = [[1,2],[3,4],[5,6],[7,8],[9,10],[11,12],[13,14]]
    plcs = ['PLC_1','PLC_2', 'PLC_3', 'PLC_4',
            'PLC_5', 'PLC_6', 'PLC_7', 'PLC_8', 'PLC_9'
            ]
if dataset == 'WADI':
    intervals = [[1,2],[3,5],[6,7],[8,9],[10,11],[12,13],[14,15]]
    plcs = ['PLC_1','PLC_2']
# NOTE(review): an unrecognized dataset leaves `intervals` undefined and the
# loop below raises NameError -- confirm the allowed values.

for interval in intervals:
    # Build the argv once so the diagnostic print matches the spawned command
    # exactly (the old message interpolated str(plcs) and dropped a space
    # after -p, so it did not reflect what was actually executed).
    cmd = ['python', 'constrained_attack_PLC.py', '-d', dataset,
           '-a', str(interval[0]), str(interval[1]), '-p'] + plcs
    print(' '.join(cmd))
    subprocess.Popen(cmd)  # fire-and-forget worker process
|
15,082 | b129f2fc0597d8c158d4316d8450d9dc32661ae7 | import os
import csv
csvpath = os.path.join("Resources", "election_data.csv")
out_path = os.path.join("analysis","analysis.txt")
total_votes = 0

with open(csvpath) as election_file:
    election_reader = csv.reader(election_file, delimiter=",")
    header = next(election_reader)  # skip the header row

    # Tally votes per candidate (column 2), preserving first-seen order.
    votes = {}
    for row in election_reader:
        total_votes += 1
        candidate = row[2]
        votes[candidate] = votes.get(candidate, 0) + 1

# All candidates sharing the highest vote count (handles ties).
max_vote = max(votes.values())
max_candidates = [name for name, count in votes.items() if count == max_vote]

with open(out_path, 'w') as text_file:
    print("Election Results", file=text_file)
    print("-----------------------", file=text_file)
    print("Total Votes: {}".format(total_votes), file=text_file)
    print("-----------------------", file=text_file)
    # Bug fix: iterate over every candidate instead of hard-coding indexes
    # 0-3, which raised IndexError whenever there were not exactly four.
    for name, count in votes.items():
        print(f"{name}: {count/total_votes*100:.3f}% ({count})", file=text_file)
    print("-----------------------", file=text_file)
    # Bug fix: print the winner name(s), not the Python list repr.
    print(f"Winner: {', '.join(max_candidates)}", file=text_file)
    print("-----------------------", file=text_file)
    # (The explicit text_file.close() was redundant inside `with` and removed.)
15,083 | 72fbd5ce65daad47224817fae613733d49efd7eb | # encoding: utf-8
'''
Test scenario:
    Registration at https://www.vinted.pl/
Preconditions:
    Browser open at https://www.vinted.pl/
Test case:
    Invalid first name (contains the '@' character)
Steps:
    1. Accept the cookie consent.
    2. Click "Sign up".
    3. Click "Sign up" in the new window.
    4. Enter an invalid first name and a valid last name.
    5. Enter a valid profile name.
    6. Enter a valid e-mail address.
    7. Enter a valid password.
    8. Click "Register".
Expected result:
    1. The user sees a red error under the name field:
    "Proszę podaj swoje imię i nazwisko" ("Please enter your first and last name").
'''
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from test_constants import SIGN_FORM_NAME_INPUT, INVALID_NAME, SIGN_FORM_LOGIN_INPUT, \
VALID_USER_NICK, SIGN_FORM_EMAIL_INPUT, VALID_EMAIL, SIGN_FORM_PASSWORD_INPUT, \
VALID_PASSWORD, SIGN_FORM_REGISTER_BTN, SIGN_FORM_INPUT_ERROR_HINT
from test_utils import accept_cookies
from test_utils import go_to_register_form
class VintedRegistration(unittest.TestCase):
    """Selenium test of the vinted.pl registration form (invalid-name case)."""
    def setUp(self):
        # Fresh maximized Firefox session on the landing page for every test.
        self.driver = webdriver.Firefox()
        self.driver.maximize_window()
        self.driver.get("https://www.vinted.pl/")
    def test_wrong_name(self):
        """Submitting an invalid first name must show the name-field error.

        Fills the whole form with valid data except the name, submits, and
        asserts the Polish validation message appears under the name input.
        """
        driver = self.driver
        accept_cookies(driver)
        go_to_register_form(driver)
        name_field = driver.find_element_by_id(SIGN_FORM_NAME_INPUT)
        name_field.send_keys(INVALID_NAME)
        user_nick_field = driver.find_element_by_id(SIGN_FORM_LOGIN_INPUT)
        user_nick_field.send_keys(VALID_USER_NICK)
        email_field = driver.find_element_by_id(SIGN_FORM_EMAIL_INPUT)
        email_field.send_keys(VALID_EMAIL)
        password_field = driver.find_element_by_id(SIGN_FORM_PASSWORD_INPUT)
        password_field.send_keys(VALID_PASSWORD)
        registration_btt = WebDriverWait(driver, 30).until(
            EC.visibility_of_element_located((By.XPATH, SIGN_FORM_REGISTER_BTN)))
        registration_btt.click()
        ### TEST ###
        # Expected message: "Please enter your first and last name" (Polish).
        error = WebDriverWait(driver, 30).until(
            EC.visibility_of_element_located((By.XPATH, SIGN_FORM_INPUT_ERROR_HINT)))
        assert error.text == "Proszę podaj swoje imię i nazwisko"
    def tearDown(self):
        self.driver.quit()
if __name__ == "__main__":
    unittest.main(verbosity=2)  # verbosity=2 prints each test name as it runs
|
15,084 | 61fc2cb006e723f0ece42ec3a0db43cc24361be0 | import math
import matplotlib.pyplot as plt
# --- Device parameters and physical constants (see comments below) ---
yita_opt=0.970
fai0=1.00e17
arfa=5.00e3
n0=1.00e16
d=10.0e-4
M=4.5
tao=10e-3
D=5.00e-4
q=1.6e-19
k_B=1.38e-23
T_ref=T_a=300
G=100e-3
A_D=100
l=math.sqrt(D*tao)  # electron diffusion length l = sqrt(D*tao)
print (l)
#D is the electron diffusion parameter,tao is the electron lifetime,fai0 is the optical efficiency of the glass cover,and arfa is the light absorption coefficient of the porous electrode
fai=fai0*yita_opt  # flux after applying the glass-cover optical efficiency
#kB is the Boltzmann constant,d is the thin film thickness, Tref is the operating temprature at the reference condition, J is the current density of the DSSC.
#G=100mW/cm^2;
#yita_opt is the optical efficient of the glass cover;
#A_D is the effective area of the DSSC under illumination. G is the solar spectrum intensity;
#because of the effeciency of tempreture
# Short-circuit current density from the analytic diffusion model.
J_sc=q*fai*l*arfa/(1-l*l*arfa*arfa)*(-l*arfa+math.tanh(d/l)+l*arfa*math.exp(-d*arfa)/math.cosh(d/l))
# Terminal voltage as a function of current density J.
V_func=lambda J : k_B*T_ref*M/q*math.log1p(l*(J_sc-J)/(q*D*n0*math.tanh(d/l)))
#find the max current density J_sc
N=100
J=[x*J_sc/N for x in range(N)]  # sweep J from 0 up to (just below) J_sc
V=list(map(V_func,J))
P_D_ref=list(map(lambda J,V : J*A_D*V,J,V))  # output power at reference temperature
yita_D_ref=list(map(lambda P_D_ref : P_D_ref/(G*A_D),P_D_ref))  # efficiency at reference
# Linear temperature corrections at T = 340 K (coefficients lambda0, beta
# chosen to fit experimental data -- see the note at the end of the file).
T=340
lambda0=0.00506e-3
beta=0.0278
P_D=list(map(lambda P_D_ref : P_D_ref-lambda0*(T-T_ref),P_D_ref))
yita_D=list(map(lambda yita_D_ref : yita_D_ref*(1-beta*(T-T_ref)),yita_D_ref))
print('Do you want to print the curve?y/n')
select1=input()
if select1=='y' :
    # Power vs. current density: reference (red) and temperature-corrected.
    plt.figure(figsize=(8,4))
    # plt.plot(J, P_D_ref, 'b*')#,label="$sin(x)$",color="red",linewidth=2)
    plt.plot(J, P_D_ref, 'r')
    plt.plot(J,P_D)
    plt.xlabel("current density (A/cm^2)")
    plt.ylabel("output power (W)")
    plt.ylim(0, max(P_D_ref)*1.1)
    plt.title('the output charactoristics of DSSC')
    plt.legend()
    plt.show()
select2=input()
print(yita_D)
if select2=='y' :
    # Efficiency vs. current density, same two cases.
    plt.figure(figsize=(8,4))
    # plt.plot(J, yita_D_ref, 'b*')#,label="$sin(x)$",color="red",linewidth=2)
    plt.plot(J, yita_D_ref)
    plt.plot(J,yita_D)
    plt.xlabel("current density (A/cm^2)")
    plt.ylabel("output effeciency")
    plt.ylim(0, max(yita_D_ref)*1.1)
    plt.title('the output charactoristics of DSSC')
    plt.legend()
    plt.show()
#when lambda0=0.00506mWK^-1,it fits well experimental data
#beta=0.0278K^-1
|
15,085 | 2e7be51cd7da903dda9930f7830ab407500cdb30 | #!/usr/bin/env python
import os
import errno
import time
import rospy
from uuv_world_ros_plugins_msgs.srv import SetCurrentVelocity
# Because of transformations
# import tf_conversions
# import tf2_ros
# from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from nav_msgs.msg import Odometry
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion
from gazebo_msgs.msg import LinkState
from gazebo_msgs.srv import GetLinkState
# import copy
from math import pi, cos, sin, atan2
from datetime import datetime
from quaternions import quaternion_to_yaw, quaternion_to_rpy, rpy_to_quaterion
"""
This script instantiates a listener on robot end effector pose
and a publisher to the ocean current action server and a pose to the vehicle pose controller
The ocean current is dynamically adjusted as a function of time using a sin wave.
This script records the
"""
class TurbulenceExperimentNode(object):
    """ROS node for the turbulence experiment.

    Drives a sinusoidal ocean current through the simulator's current-velocity
    service, republishes constant goal poses for the vehicle and arm, and logs
    the manipulator end-effector pose to a timestamped CSV file.
    """

    def __init__(self, node_name):
        # Period of sinusoidal turbulence
        self.period = 10
        # Omega - angular frequency
        self.w = 2*pi/self.period
        # Maximum current speed
        self.max_vel = 0.0
        # Delay Time: Time to wait before starting currents
        self.delay_time = 5.0
        # Create log files
        self.instantiate_logs()
        # Create the node
        rospy.init_node(node_name)
        # wait for current velocity service to come up
        self.current_velocity_service = '/hydrodynamics/set_current_velocity'
        rospy.wait_for_service(self.current_velocity_service)
        # create service proxy
        try:
            self.set_current_velocity = rospy.ServiceProxy(self.current_velocity_service, SetCurrentVelocity)
        except rospy.ServiceException as exc:
            print("Service did not process request: " + str(exc))
            raise
        # wait for model_link state service to come up
        self.model_link_state_service = '/gazebo/get_link_state'
        rospy.wait_for_service(self.model_link_state_service)
        # create service proxy
        try:
            self.get_model_state = rospy.ServiceProxy(self.model_link_state_service, GetLinkState)
        except rospy.ServiceException as exc:
            print("Service did not process request: " + str(exc))
            raise
        # create a subscriber on the vehicle pose topic
        # self.subscriber = rospy.Subscriber('/seabotix/pose_gt', Odometry, self.odom_callback)
        # Publish to vehicle pose controllers
        self.pub_vehicle_pose = rospy.Publisher('/seabotix/cmd_pose', PoseStamped, queue_size=10)
        # Publish to arm state controller
        self.pub_arm_pose = rospy.Publisher('/seabotix/alpha/arm_control/command', JointState, queue_size=10)
        # The current_function returns a tuple
        # (velocity, horizontal_angle, vertical_angle) dictating the
        # parameters of the current.  Swap in a different function as needed.
        self.current_function = self.sinusoid_current
        self.current_info = (0,0,0) # last commanded current: (velocity, horizontal angle, vertical angle)
        rospy.sleep(1)
        self.rate = rospy.Rate(10.0)
        self.start_time = self.get_time() # time in seconds
        try:
            while not rospy.is_shutdown():
                self.loop()
                self.rate.sleep()
        except rospy.exceptions.ROSInterruptException as e:
            print("ROS Interrupt Exception")
            self.__del__()

    def __del__(self):
        rospy.loginfo("Ending turbulence experiment node, Setting velocities back to 0.")
        self.set_current_velocity(0.0, 0.0, 0.0)
        # Bug fix: the original called the undefined builtin `close(...)`,
        # which raised NameError on shutdown; close the file object directly.
        self.log_file_desc.close()

    @classmethod
    def get_time(cls):
        """
        Returns the ROS time in seconds
        """
        now = rospy.Time.now()
        return now.secs + now.nsecs*(10**-9) # time in seconds

    def loop(self):
        """One control iteration: log the end-effector pose, publish the goal
        poses, then update the simulated current."""
        # Get end effector pose
        x, y, z, roll, pitch, yaw = self.get_end_effector_state()
        # log: time, current_state, end effector pose
        self.log_file_desc.write("\n{}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(self.get_time(),
                self.current_info[0], self.current_info[1], self.current_info[2],
                x, y, z, roll, pitch, yaw))
        # Publish goal vehicle and arm poses
        # if self.get_time() > self.start_time + self.delay_time + 10.0:
        self.pub_goal_arm_pose()
        self.pub_goal_vehicle_pose()
        # Publish current pose
        self.current_info = self.current_function()
        self.set_current_velocity(*self.current_info)
        print(self.get_time())

    def instantiate_logs(self):
        """ Creates the log directories for the runs and
        saves initial run info
        """
        # Log file
        timestamp = datetime.now().strftime("%Y-%m-%dT%H%M%S")
        self.log_dir = os.path.join("experiment_logs", timestamp)
        # Create Log directory if it does not exist
        try:
            os.makedirs(self.log_dir)
        except OSError as exception:
            if exception.errno != errno.EEXIST:
                raise
        self.info_file = os.path.join(self.log_dir, "run_info.txt")
        self.log_file = os.path.join(self.log_dir, "data.csv")
        with open(self.info_file, "w+") as f:
            f.write("Period = {}\nMaxVel = {}".format(self.period, self.max_vel))
        self.log_file_desc = open(self.log_file, "w+")
        self.log_file_desc.write("t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw")

    def pub_goal_vehicle_pose(self):
        """ Sends the goal vehicle pose to the simulator
        """
        header = Header()
        header.stamp = rospy.Time.now()
        position = Point(20.5, -10, -85)
        yaw = pi
        # Converting yaw to quaternion
        # See https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
        # For better intuition about quaternions: https://eater.net/quaternions
        orientation = Quaternion(*rpy_to_quaterion(0, 0, yaw))
        pose = Pose(position, orientation)
        pose_stamped_msg = PoseStamped(header, pose)
        self.pub_vehicle_pose.publish(pose_stamped_msg)

    def pub_goal_arm_pose(self):
        """Publish a constant goal joint configuration for the manipulator."""
        # (An unused `current_time = self.get_time()` local was removed.)
        header = Header()
        header.stamp = rospy.Time.now()
        joint_names = ['alpha/joint1', 'alpha/joint2', 'alpha/joint3', 'alpha/joint4']
        position = [0.0, pi/2, pi/2, 0.0]
        velocity = [0, 0, 0, 0]
        effort = [0, 0, 0, 0]
        joint_state_msg = JointState(header, joint_names, position, velocity, effort)
        self.pub_arm_pose.publish(joint_state_msg)

    def get_end_effector_state(self):
        """Query Gazebo for the wrist-link pose; returns (x, y, z, roll, pitch, yaw).

        NOTE(review): implicitly returns None when the service call fails,
        which makes loop()'s tuple unpack raise TypeError -- confirm whether a
        retry or an explicit exception is preferred.
        """
        response = self.get_model_state('seabotix::alpha/wrist_link', 'world')
        if response.success:
            state = response.link_state
            position = state.pose.position
            orient = state.pose.orientation
            roll, pitch, yaw = quaternion_to_rpy(orient.w, orient.x, orient.y, orient.z)
            # state also contains velocities, but unused for now
            # state.twist.linear, state.twist.angular
            return position.x, position.y, position.z, roll, pitch, yaw

    def update_current(self):
        """ Sends a command to the ros service to update ocean currents
        based on vehicle position
        """
        velocity, horizontal_angle, vertical_angle = self.current_function()
        self.set_current_velocity(velocity, horizontal_angle, vertical_angle)

    def sinusoid_current(self):
        """ A simple time-varying current field in the x direction based on
        a sinusoid.
        """
        t = self.get_time() # time in seconds
        offset_time = t - self.start_time - self.delay_time # time offset by start/delay time
        # Don't start the currents until the delay period has elapsed
        if t < self.start_time + self.delay_time:
            print("Waiting for delay time to pass: {}/{}".format(t, self.start_time + self.delay_time))
            velocity = 0
        else:
            velocity = self.max_vel * sin(self.w*offset_time) # current velocity in m/s
        vertical_angle = 0.0 # vertical angle in radians
        # current velocity must be positive, so change horizontal angle
        # such that if velocity < 0, push vehicle to left, else right
        horizontal_angle = pi if velocity < 0 else 0
        speed = abs(velocity)
        return speed, horizontal_angle, vertical_angle
if __name__ == '__main__':
republisher = TurbulenceExperimentNode("TurbulenceExperimentNode")
rospy.spin() |
15,086 | dbab306a3d2fb5ced0ab946636628c5c18530db2 | import psycopg2
# WARNING(review): database credentials are hard-coded in source control;
# load them from environment variables or a secrets manager instead.
DB_NAME = "esxbzxjm"
DB_USER = "esxbzxjm"
DB_PASS = "xcokxIYMBjRCnQpVJBTSCSMBGvdCEzh-"
DB_HOST = "ziggy.db.elephantsql.com"
DB_PORT = "5432"
conn = psycopg2.connect(database = DB_NAME, user = DB_USER, password = DB_PASS, host = DB_HOST, port = DB_PORT)
print("Database connected successfully")
cur = conn.cursor()
cur.execute("DELETE FROM Entry_Prevention_System_Table WHERE id = 2")
# Bug fix: `conn.commit` (no parentheses) only referenced the method and never
# called it, so the DELETE was discarded when the connection closed.
conn.commit()
print("Data deleted successfully")
print("Total rows affected: " + str(cur.rowcount)) |
15,087 | ddb0829a3ee5c0b8b0a9d55ed14e6f273cf93df0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-11 04:14
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes the `file_kit` and `file_name`
    # fields from the SysUser model.  Generated migrations should not be
    # hand-edited beyond comments.

    dependencies = [
        ('app', '0015_auto_20170210_1108'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='sysuser',
            name='file_kit',
        ),
        migrations.RemoveField(
            model_name='sysuser',
            name='file_name',
        ),
    ]
|
15,088 | c8d0f9430cfc28968764ddb12cd1ab7c8f89674c | import numpy as np
import cv2
import time
help_message = '''
USAGE: peopledetect.py <video_name> ...
Press any key to continue, ESC to stop.
'''
def inside(r, q):
    """Return True if rectangle r = (x, y, w, h) lies strictly inside q."""
    rx, ry, rw, rh = r
    qx, qy, qw, qh = q
    horizontal_ok = rx > qx and rx + rw < qx + qw
    vertical_ok = ry > qy and ry + rh < qy + qh
    return horizontal_ok and vertical_ok
def draw_detections(img, rects, thickness = 1):
    """Draw a green rectangle on *img* for every (x, y, w, h) in *rects*."""
    for x, y, w, h in rects:
        # the HOG detector returns slightly larger rectangles than the real objects.
        # so we slightly shrink the rectangles to get a nicer output.
        pad_w, pad_h = int(0.15*w), int(0.05*h)
        cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
# Run the HOG pedestrian detector over every frame of '3.mp4', draw the
# detections, and write per-frame counts to a timestamped log file.
cap = cv2.VideoCapture('3.mp4')

# Perf fix: build the HOG descriptor and load the SVM once, not every frame.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

counts = []  # per-frame "<filtered> (<raw>) found" strings (was shadowing builtin `list`)
count = 0
while True:
    flag, frame = cap.read()
    # Bug fix: stop cleanly at end of video / read failure instead of
    # crashing inside cvtColor on a None frame.
    if not flag or frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (360, 240))
    frame = cv2.resize(frame, (360, 240))
    found, w = hog.detectMultiScale(gray, winStride=(12,12), padding=(32,32), scale=1.05)
    found_filtered = []
    for ri, r in enumerate(found):
        for qi, q in enumerate(found):
            # Drop detections that are fully nested inside a larger one.
            if ri != qi and inside(r, q):
                break
        else:
            found_filtered.append(r)
    draw_detections(frame, found, 3)
    draw_detections(frame, found_filtered, 3)
    print ('%d (%d) found' % (len(found_filtered), len(found)))
    counts.append('%d (%d) found' % (len(found_filtered), len(found)))
    cv2.imshow('video',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    count += 1

# Robustness fix: write the log on both exit paths ('q' and end-of-video);
# the original only saved it when the user pressed 'q'.
file_name = time.strftime("Output - %Y%m%d-%H%M%S")
with open(file_name, 'w+') as f:
    f.write(str(counts))
cap.release()
cv2.destroyAllWindows()
15,089 | d916a86ed589f4e46a0c01847c13bb7c12ae354d |
import unittest
import mock
from datetime import datetime
class TestNumberEncoding(unittest.TestCase):
    """Round-trip tests for sosbeacon.utils number_encode / number_decode."""
    def test_encoder(self):
        """Ensure encoder produces expected encoded output."""
        from sosbeacon.utils import number_encode
        number = 123
        encoded = number_encode(number)
        self.assertEqual(encoded, 'b6')
    def test_decoder(self):
        """Ensure decoder correctly decodes a known encoded number."""
        from sosbeacon.utils import number_decode
        encoded = 'b6'
        number = number_decode(encoded)
        self.assertEqual(number, 123)
    def test_inverse(self):
        """Ensure decode(encode(number)) == number over a range of numbers."""
        from sosbeacon.utils import number_decode
        from sosbeacon.utils import number_encode
        # Step of 339 samples the range sparsely to keep the test fast.
        for number in range(0, 500000, 339):
            encoded = number_encode(number)
            decoded = number_decode(encoded)
            self.assertEqual(number, decoded)
class TestInsertTasks(unittest.TestCase):
    """Tests for sosbeacon.utils.insert_tasks batching and retry splitting."""
    @mock.patch('google.appengine.api.taskqueue.Queue')
    def test_insert_batch(self, queue_mock):
        """Ensure a small batch inserts in one go and reports all 9 tasks added."""
        from sosbeacon.utils import insert_tasks
        tasks = []
        for i in xrange(1, 10):
            tasks.append(object())
        added = insert_tasks(tasks, 'default')
        self.assertEqual(added, 9)
    @mock.patch('google.appengine.api.taskqueue.Queue.add')
    def test_splits_once(self, queue_add_mock):
        """Ensure the batch is split and insertion retried when one specific
        task raises TombstonedTaskError, losing only that task.
        """
        from google.appengine.api import taskqueue
        from sosbeacon.utils import insert_tasks
        def side_effect(*args):
            # Only batches containing task `2` are rejected.
            if 2 in args[0]:
                raise taskqueue.TombstonedTaskError('uh oh')
        queue_add_mock.side_effect = side_effect
        tasks = [i for i in xrange(0, 9)]
        added = insert_tasks(tasks, 'default')
        self.assertEqual(added, 8)
        self.assertEqual(queue_add_mock.call_count, 7)
    @mock.patch('google.appengine.api.taskqueue.Queue.add')
    def test_splits_on_tombstoned(self, queue_add_mock):
        """Ensure task batches are split and insertion is retried on
        TombstonedTaskError.
        """
        from google.appengine.api import taskqueue
        from sosbeacon.utils import insert_tasks
        queue_add_mock.side_effect = taskqueue.TombstonedTaskError
        tasks = [i for i in xrange(0, 7)]
        added = insert_tasks(tasks, 'default')
        self.assertEqual(added, 0)
        self.assertEqual(queue_add_mock.call_count, 13)
    @mock.patch('google.appengine.api.taskqueue.Queue.add')
    def test_splits_on_taskexists(self, queue_add_mock):
        """Ensure task batches are split and insertion is retried on
        TaskAlreadyExistsError.
        """
        from google.appengine.api import taskqueue
        from sosbeacon.utils import insert_tasks
        queue_add_mock.side_effect = taskqueue.TaskAlreadyExistsError
        tasks = [i for i in xrange(0, 10)]
        added = insert_tasks(tasks, 'default')
        self.assertEqual(added, 0)
        self.assertEqual(queue_add_mock.call_count, 19)
class TestFormatDatetime(unittest.TestCase):
    """Tests for sosbeacon.utils.format_datetime output formatting."""
    def test_date(self):
        """Ensure a date with no hours / minutes is returned as a date."""
        from sosbeacon.utils import format_datetime
        date = datetime(year=2012, month=8, day=30)
        encoded = format_datetime(date)
        self.assertEqual('08/30/12', encoded)
    def test_date_with_time(self):
        """Ensure a date with hours and minutes is returned as a datetime."""
        from sosbeacon.utils import format_datetime
        date = datetime(year=2012, month=8, day=30, hour=7, minute=13)
        encoded = format_datetime(date)
        self.assertEqual('08/30/12 07:13', encoded)
    def test_date_with_zero_hours(self):
        """Ensure a date with minutes but no hours is returned as a datetime."""
        from sosbeacon.utils import format_datetime
        date = datetime(year=2012, month=8, day=30, hour=0, minute=13)
        encoded = format_datetime(date)
        self.assertEqual('08/30/12 00:13', encoded)
    def test_date_with_zero_minutes(self):
        """Ensure a date with hours but no minutes is returned as a datetime."""
        from sosbeacon.utils import format_datetime
        date = datetime(year=2012, month=8, day=30, hour=19, minute=0)
        encoded = format_datetime(date)
        self.assertEqual('08/30/12 19:00', encoded)
    def test_non_input(self):
        """Ensure a missing date returns the empty string."""
        from sosbeacon.utils import format_datetime
        encoded = format_datetime(None)
        self.assertEqual('', encoded)
class TestGetLatestDatetime(unittest.TestCase):
    """Tests for sosbeacon.utils.get_latest_datetime selection logic."""

    @staticmethod
    def _latest(lhs, rhs):
        # Lazy import, matching the module's per-test import style.
        from sosbeacon.utils import get_latest_datetime
        return get_latest_datetime(lhs, rhs)

    def test_no_lhs(self):
        """A None lhs yields rhs."""
        rhs = object()
        self.assertIs(rhs, self._latest(None, rhs))

    def test_no_rhs(self):
        """A None rhs yields lhs."""
        lhs = object()
        self.assertIs(lhs, self._latest(lhs, None))

    def test_larger_lhs(self):
        """The later datetime wins when lhs is later."""
        lhs = datetime(2012, 9, 20, 3, 45)
        rhs = datetime(2012, 9, 20, 2, 45)
        self.assertIs(lhs, self._latest(lhs, rhs))

    def test_larger_rhs(self):
        """The later datetime wins when rhs is later."""
        lhs = datetime(2012, 9, 20, 2, 59)
        rhs = datetime(2012, 9, 20, 3, 0)
        self.assertIs(rhs, self._latest(lhs, rhs))

    def test_equal_inputs(self):
        """Identical inputs come straight back."""
        lhs = rhs = datetime(2012, 9, 20, 2, 59)
        result = self._latest(lhs, rhs)
        self.assertIs(rhs, result)
        self.assertIs(lhs, result)
|
15,090 | ed7ca0119417dd91622a6a17f7582999cd630376 | """
输入一个整数数组,判断该数组是不是某二叉搜索树的后序遍历的结果。
如果是则输出Yes,否则输出No。假设输入的数组的任意两个数字都互不相同。
二叉搜索树
如果节点的左子树不空,则左子树上所有结点的值均小于等于它的根结点的值;
如果节点的右子树不空,则右子树上所有结点的值均大于等于它的根结点的值;
任意节点的左、右子树也分别为二叉查找树
"""
# -*- coding:utf-8 -*-
class Solution:
    def VerifySquenceOfBST(self, sequence):
        """Return True if `sequence` could be the post-order traversal of a
        binary search tree, False otherwise.

        In a post-order traversal the root is the last element.  Everything
        before the first value greater than the root must be the left subtree
        (values <= root) and the remainder the right subtree (values >= root);
        both parts must themselves be valid post-order traversals.

        An empty or missing sequence returns False (per the problem statement).
        """
        # `is None` replaces the original `== None` (PEP 8 identity check).
        if sequence is None or len(sequence) == 0:
            return False
        length = len(sequence)
        root = sequence[-1]
        # Find the start of the right subtree: first element greater than
        # root (capped at length-1, mirroring the original scan which could
        # run off onto the root itself when no such element exists).
        split = 0
        while split < length - 1 and sequence[split] <= root:
            split += 1
        # Every element in the right-subtree region must be >= root.
        if any(value < root for value in sequence[split:-1]):
            return False
        # Recurse into non-empty subtrees only (empty slices would be
        # rejected by the guard above).
        left_ok = split == 0 or self.VerifySquenceOfBST(sequence[:split])
        right_ok = split >= length - 1 or self.VerifySquenceOfBST(sequence[split:-1])
        return left_ok and right_ok
if __name__ == '__main__':
    # Quick manual check of the slice semantics used by the solution:
    # sample[2:-1] drops the root element, sample[2:] keeps it.
    sample = [4, 8, 6, 12, 16, 14, 10]
    solver = Solution()
    without_root = sample[2:-1]
    with_root = sample[2:]
    print(without_root, with_root)
15,091 | 0d83854b0f2fba83f5698fe212ab45a2cf658a27 | import pytest
from sentinel_common.all_keywords_matcher import AllKeywordsMatcher
# Each case: (configured keywords, input text, keywords expected to be found).
_CASES = [
    (["Paris", "life"], "There is no life in Paris", ["life", "Paris"]),
    (["Paris", "life"], "There is no -life- in ,Paris.",
     ["life", "Paris"]),
    (["Paris", "life"], "There is no night in ,Pyris.", []),
    ([" "], "There is no night in ,Pyris.", []),
    ([""], "There is no night in ,Pyris.", []),
    (["Big Data"], "This is the era of Big Data.", ["Big Data"]),
    (["Big Data"], "This is the era of Bigger Data.", []),
]


@pytest.mark.parametrize("keywords, text, expected", _CASES)
def test_find_all_keywords(keywords, text, expected):
    """AllKeywordsMatcher reports exactly the configured keywords in *text*."""
    matcher = AllKeywordsMatcher(set(keywords))
    assert matcher.all_occurring_keywords(text) == expected
|
15,092 | 103a68ca9986393c6148fec53b28592f7fbe6f7d | text = 'stressed'
print(text[::-1])
|
15,093 | cf28c13ddd9c3e20339c530ee3a70ae305060e62 | from unittest import TestCase
from unittest.mock import patch
import app
class AppTest(TestCase):
    """Unit tests for the mad-libs console app; all console I/O is mocked."""

    def _check_prompted_input(self, func, prompt, value):
        # Shared driver for the get_* helpers: patch input(), call *func*,
        # then verify the prompt text and that the typed value is returned.
        with patch('builtins.input', return_value=value) as mocked_input:
            result = func()
            mocked_input.assert_called_with(prompt)
            self.assertEqual(result, value)

    def test_print_header(self):
        expected_header_text = '-----------\n Mad Libs \n-----------\n'
        with patch('builtins.print') as mocked_print:
            app.print_header()
            mocked_print.assert_called_once_with(expected_header_text)

    def test_get_noun(self):
        self._check_prompted_input(app.get_noun, 'Enter a noun: ', 'Dog')

    def test_get_verb(self):
        self._check_prompted_input(app.get_verb, 'Enter a verb: ', 'walk')

    def test_get_adjective(self):
        self._check_prompted_input(
            app.get_adjective, 'Enter a adjective: ', 'blue')

    def test_get_adverb(self):
        self._check_prompted_input(
            app.get_adverb, 'Enter a adverb: ', 'quickly')

    def test_print_mad_lib(self):
        noun, verb, adjective, adverb = 'Dog', 'walk', 'blue', 'quickly'
        expected_mad_lib = f'Do you {verb} your {adjective} {noun} {adverb}? That\'s hilarious!'
        with patch('builtins.print') as mocked_print:
            app.print_mad_lib(noun, verb, adjective, adverb)
            mocked_print.assert_called_once_with(expected_mad_lib)
15,094 | 94c32e59847e0a223a343c15424ba07c6d154dbf |
# coding: utf-8
# In[1]:
import numpy as np
from numpy import linalg as LA
import tensorflow as tf
import matplotlib.pyplot as plt
# fix random seed for reproducibility
np.random.seed()
m = 1 # dimension
k_squared = 0.04
m_inv = 1.0
learning_rate = 1e-5
# epochs = 20000
epochs = 10000
batch_size = 1000000
x_stddev = 5
test_averaging=100
decay = 1 - 10*1e-10
# x_train = np.zeros((epochs,1))
# for i in range(0,epochs):
# x_train[i]=np.random.normal(scale = x_stddev)
# # x_train[i]=np.random.uniform(-20,20,1)'module' object has no attribute 'norm'
w1=([[ 0.0901572 , 0.2370918 , 0.6920835 , -0.45759007, -0.22167274,
-0.46439773, -0.45912468, 0.6203555 , 0.0419175 , 0.60444146,
4.952244 , 0.04192306, 0.53345317, 0.22216071, -0.24009007,
6.301405 , -0.50758445, -0.21116066, -0.37131187, -0.22089699,
0.04239784, 0.04331616, -0.18591626, -0.22142634, -0.4953288 ,
-0.23889707, 0.67850924, 0.5476355 , -0.7077681 , -1.0123378 ,
0.04195131, 0.22627208, -0.1888109 , 0.21195143, 0.44928712,
0.04276987, -0.20532611, 0.44252077, -0.04190878, 0.46343717,
-0.22356562, -0.5474644 , 0.04206235, -0.7823536 , -0.23852947,
0.26123488, -0.2369954 , -0.25654712, -0.25827566, 0.5539032 ,
0.22289808, 0.51685596, 1.0848937 , -0.6088887 , -0.04201594,
0.21767725, -0.23810348, -0.4646694 , -0.53889185, 1.1317953 ,
0.2089353 , -0.23368704, -5.6309223 , -0.2510263 , 0.71514434,
1.2417319 , 5.88868 , 0.4928691 , 0.2434442 , -0.54655886,
0.6717308 , 0.44354093, -0.7333635 , -0.6745134 , -0.04279398,
-0.7975697 , 0.22850451, -0.25397167, 0.2451518 , 1.1024855 ,
-0.53172445, 0.04208738, -0.04233624, 0.8983515 , 0.7710562 ,
-0.2548618 , -0.21645324, -1.0170518 , 0.9672949 , -0.23664552,
-0.22946735, 0.63287175, -0.79163665, -0.52115196, 0.21819146,
-0.22541553, 0.69617873, 0.73459744, 0.50693244, -0.24401082,
-0.5940728 , 1.3320855 , -1.140783 , 0.23237722, -1.1244652 ,
-5.6705046 , 0.2540727 , -0.04189253, -0.20804366, -0.04187457,
-0.21428825, 0.04335834, 0.96757776, -5.0284066 , -0.21626869,
-0.540456 , 0.51839244, 0.21898666, 0.9066629 , 0.22020821,
-0.50667083, 0.7983404 , -5.5656185 , -0.04212693, 0.25555643,
-0.45822552, 0.24277431, -0.04205061, 0.15989499, 0.23738208,
0.2237451 , 0.24180941, 0.49051645, -0.45438182, 0.47147265,
-0.04477705, -5.479455 , 0.04174316, 0.2551995 , 0.57939404,
-0.6557258 , -0.04206115, 0.6763663 , 0.23443314, 0.22873235,
-0.04198467, -0.4861976 , -0.6498148 , 0.44098404, -0.04172933]])
w2=([[-0.84504426],
[-0.51247114],
[-2.0340562 ],
[-0.76634175],
[ 0.61729795],
[-0.58101785],
[-0.6854419 ],
[ 0.6577067 ],
[-0.7736458 ],
[-1.8916265 ],
[-1.090016 ],
[-0.873359 ],
[ 0.42003942],
[-0.47995704],
[ 0.5497382 ],
[-2.1801522 ],
[-0.4831816 ],
[ 0.5648663 ],
[ 0.9415591 ],
[ 0.78689337],
[-0.91083336],
[-0.9763873 ],
[ 0.72957134],
[ 0.5560705 ],
[-0.4719117 ],
[ 0.5045661 ],
[ 0.66004866],
[-1.5987552 ],
[-0.4643787 ],
[-1.9016262 ],
[-0.96371204],
[-0.611284 ],
[ 0.65741754],
[-0.5599199 ],
[ 0.45351097],
[-0.97737604],
[ 0.7038435 ],
[ 0.5943796 ],
[ 0.9532466 ],
[ 0.7460163 ],
[ 0.5358916 ],
[-0.44170648],
[-0.9419488 ],
[-0.67798716],
[ 0.46497133],
[-0.391163 ],
[ 0.592325 ],
[ 0.45341557],
[ 0.43128943],
[ 0.41603804],
[-0.5674596 ],
[ 0.38761157],
[ 2.704492 ],
[-0.80798954],
[ 0.83548236],
[-0.5111326 ],
[ 0.6162054 ],
[-0.7550416 ],
[-0.4759281 ],
[-2.5150294 ],
[-0.50941396],
[ 0.49656197],
[-1.6215047 ],
[ 0.47244617],
[ 0.5376818 ],
[ 3.9775271 ],
[ 1.6411495 ],
[ 0.45862758],
[-0.47453666],
[-0.45376387],
[ 0.5765134 ],
[ 0.56581146],
[-1.1258857 ],
[-1.0639522 ],
[ 1.0760058 ],
[-1.235642 ],
[-0.53190786],
[ 0.47500044],
[-0.4640562 ],
[ 2.372436 ],
[-0.67921394],
[-1.0515941 ],
[ 1.1015248 ],
[ 1.4750271 ],
[-2.5024996 ],
[ 0.43387246],
[ 0.53801376],
[-2.327031 ],
[ 1.6461738 ],
[ 0.4792684 ],
[ 0.76675403],
[ 0.4892529 ],
[-1.1853842 ],
[-0.38456675],
[-0.80742 ],
[ 0.45512152],
[ 0.44872195],
[-2.1801472 ],
[ 0.67657053],
[ 0.40404373],
[-0.7937116 ],
[ 0.77783364],
[-2.4614215 ],
[-0.6792038 ],
[ 2.5339882 ],
[-1.5957985 ],
[-0.4930483 ],
[ 0.9237745 ],
[ 0.59356 ],
[ 0.9956936 ],
[ 0.47309944],
[-0.9341501 ],
[ 1.6710144 ],
[ 1.1764897 ],
[ 0.46367607],
[-0.7061653 ],
[ 0.46270266],
[-0.8225886 ],
[ 1.8290645 ],
[-0.5919749 ],
[-0.44208294],
[-1.948723 ],
[-1.3858926 ],
[ 0.8691517 ],
[-0.37294617],
[-0.6558015 ],
[-0.6871818 ],
[ 1.0781469 ],
[-0.87414324],
[-0.47635847],
[-0.5639866 ],
[-0.47552544],
[ 0.7286468 ],
[-0.34246516],
[ 0.6627983 ],
[ 0.7922385 ],
[-0.80032754],
[-0.6089186 ],
[-0.46824703],
[ 0.40888965],
[-0.56078476],
[ 0.98349524],
[ 0.48105317],
[-0.5328922 ],
[-0.70839876],
[ 1.0339078 ],
[-0.61342776],
[-0.79129976],
[ 0.48441455],
[ 0.5570059 ]])
# In[ ]:
b1=([-1.49632066e-01, 2.16088206e-01, 3.65778732e+00, -1.21041000e+00,
-1.35061651e-01, -1.29561055e+00, -1.22450840e+00, -2.32706118e+00,
-2.15838999e-02, 3.23842049e+00, 9.99821246e-01, 5.85471094e-02,
1.77022383e-01, 1.33129925e-01, -2.35600263e-01, -9.69530642e-01,
7.31552601e-01, -9.77801457e-02, -1.28652573e+00, 2.19140470e-01,
1.23102725e-01, -1.57810926e-01, 1.53959572e-01, -1.32225156e-01,
-1.57481730e-01, -2.27377295e-01, 4.70594555e-01, 2.85312033e+00,
3.12517256e-01, 5.74599028e+00, -2.24734023e-02, 1.56200081e-01,
-7.49236792e-02, 9.45027769e-02, -9.54202712e-01, -1.19746946e-01,
1.76245585e-01, -1.47855604e+00, 1.07089831e-02, 1.27336562e+00,
2.21104607e-01, -1.81072652e-01, 8.26996788e-02, -5.77640235e-01,
-2.25629151e-01, 3.77086610e-01, -2.16601476e-01, -3.45170379e-01,
-3.56887221e-01, -5.90745807e-01, -2.19919547e-01, -1.86245930e+00,
6.39037895e+00, 2.27631497e+00, -7.94772431e-02, 1.15748756e-01,
3.03080708e-01, -1.28756618e+00, -1.78790972e-01, -5.63836622e+00,
8.99272710e-02, -1.94928318e-01, -7.41466433e-02, -3.07720184e-01,
2.70801663e-01, 7.34310913e+00, 8.29299539e-02, -7.81100869e-01,
2.56538272e-01, -1.80862710e-01, 2.18636543e-01, 1.07038522e+00,
-2.78851628e+00, -2.51557636e+00, 1.20577067e-01, -3.08366776e+00,
1.66032478e-01, -3.27756613e-01, 2.67747581e-01, -6.31493044e+00,
-1.79744363e+00, -4.68141548e-02, 7.84308538e-02, -5.00692749e+00,
4.00230837e+00, -3.33558679e-01, -1.12384461e-01, -5.97595739e+00,
-5.45763254e+00, -2.12760210e-01, 2.53413409e-01, 1.98413730e-01,
4.21520996e+00, 6.86769903e-01, -2.12254256e-01, -1.46499500e-01,
4.68130678e-01, -4.72452021e+00, -1.81595242e+00, -2.60216951e-01,
2.21049786e+00, -1.94112194e+00, 6.55437994e+00, -2.68400759e-01,
5.60166454e+00, -7.55500719e-02, 3.28553319e-01, 6.42770529e-03,
-9.20422822e-02, -1.11987339e-02, -1.00595385e-01, -1.61407873e-01,
-5.45945311e+00, -1.01744628e+00, -1.06990181e-01, 1.96982336e+00,
1.70830369e-01, -2.16641054e-01, 5.29849386e+00, 1.28267542e-01,
7.34108150e-01, -4.16245031e+00, -7.15808198e-02, -9.45318416e-02,
3.37766856e-01, -1.21507788e+00, -3.34076196e-01, 4.01906781e-02,
-1.60489708e-01, 2.17334837e-01, 1.42836973e-01, 2.45796412e-01,
1.53452313e+00, 9.28530157e-01, 1.37115467e+00, 1.37233928e-01,
-6.79805875e-02, 4.52714004e-02, 3.36023450e-01, -2.64137276e-02,
-2.08564326e-01, 4.37483490e-02, 2.18686923e-01, 2.00063869e-01,
-2.48323262e-01, 2.81832628e-02, -1.50140417e+00, 2.45667958e+00,
-9.98386204e-01, -4.18332741e-02])
b2= ([-0.47102088])
w1_init = tf.constant_initializer(w1)
b1_init = tf.constant_initializer(b1)
w2_init = tf.constant_initializer(w2)
b2_init = tf.constant_initializer(b2)
# In[ ]:
w3=([[ 0.13879539, -0.13859922, -0.13828675, 0.13855068, 0.13883771,
-0.13857202, 0.13830268, -0.13850647, -0.13853791, -0.1384053 ,
0.1412606 , 0.13873734, -0.13856825, 0.13891087, 0.14233626,
-0.13833769, -0.13845266, -0.13875286, 0.13869044, 0.13887323,
0.13870366, -0.13859619, 0.13872615, 0.1389381 , -0.14017113,
0.13879456, -0.13882846, 0.1387299 , -0.1387175 , -0.13880575]])
w4=([[ 0.48300147],
[-1.2237911 ],
[-0.4338532 ],
[ 1.1876252 ],
[ 0.9885277 ],
[-1.3522526 ],
[ 1.0765364 ],
[-0.8109151 ],
[-1.0949211 ],
[-1.1373827 ],
[ 0.58461624],
[ 0.94439197],
[-1.2257808 ],
[ 1.2257911 ],
[ 0.37756 ],
[-0.9867794 ],
[-1.1356777 ],
[-0.7308337 ],
[ 1.3722858 ],
[ 1.1147363 ],
[ 1.1841174 ],
[-1.4234818 ],
[ 1.1866816 ],
[ 1.2437216 ],
[-0.7649936 ],
[ 0.8123563 ],
[-0.9246504 ],
[ 1.1653202 ],
[-1.1385669 ],
[-0.7842877 ]])
b3=([ 0.00128192, -0.02412418, -0.04015647, 0.02808985, 0.00440433,
-0.02689951, 0.051215 , -0.02894638, -0.02851768, -0.04047732,
-0.12068842, 0.01155403, -0.02665246, 0.00033755, -0.16250175,
-0.04577867, -0.0361448 , -0.00925925, 0.01730765, 0.00252271,
0.0154872 , -0.02517299, 0.01376084, -0.00146507, 0.07024308,
0.00624953, -0.00542742, 0.01337393, -0.01448685, -0.00600676])
b4=([0.13220455])
w3_init = tf.constant_initializer(w3)
b3_init = tf.constant_initializer(b3)
w4_init = tf.constant_initializer(w4)
b4_init = tf.constant_initializer(b4)
# In[2]:
# declare the training data placeholders
# input x - just one is x0
# Initial state fed from numpy each step; shape (batch, 1).
x0 = tf.placeholder(tf.float32, [None, 1])
# x1 = tf.placeholder(tf.float32, [None, 1])
# x1_noise = tf.placeholder(tf.float32, [None, 1])
# x2 = tf.placeholder(tf.float32, [None, 1])
# # now declare the output data placeholder
# u1 = tf.placeholder(tf.float32, [None, 1])
# u2 = tf.placeholder(tf.float32, [None, 1])
# y = tf.placeholder(tf.float32, [None, 2])
# In[3]:
# # now declare the weights connecting the input to the hidden layer
# W1 = tf.Variable(tf.random_normal([1, 150], stddev=0.03), name='W1')
# b1 = tf.Variable(tf.random_normal([150]), name='b1')
# # and the weights connecting the hidden layer to the u1 output layer
# W2 = tf.Variable(tf.random_normal([150, 1], stddev=0.03), name='W2')
# b2 = tf.Variable(tf.random_normal([1]), name='b2')
# # declare weights connecting x1+z to a hidden layer
# W3 = tf.Variable(tf.random_normal([1, 30], stddev=0.03), name='W1')
# b3 = tf.Variable(tf.random_normal([30]), name='b3')
# # and the weights connecting the hidden layer to the u1 output layer
# W4 = tf.Variable(tf.random_normal([30, 1], stddev=0.03), name='W2')
# b4 = tf.Variable(tf.random_normal([1]), name='b4')
# NOTE(review): this Variable is dead code — `u1` is rebound to the dense
# layer's output a few lines below, and the Variable is never used.
u1 = tf.Variable(tf.random_normal([1]))
# In[4]:
# # calculate the output of the hidden layer
# hidden_out_1 = tf.add(tf.matmul(x0, W1), b1)
# hidden_out_1 = tf.nn.sigmoid(hidden_out_1)
# # # output layer
# u1 = tf.identity(tf.add(tf.matmul(hidden_out_1, W2), b2))
# # print(u1.get_shape())
# # x1 = u1 + x0
# First controller: 150-unit tanh layer, weights warm-started from the
# constant initializers (w1_init/b1_init) defined above.
hidden_out_1 = tf.layers.dense(
    x0, 150, tf.nn.tanh, use_bias=True, kernel_initializer=w1_init,
    bias_initializer=b1_init, name='firstlayer')
u1 = tf.layers.dense(
    hidden_out_1, m, activation=tf.identity, use_bias=True, kernel_initializer=w2_init,
    bias_initializer=b2_init, name='secondlayer')
# State update: x1 = x0 + u1 (control applied to the initial state).
x1 = u1 + x0
# add noise to x1
# NOTE(review): z is a single (1, 1) draw broadcast across the whole batch,
# i.e. every sample in a step sees the SAME noise realisation — confirm
# that is intended rather than per-sample noise of shape (batch, 1).
z = tf.random_normal([1,1])
x1_noise = x1 + z
# hidden_out_2 = tf.add(tf.matmul(x1_noise, W3), b3)
# hidden_out_2 = tf.nn.sigmoid(hidden_out_2)
# u2 = tf.identity(tf.add(tf.matmul(hidden_out_2, W4), b4))
# Second controller: estimates a correction from the noisy observation.
hidden_out_2 = tf.layers.dense(
    x1_noise, 30, tf.nn.sigmoid, use_bias=True, name='thirdlayer',
    kernel_initializer=w3_init, bias_initializer=b3_init)
u2 = tf.layers.dense(
    hidden_out_2, m, activation=tf.identity, use_bias=True, name='fourthlayer',
    kernel_initializer=w4_init, bias_initializer=b4_init)
x2 = x1 - u2
# In[5]:
# wits_cost = tf.add(tf.multiply(m_inv,tf.multiply(k_squared, tf.norm(u1,'euclidean'))),
#                    tf.multiply(m_inv,tf.norm(x2,'euclidean')))
# wits_cost = tf.norm(u1)
# Cost: control effort (weighted by k_squared) plus final-state deviation,
# averaged over the batch.
wits_cost = (k_squared*tf.norm(u1)**2 + tf.norm(x2)**2) / batch_size
# Learning rate can be overridden per step via feed_dict (used for decay).
adaptive_learning_rate = tf.placeholder_with_default(learning_rate, [])
optimizer = tf.train.AdamOptimizer(learning_rate=adaptive_learning_rate).minimize(wits_cost)
# In[6]:
# finally setup the initialisation operator
init_op = tf.global_variables_initializer()
# In[7]:
plt.figure()
# start the session
with tf.Session() as sess:
    # initialize the variables
    sess.run(init_op)
    # One flat pool of Gaussian initial states for the whole run.
    x_train = np.random.normal(size=epochs * batch_size * m, scale=x_stddev)
    for epoch in range(epochs):
        # x_batch = x_train[epoch].reshape(1,1)
        # NOTE(review): the window advances by 1 per epoch (not by
        # batch_size), so consecutive batches overlap almost entirely and
        # only the first epochs + batch_size samples of x_train are used.
        x_batch = x_train[epoch: epoch + (batch_size * m)].reshape((batch_size, m))
        # c = sess.run(optimiser, feed_dict={x0:np.zeros((10,1))})#{x0: x_batch})
        # sess.run(optimiser, feed_dict={x0: x_batch})
        # One Adam step with the exponentially decayed learning rate.
        _,cost = sess.run([optimizer, wits_cost], feed_dict={x0: x_batch,
            adaptive_learning_rate: learning_rate * (decay**epoch)})
        # print("Epoch:", (epoch + 1), "cost =", "{:.3f}")
        # print(u1)
        # NOTE(review): epoch % 1 == 0 is always true — logging happens
        # every epoch; raise the modulus to thin it out.
        if epoch % 1 == 0:
            print("Epoch: ", epoch, "Cost: ",cost)
            plt.plot(epoch, cost, 'bo')
    # Test over a continuous range of X
    x0_test = np.linspace(-4*x_stddev, 4*x_stddev, num=1000)
    u1_test, u2_test, x1_test = np.zeros((1, 1000)), np.zeros((1, 1000)), np.zeros(
        (1, 1000))
    for i in range(1000):
        u1t, u2t, x1t = 0, 0, 0
        # Average over several forward passes since z is resampled each run.
        for _ in range(test_averaging):
            u1tmp, u2tmp, x1tmp = sess.run(
                [u1, u2, x1],
                feed_dict={x0: x0_test[i].reshape((1, 1))})
            u1t += u1tmp
            u2t += u2tmp
            x1t += x1tmp
        u1_test[0, i] = u1t / test_averaging
        # Sign flipped for plotting: u2 is subtracted from x1 in the graph.
        u2_test[0, i] = -u2t / test_averaging
        x1_test[0, i] = x1t / test_averaging
    # Snapshot the trained layer weights (for comparison against the
    # warm-start constants below).
    with tf.variable_scope('firstlayer', reuse=True):
        w1_post = tf.get_variable('kernel').eval()
        b1_post = tf.get_variable('bias').eval()
    with tf.variable_scope('secondlayer', reuse=True):
        w2_post = tf.get_variable('kernel').eval()
        b2_post = tf.get_variable('bias').eval()
    with tf.variable_scope('thirdlayer', reuse=True):
        w3_post = tf.get_variable('kernel').eval()
        b3_post = tf.get_variable('bias').eval()
    with tf.variable_scope('fourthlayer', reuse=True):
        w4_post = tf.get_variable('kernel').eval()
        b4_post = tf.get_variable('bias').eval()
print(cost)
plt.show()
l1, = plt.plot(x0_test, u1_test[0], label="U1 Test")
l3, = plt.plot(x0_test, u2_test[0], label="U2 Test")
plt.legend(handles=[l1, l3])
# plt.title("{} Unit NN With Activation Fn {}".format(
#     str(num_units_1), str(activation_fn)))
# plt.savefig("figure_u_1.png
plt.show()
plt.clf()
l2, = plt.plot(x0_test, x1_test[0], label="X1 Test")
# plt.title("{} Unit NN With Activation Fn {}".format(
#     str(num_units_1), str(activation_fn)))
plt.legend(handles=[l2])
# plt.savefig("figure_x_1.png")
plt.show()
# In[ ]:
# How far training moved the weights from their warm-start values.
w1_post.shape
np.array(w1).shape
np.linalg.norm(w1_post - np.array(w1))
# np.linalg.norm(w1)
# In[ ]:
np.linalg.norm(w4_post - np.array(w4))
# In[ ]:
# In[ ]:
|
15,095 | e05911eaf1f6275c7f10327f38359514c50f8e15 | """V2 backend for `asr_recog.py` using py:class:`espnet.nets.beam_search.BeamSearch`."""
import json
import logging
import pickle
import numpy as np
import os
import torch
from collections import OrderedDict
import random
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr import load_trained_model
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.batch_beam_search import BatchBeamSearch
from espnet.nets.beam_search import BeamSearch
from espnet.nets.lm_interface import dynamic_import_lm
from espnet.nets.scorer_interface import BatchScorerInterface
from espnet.nets.scorers.length_bonus import LengthBonus
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.training.batchfy import make_batchset
from espnet.asr.pytorch_backend.asr import CustomConverter, _recursive_to
from ctcdecode import CTCBeamDecoder
import numpy
def recog_v2(args):
    """Decode with custom models that implements ScorerInterface.
    Notes:
        The previous backend espnet.asr.pytorch_backend.asr.recog
        only supports E2E and RNNLM
    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details
    """
    logging.warning("experimental API for custom LMs is selected by --api v2")
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    # add lang2ph to the model
    # Per-language phoneme masking: lang2ph JSON maps each language to its
    # phoneme inventory; lang2phid keeps only those also present in the
    # model's char_list (plus the special symbols), as char_list indices.
    if args.mask_phoneme:
        logging.warning(f'mask phoneme and create lang2ph for model')
        assert args.lang2ph is not None
        with open(args.lang2ph, 'r') as f:
            model.lang2ph = json.load(f)
        model.lang2phid = {}
        for lang, phones in model.lang2ph.items():
            phoneset = set(phones + ['<blank>', '<unk>', '<space>', '<eos>'])
            phoneset = phoneset.intersection(model.args.char_list)
            model.lang2phid[lang] = list(map(model.args.char_list.index, phoneset))
            # model.lang2phid[lang] = list(map(model.args.char_list.index, phones+['<blank>', '<unk>', '<space>', '<eos>']))
        model.ctc.lang2phid = model.lang2phid
        logging.warning(f'model lang2phid {model.lang2phid}')
    assert isinstance(model, ASRInterface)
    model.eval()
    # Feature loader only — no targets are needed for decoding.
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    logging.warning(f'args.rnnlm: {args.rnnlm}')
    if args.rnnlm:
        lm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
        # NOTE: for a compatibility with less than 0.5.0 version models
        lm_model_module = getattr(lm_args, "model_module", "default")
        lm_class = dynamic_import_lm(lm_model_module, lm_args.backend)
        lm = lm_class(len(train_args.char_list), lm_args)
        torch_load(args.rnnlm, lm)
        lm.eval()
    else:
        lm = None
    if args.ngram_model:
        from espnet.nets.scorers.ngram import NgramFullScorer
        from espnet.nets.scorers.ngram import NgramPartScorer
        if args.ngram_scorer == "full":
            ngram = NgramFullScorer(args.ngram_model, train_args.char_list)
        else:
            ngram = NgramPartScorer(args.ngram_model, train_args.char_list)
    else:
        ngram = None
    # Assemble the weighted scorer ensemble used by the beam search.
    scorers = model.scorers()
    scorers["lm"] = lm
    scorers["ngram"] = ngram
    scorers["length_bonus"] = LengthBonus(len(train_args.char_list))
    weights = dict(
        decoder=1.0 - args.ctc_weight,
        ctc=args.ctc_weight,
        lm=args.lm_weight,
        ngram=args.ngram_weight,
        length_bonus=args.penalty,
    )
    beam_search = BeamSearch(
        beam_size=args.beam_size,
        vocab_size=len(train_args.char_list),
        weights=weights,
        scorers=scorers,
        sos=model.sos,
        eos=model.eos,
        token_list=train_args.char_list,
        pre_beam_score_key=None if args.ctc_weight == 1.0 else "full",
    )
    # TODO(karita): make all scorers batchfied
    # Upgrade to the faster BatchBeamSearch only if every full scorer
    # supports batched scoring.
    if args.batchsize == 1:
        non_batch = [
            k
            for k, v in beam_search.full_scorers.items()
            if not isinstance(v, BatchScorerInterface)
        ]
        if len(non_batch) == 0:
            beam_search.__class__ = BatchBeamSearch
            logging.info("BatchBeamSearch implementation is selected.")
        else:
            logging.warning(
                f"As non-batch scorers {non_batch} are found, "
                f"fall back to non-batch implementation."
            )
    if args.ngpu > 1:
        raise NotImplementedError("only single GPU decoding is supported")
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    beam_search.to(device=device, dtype=dtype).eval()
    js = read_json_data(model.args, args.recog_json)
    # read json data
    # with open(args.recog_json, "rb") as f:
    #     js = json.load(f)["utts"]
    # Deterministically subsample the decoding set: shuffle with args.seed
    # and keep the first args.recog_size utterances.
    random.seed(args.seed)
    items = list(js.items())
    random.shuffle(items)
    js = OrderedDict(items[:args.recog_size])
    logging.warning(f'data json len {len(js)}')
    import re
    def get_lang(name):
        # Derive the language id from the utterance name prefix —
        # presumably names look like "<lang>[-...]_<utt>"; TODO confirm
        # against the data preparation scripts.
        s = name.split('_')[0]
        s = re.sub(r'\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s
        return s
    new_js = {}
    with torch.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
            lang_labels = None
            lang_labels_for_masking = None
            if args.lang_label:
                lang_label = get_lang(name)
                if args.mask_phoneme:
                    lang_labels_for_masking = [lang_label] # true lang labels
                if args.fake_lang_label:
                    lang_labels = [args.fake_lang_label]
            # Decode one utterance at a time (batchsize > 1 is rejected above).
            batch = [(name, js[name])]
            feat = load_inputs_and_targets(batch)[0][0]
            enc = model.encode(torch.as_tensor(feat).to(device=device, dtype=dtype), lang_labels=lang_labels)
            nbest_hyps = beam_search(
                x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio,
                mask_phoneme=args.mask_phoneme, lang_labels_for_masking=lang_labels_for_masking
            )
            nbest_hyps = [
                h.asdict() for h in nbest_hyps[: min(len(nbest_hyps), args.nbest)]
            ]
            new_js[name] = add_results_to_json(
                js[name], nbest_hyps, train_args.char_list
            )
    # Dump the n-best results in espnet's standard result-json layout.
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def recog_ctconly_mask(args):
    """CTC-only batch decoding with per-language phoneme masking.

    Runs the encoder, masks the CTC log-probabilities of every phoneme that
    is not in args.recog_lang's inventory (per args.lang2ph), then decodes
    with an external CTC beam decoder — no attention decoder or LM is used.

    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details
    """
    logging.warning(f'RECOGCTCONLY')
    logging.warning("experimental API for custom LMs is selected by --api v2")
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    model.eval()
    # NOTE(review): this loader is built but never used below — the data is
    # read through load_rc / recog_iter instead.
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    # logging.warning(f'Recog deep [model.args] {model.args}')
    with open(args.recog_json, "rb") as f:
        recog_json = json.load(f)["utts"]
    use_sortagrad = model.args.sortagrad == -1 or model.args.sortagrad > 0
    converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
    # make minibatch list (variable length)
    # NOTE(review): batch size 16 and batch_bins=400000 are hard-coded here,
    # overriding the model.args values shown in the inline comments.
    recog = make_batchset(
        recog_json,
        16, # model.args.batch_size,
        model.args.maxlen_in,
        model.args.maxlen_out,
        model.args.minibatches,
        min_batch_size=model.args.ngpu if model.args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=model.args.batch_count,
        batch_bins=400000, #model.args.batch_bins,
        batch_frames_in=model.args.batch_frames_in,
        batch_frames_out=model.args.batch_frames_out,
        batch_frames_inout=model.args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    load_rc = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=model.args.preprocess_conf,
        preprocess_args={"train": False}, # Switch the mode of preprocessing
    )
    recog_iter = ChainerDataLoader(
        dataset=TransformDataset(
            recog, lambda data: converter([load_rc(data)]), utt=True
        ),
        batch_size=1,
        num_workers=model.args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    logging.info(f'Character list: {model.args.char_list}')
    decoder = CTCBeamDecoder(
        labels=model.args.char_list, beam_width=args.beam_size, log_probs_input=True
    )
    # Build lang -> allowed char_list indices from the lang2ph JSON
    # (inventory plus special symbols, restricted to the model's char_list).
    with open(args.lang2ph, 'r') as f:
        model.lang2ph = json.load(f)
    model.lang2phid = {}
    for lang, phones in model.lang2ph.items():
        phoneset = set(phones + ['<blank>', '<unk>', '<space>', '<eos>'])
        phoneset = phoneset.intersection(model.args.char_list)
        model.lang2phid[lang] = list(map(model.args.char_list.index, phoneset))
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    # NOTE(review): bare print() debug statements left in (here and in the
    # loop below) — consider switching them to logging.
    print(args.recog_lang)
    for batch in recog_iter:
        names, x = batch[0], batch[1:]
        # logging.warning(f"Recog deep [names] {names}")
        x = _recursive_to(x, device)
        xs_pad, ilens, ys_pad = x
        logprobs, seq_lens = model.encode_with_length(xs_pad, ilens)
        last_dim = logprobs.size(-1)
        min_value = float(
            numpy.finfo(torch.tensor(0, dtype=logprobs.dtype).numpy().dtype).min
        )
        # Mask every output unit outside the target language's inventory by
        # forcing its log-probability to the dtype minimum before decoding.
        masked_dim = [k for k in range(last_dim) if k not in model.lang2phid[args.recog_lang]]
        print(masked_dim)
        logprobs[:,: ,masked_dim] = min_value
        # logging.warning(f'Recog Deep [logprobs] {logprobs.size()}')
        out, scores, offsets, seq_lens = decoder.decode(logprobs, seq_lens)
        # Keep only the top beam per utterance and record it in result-json form.
        for hyp, trn, length, name, score in zip(out, ys_pad, seq_lens, names, scores): # iterate batch
            # logging.warning(f'{score}')
            best_hyp = hyp[0,:length[0]]
            new_js[name] = add_results_to_json(
                js[name], [{"yseq": best_hyp, "score": float(score[0])}], model.args.char_list
            )
        # logging.warning(f'Recog deep [new_js] {new_js}')
        # break
    # raise
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def modify_data_json(args, js):
    """Rewrite each utterance's input entry in *js* (in place) to point at a
    pre-extracted wav2vec feature file.

    For every utterance id the feature path becomes
    ``{args.wav2vec_feat_folder}/<uttid>.npy`` with filetype ``npy``, and the
    recorded shape is refreshed from the .npy file itself; 1-D feature
    vectors are normalised to ``(T, 1)``.

    Args:
        args: namespace providing ``wav2vec_feat_folder``.
        js: espnet data-json ``utts`` dict; mutated in place.
    """
    feat_folder = args.wav2vec_feat_folder
    for uttid, v in js.items():
        npy_file = f'{feat_folder}/{uttid}.npy'
        assert 'shape' in v['input'][0]
        assert 'feat' in v['input'][0]
        v['input'][0]['feat'] = npy_file
        v['input'][0]['filetype'] = 'npy'
        # BUG FIX: the original read `.shape[0]` (always an int), which made
        # the 1-D normalisation branch dead code and recorded a bogus
        # (num_frames, 1) shape for 2-D feature matrices.  Use the full shape.
        shape = np.load(npy_file, mmap_mode='r').shape
        if len(shape) == 1:
            shape = (shape[0], 1)
        v['input'][0]['shape'] = shape
def read_json_data(args, json_path):
    """Load an espnet data-json ``utts`` dict, optionally switching features
    to pre-extracted wav2vec .npy files.

    Behaviour:
      * fbank mode (``args.wav2vec_feature`` absent or falsy): just parse
        ``json_path``.
      * wav2vec mode: if ``{json_path}.npy`` exists it is a previously
        modified json and is loaded as-is; otherwise the original json is
        rewritten via :func:`modify_data_json` and cached to
        ``{json_path}.npy`` for subsequent runs.

    Args:
        args: namespace; may provide ``wav2vec_feature`` and
            ``wav2vec_feat_folder``.
        json_path: path to the data json.

    Returns:
        dict: the (possibly modified) ``utts`` mapping.
    """
    if not hasattr(args, 'wav2vec_feature') or not args.wav2vec_feature:
        # use fbank
        logging.warning(f'Use fbank features {json_path}')
        with open(json_path, "rb") as f:
            js = json.load(f)["utts"]
        return js
    # use wav2vec
    cache_path = f'{json_path}.npy'
    if os.path.isfile(cache_path):
        # already modified js
        logging.warning(f'Use modified npy json {cache_path}')
        with open(cache_path, "r") as f:
            js = json.load(f)["utts"]
        return js
    # need to modify js
    logging.warning(f'modifying npy json {json_path}')
    with open(json_path, "rb") as f:
        js = json.load(f)["utts"]
    # Reuse the shared helper instead of duplicating the rewrite loop here.
    modify_data_json(args, js)
    logging.warning(f'saving modified npy json {json_path}')
    with open(cache_path, "w") as f:
        json.dump({'utts': js}, f, indent=2)
    return js
def recog_ctconly(args):
    """Decode with a CTC-only model plus an external CTC beam-search decoder.

    Loads the trained model, rebuilds recognition batches, runs the encoder
    to obtain frame-level log-probabilities, decodes them with
    ``CTCBeamDecoder`` (optionally fused with an n-gram LM via
    ``args.lang_model``/``args.lang_model_weight``), and writes the
    hypotheses to ``args.result_label`` as an espnet result json.

    Notes:
        The previous backend espnet.asr.pytorch_backend.asr.recog
        only supports E2E and RNNLM

    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details
    """
    logging.warning(f'RECOGCTCONLY')
    logging.warning("experimental API for custom LMs is selected by --api v2")
    # unsupported decoding modes are rejected up front
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    # add lang2ph to the model
    if args.mask_phoneme:
        # Build a per-language phone-id whitelist so CTC output can be
        # restricted to each language's phone inventory.
        logging.warning(f'mask phoneme and create lang2ph for model')
        assert args.lang2ph is not None
        with open(args.lang2ph, 'r') as f:
            model.lang2ph = json.load(f)
        model.lang2phid = {}
        for lang, phones in model.lang2ph.items():
            # always allow the special symbols, then keep only phones the
            # model actually has output units for
            phoneset = set(phones + ['<blank>', '<unk>', '<space>', '<eos>'])
            phoneset = phoneset.intersection(model.args.char_list)
            model.lang2phid[lang] = list(map(model.args.char_list.index, phoneset))
            # model.lang2phid[lang] = list(map(model.args.char_list.index, phones+['<blank>', '<unk>', '<space>', '<eos>']))
        model.ctc.lang2phid = model.lang2phid
        logging.warning(f'model lang2phid {model.lang2phid}')
    assert isinstance(model, ASRInterface)
    model.eval()
    # NOTE(review): appears unused — batches are loaded via `load_rc` below
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    # logging.warning(f'Recog deep [model.args] {model.args}')
    # with open(args.recog_json, "rb") as f:
    #     recog_json = json.load(f)["utts"]
    recog_json = read_json_data(model.args, args.recog_json)
    # optionally decode only a seeded random subset of the data
    if args.recog_size is not None and args.recog_size > 0:
        random.seed(args.seed)
        items = list(recog_json.items())
        random.shuffle(items)
        recog_json = OrderedDict(items[:args.recog_size])
    logging.warning(f'data json len {len(recog_json)}')
    # if model.args.wav2vec_feature:
    #     modify_data_json(model.args, recog_json)
    use_sortagrad = model.args.sortagrad == -1 or model.args.sortagrad > 0
    converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
    # make minibatch list (variable length)
    recog = make_batchset(
        recog_json,
        16, # model.args.batch_size,
        model.args.maxlen_in,
        model.args.maxlen_out,
        model.args.minibatches,
        min_batch_size=model.args.ngpu if model.args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=model.args.batch_count,
        batch_bins=800000, #model.args.batch_bins,
        batch_frames_in=model.args.batch_frames_in,
        batch_frames_out=model.args.batch_frames_out,
        batch_frames_inout=model.args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    load_rc = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=model.args.preprocess_conf,
        preprocess_args={"train": False}, # Switch the mode of preprocessing
    )
    recog_iter = ChainerDataLoader(
        dataset=TransformDataset(
            recog, lambda data: converter([load_rc(data)]), utt=True, lang_label=args.lang_label
        ),
        batch_size=1,
        num_workers=model.args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    logging.info(f'Character list: {model.args.char_list}')
    logging.warning(f'lang model and weight: {args.lang_model} {args.lang_model_weight}')
    decoder = CTCBeamDecoder(
        labels=model.args.char_list, beam_width=args.beam_size, log_probs_input=True,
        model_path=args.lang_model, alpha=args.lang_model_weight
    )
    # with open(args.recog_json, "rb") as f:
    #     js = json.load(f)["utts"]
    # if model.args.wav2vec_feature:
    #     modify_data_json(model.args, js)
    js = read_json_data(model.args, args.recog_json)
    new_js = {}
    with torch.no_grad():
        for batch in recog_iter:
            lang_labels = None
            lang_labels_for_masking = None
            if args.lang_label:
                # batch layout with language labels: (labels, names, tensors...)
                lang_labels, names, x = batch[0], batch[1], batch[2:]
                if args.mask_phoneme:
                    lang_labels_for_masking = lang_labels # true lang labels
                if args.fake_lang_label:
                    # pretend every utterance is in the fake language
                    lang_labels = [args.fake_lang_label] * len(lang_labels)
            else:
                names, x = batch[0], batch[1:]
            logging.warning(f'{lang_labels}')
            # if np.prod(list(x[0].size())) >= 350000:
            #     logging.warning(f'batch {x[0].size()} {np.prod(list(x[0].size())) } too large, skip')
            #     continue
            # logging.warning(f"Recog deep [names] {names}")
            x = _recursive_to(x, device)
            xs_pad, ilens, ys_pad = x
            logprobs, seq_lens = model.encode_with_length(
                xs_pad, ilens, lang_labels=lang_labels,
                mask_phoneme=args.mask_phoneme,
                lang_labels_for_masking=lang_labels_for_masking
            )
            ## just for check
            # with open('check.pk', 'wb') as f:
            #     o = model.args.char_list, logprobs, seq_lens, ys_pad, lang_labels
            #     pickle.dump(o, f)
            # raise
            # logging.warning(f'Recog Deep [logprobs] {logprobs.size()}')
            out, scores, offsets, seq_lens = decoder.decode(logprobs, seq_lens, )
            for hyp, trn, length, name, score in zip(out, ys_pad, seq_lens, names, scores): # iterate batch
                # logging.warning(f'{score}')
                # top beam, trimmed to its decoded length
                best_hyp = hyp[0,:length[0]]
                new_js[name] = add_results_to_json(
                    js[name], [{"yseq": best_hyp, "score": float(score[0])}], model.args.char_list
                )
            # logging.warning(f'Recog deep [new_js] {new_js}')
            # break
            # raise
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def recog_ctconly_lang(args):
    """Decode with a CTC-only model conditioned on a one-hot language label.

    Like ``recog_ctconly`` but each batch carries a per-utterance language
    one-hot built from ``args.train_langs`` that is fed to the encoder, and
    decoding uses a plain CTC beam search without an external LM.  Results
    are written to ``args.result_label``.

    Notes:
        The previous backend espnet.asr.pytorch_backend.asr.recog
        only supports E2E and RNNLM

    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details
    """
    logging.warning(f'RECOGCTCONLYLANG')
    logging.warning(f'all_langs {args.train_langs}')
    logging.warning("experimental API for custom LMs is selected by --api v2")
    # unsupported decoding modes are rejected up front
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    model.eval()
    # NOTE(review): appears unused — batches are loaded via `load_rc` below
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    # logging.warning(f'Recog deep [model.args] {model.args}')
    with open(args.recog_json, "rb") as f:
        recog_json = json.load(f)["utts"]
    # optionally decode only a seeded random subset of the data
    if args.recog_size is not None and args.recog_size > 0:
        random.seed(args.seed)
        items = list(recog_json.items())
        random.shuffle(items)
        recog_json = OrderedDict(items[:args.recog_size])
    logging.warning(f'data json len {len(recog_json)}')
    use_sortagrad = model.args.sortagrad == -1 or model.args.sortagrad > 0
    converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
    # make minibatch list (variable length)
    recog = make_batchset(
        recog_json,
        16, # model.args.batch_size,
        model.args.maxlen_in,
        model.args.maxlen_out,
        model.args.minibatches,
        min_batch_size=model.args.ngpu if model.args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=model.args.batch_count,
        batch_bins=400000, #model.args.batch_bins,
        batch_frames_in=model.args.batch_frames_in,
        batch_frames_out=model.args.batch_frames_out,
        batch_frames_inout=model.args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    load_rc = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=model.args.preprocess_conf,
        preprocess_args={"train": False}, # Switch the mode of preprocessing
    )
    recog_iter = ChainerDataLoader(
        dataset=TransformDataset(
            recog, lambda data: converter([load_rc(data)]), utt=True, lang_onehot=True, all_lang=args.train_langs
        ),
        batch_size=1,
        num_workers=model.args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    logging.info(f'Character list: {model.args.char_list}')
    decoder = CTCBeamDecoder(
        labels=model.args.char_list, beam_width=args.beam_size, log_probs_input=True
    )
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    # NOTE(review): unlike recog_ctconly() this loop is not wrapped in
    # torch.no_grad(), so autograd state is tracked needlessly — confirm
    # whether that is intentional.
    for batch in recog_iter:
        names, x = batch[0], batch[1:]
        # logging.warning(f"Recog deep [names] {names}")
        x = _recursive_to(x, device)
        # batch layout: (language one-hots, padded inputs, lengths, targets)
        langs, xs_pad, ilens, ys_pad = x
        logging.warning(f'parameters, names {names}')
        logging.warning(f'parameters, langs {langs}')
        # logging.warning(f'parameters, names {names}')
        logprobs, seq_lens = model.encode_with_length(langs, xs_pad, ilens)
        # logging.warning(f'Recog Deep [logprobs] {logprobs.size()}')
        out, scores, offsets, seq_lens = decoder.decode(logprobs, seq_lens)
        for hyp, trn, length, name, score in zip(out, ys_pad, seq_lens, names, scores): # iterate batch
            # logging.warning(f'{score}')
            # top beam, trimmed to its decoded length
            best_hyp = hyp[0,:length[0]]
            new_js[name] = add_results_to_json(
                js[name], [{"yseq": best_hyp, "score": float(score[0])}], model.args.char_list
            )
        # logging.warning(f'Recog deep [new_js] {new_js}')
        # break
        # raise
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
def recog_seg(args):
    """Frame-level (segmentation-style) decoding with per-frame argmax.

    Instead of beam search, this takes the argmax of the encoder logits,
    writes the results to ``args.result_label``, and can additionally dump
    predictions/probabilities/hidden embeddings to
    ``args.embedding_save_dir`` as a pickle.

    Notes:
        The previous backend espnet.asr.pytorch_backend.asr.recog
        only supports E2E and RNNLM

    Args:
        args (namespace): The program arguments.
        See py:func:`espnet.bin.asr_recog.get_parser` for details
    """
    logging.warning(f'RECOGSEG')
    logging.warning("experimental API for custom LMs is selected by --api v2")
    # unsupported decoding modes are rejected up front
    if args.batchsize > 1:
        raise NotImplementedError("multi-utt batch decoding is not implemented")
    if args.streaming_mode is not None:
        raise NotImplementedError("streaming mode is not implemented")
    if args.word_rnnlm:
        raise NotImplementedError("word LM is not implemented")
    set_deterministic_pytorch(args)
    model, train_args = load_trained_model(args.model)
    assert isinstance(model, ASRInterface)
    model.eval()
    # NOTE(review): appears unused — batches are loaded via `load_rc` below
    load_inputs_and_targets = LoadInputsAndTargets(
        mode="asr",
        load_output=False,
        sort_in_input_length=False,
        preprocess_conf=train_args.preprocess_conf
        if args.preprocess_conf is None
        else args.preprocess_conf,
        preprocess_args={"train": False},
    )
    if args.ngpu == 1:
        device = "cuda"
    else:
        device = "cpu"
    dtype = getattr(torch, args.dtype)
    logging.info(f"Decoding device={device}, dtype={dtype}")
    model.to(device=device, dtype=dtype).eval()
    # logging.warning(f'Recog deep [model.args] {model.args}')
    with open(args.recog_json, "rb") as f:
        recog_json = json.load(f)["utts"]
    use_sortagrad = model.args.sortagrad == -1 or model.args.sortagrad > 0
    converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
    # make minibatch list (variable length)
    recog = make_batchset(
        recog_json,
        16, # model.args.batch_size,
        model.args.maxlen_in,
        model.args.maxlen_out,
        model.args.minibatches,
        min_batch_size=model.args.ngpu if model.args.ngpu > 1 else 1,
        shortest_first=use_sortagrad,
        count=model.args.batch_count,
        batch_bins=400000, #model.args.batch_bins,
        batch_frames_in=model.args.batch_frames_in,
        batch_frames_out=model.args.batch_frames_out,
        batch_frames_inout=model.args.batch_frames_inout,
        iaxis=0,
        oaxis=0,
    )
    load_rc = LoadInputsAndTargets(
        mode="asr",
        load_output=True,
        preprocess_conf=model.args.preprocess_conf,
        preprocess_args={"train": False}, # Switch the mode of preprocessing
    )
    recog_iter = ChainerDataLoader(
        dataset=TransformDataset(
            recog, lambda data: converter([load_rc(data)]), utt=True, lang_label=args.lang2ph
        ),
        batch_size=1,
        num_workers=model.args.n_iter_processes,
        shuffle=not use_sortagrad,
        collate_fn=lambda x: x[0],
    )
    logging.info(f'Character list: {model.args.char_list}')
    if args.lang2ph is not None:
        # if args.irm_lang2ph is not None:
        # Build per-language phone-id lists (special symbols always
        # included).  NOTE(review): unlike recog_ctconly() this does NOT
        # intersect with char_list first, so a phone missing from
        # char_list would raise ValueError in index() — confirm inputs.
        with open(args.lang2ph, 'r') as f:
            lang2ph = json.load(f)
        lang2phid = {}
        for lang, phones in lang2ph.items():
            lang2phid[lang] = list(map(model.args.char_list.index, phones+['<blank>', '<unk>', '<space>', '<eos>']))
        model.lang2phid = lang2phid
        logging.warning(f'model lang2phid {model.lang2phid}')
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    new_js = {}
    # embeddings are only collected when a save path was given
    save_embedding = args.embedding_save_dir is not None and args.embedding_save_dir != ''
    if save_embedding:
        embedding_js = {}
    for batch in recog_iter:
        if args.lang2ph:
            # batch layout with language labels: (labels, names, tensors...)
            lang_labels, names, x = batch[0], batch[1], batch[2:]
        else:
            names, x = batch[0], batch[1:]
            lang_labels = None
        # logging.warning(f"Recog deep [names] {names}")
        x = _recursive_to(x, device)
        xs_pad, ilens, ys_pad = x
        if not save_embedding:
            logits = model.encode(xs_pad, ilens, hidden=save_embedding, lang_labels=lang_labels)
            embeddings = [0] * len(logits) # dummy list
        else:
            # hidden=True additionally returns the hidden embeddings
            logits, embeddings = model.encode(xs_pad, ilens, hidden=save_embedding, lang_labels=lang_labels)
        # logging.warning(f"Recog logit {logits.size()}")
        # torch.argmax(logit.view())
        predicts = torch.argmax(logits, dim=1)
        probs = torch.softmax(logits, dim=1)
        # logging.warning(f"Recog logit {logits.size()} {predicts.size()}")
        # logging.warning(f"Recog logit {predicts[:10]}")
        # logging.warning(f"Recog logit {ys_pad[:10]}")
        for pred, trn, name, logit, prob, embedding in zip(predicts, ys_pad, names, logits, probs, embeddings):
            best_hyp = pred.view(-1)
            # logging.warning(f'{torch.nn.functional.pad(best_hyp, (1,0))}, {model.args.char_list[pred]}')
            # pad one leading 0 onto yseq — presumably so add_results_to_json
            # sees a leading start/blank symbol; confirm against that helper
            new_js[name] = add_results_to_json(
                js[name], [{"yseq": torch.nn.functional.pad(best_hyp, (1,0)), "score": float(logit[best_hyp])}], model.args.char_list
            )
            if save_embedding:
                cur_save = {'pred': pred.cpu().numpy(), 'GT': trn.cpu().numpy(), 'prob': prob.cpu().numpy(), 'embedding': embedding.cpu().numpy()}
                embedding_js[name] = cur_save
            # logging.warning(f'{new_js[name]}')
    with open(args.result_label, "wb") as f:
        f.write(
            json.dumps(
                {"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
            ).encode("utf_8")
        )
    if save_embedding:
        with open(args.embedding_save_dir, "wb") as f:
            pickle.dump(embedding_js, f)
# Generated by Django 2.0.7 on 2018-10-09 00:59
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the ``fill_answer`` field from the ``fill`` model.

    Auto-generated; follows 0013_auto_20181009_0053 in the ``course`` app.
    """

    dependencies = [
        ('course', '0013_auto_20181009_0053'),
    ]

    operations = [
        # removes the column; any data in fill_answer is discarded on migrate
        migrations.RemoveField(
            model_name='fill',
            name='fill_answer',
        ),
    ]
#printing condition 3
def grade(score):
    """Return the grade message for a numeric *score*.

    Boundaries (all inclusive): >=80 A, >=70 B, >=60 C, >=50 D,
    otherwise a retry message.  Fixes two defects in the original chain:
    ``a > 70`` (so a score of exactly 70 fell through to C grade) and the
    35-49 range, which printed nothing at all.
    """
    if score >= 80:
        return "A grade"
    elif score >= 70:
        return "B grade"
    elif score >= 60:
        return "C grade"
    elif score >= 50:
        return "D grade"
    else:
        return "better luck next time"


if __name__ == "__main__":
    # checking the condition
    a = int(input("enter the value of a :"))
    print(grade(a))
from django import template
register = template.Library()
@register.simple_tag(name='GET_string', takes_context=True)
def GET_string(context):
    """Expose the current request's query string (URL-encoded) to templates."""
    request = context['request']
    return request.GET.urlencode()
#!/usr/bin/python3
# HISTORY
# 0.2 added correct exiting
# 0.1 initial version
# TODO
#Commands are: HELO, INIT, SET (pin,value), SETALL (values), EXIT
import socket
import time
import sys
import pickle
from _thread import *
BUFSIZ = 1024  # max bytes read per recv(); a pickled request larger than this would be truncated
class PWM:
    """One PWM channel driven through the pi-blaster daemon's FIFO."""

    def __init__(self, pin):
        """Remember the GPIO *pin* number this channel writes to."""
        self.pin = pin

    def set(self, value):
        """Set the duty cycle by writing ``pin=value`` to /dev/pi-blaster.

        *value* is formatted with three decimals, matching pi-blaster's
        expected ``<pin>=<fraction>`` protocol.
        """
        # `with` guarantees the FIFO handle is closed even if the write
        # raises (the original open/write/close leaked it on error).
        with open("/dev/pi-blaster", "w") as f:
            f.write('%d=%.3f\n' % (self.pin, value))
def conn_thread(conn):
    """Serve one client connection until EXIT or global shutdown.

    Repeatedly receives a pickled request ``[req_id, command, args...]``
    from *conn* and replies with a pickled
    ``[[req_id, elapsed_seconds], [status, ...]]`` list.
    Commands: HELO, INIT, SET (pin, value), SETALL (values), EXIT.
    An EXIT request sets the global ``exiting`` flag and shuts down the
    listening socket ``s`` so the main accept loop terminates too.
    """
    global exiting
    while not exiting:
        # SECURITY NOTE: unpickling raw network data executes arbitrary
        # code if the peer is untrusted — acceptable only on a trusted
        # private network.
        request = pickle.loads(conn.recv(BUFSIZ))
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed intervals.
        time_start = time.perf_counter()
        debug_print("has been recieved '" + str(request) + "'")
        if request[1] == 'HELO':
            reply = [[request[0], time.perf_counter() - time_start],
                     ["OK", module_name + ' at your service']]
        elif request[1] == 'INIT':
            # stop every motor
            for m in motors:
                m.set(0)
            reply = [[request[0], time.perf_counter() - time_start], ["OK", "INIT OK"]]
            debug_print("INIT OK")
        elif request[1] == 'SET':
            m = request[2][0]
            vol = request[2][1]
            motors[m].set(vol)
            reply = [[request[0], time.perf_counter() - time_start], ["OK"]]
            debug_print("motor " + str(m) + ": set " + str(vol))
        elif request[1] == 'SETALL':
            debug_msg = 'SETALL: '
            for i in range(len(motors)):
                vol = request[2][i]
                motors[i].set(vol)
                debug_msg += '{:0.3f} '.format(vol)
            reply = [[request[0], time.perf_counter() - time_start], ["OK"]]
            debug_print(debug_msg)
        elif request[1] == 'EXIT':
            # stop all motors before shutting down (was a hard-coded
            # range(4); iterate the actual motors list like INIT does)
            for m in motors:
                m.set(0)
            reply = [[request[0], time.perf_counter() - time_start], ['OK', 'EXITING']]
            debug_print("'EXITING' send")
        else:
            reply = [[request[0], time.perf_counter() - time_start], ['ERR', 'UNKNOWN COMMAND']]
            debug_print("sending 'UNKNOWN COMMAND'")
        conn.sendall(pickle.dumps(reply, 2))
        if request[1] == 'EXIT':
            exiting = True
            debug_print("exit request recieved, exiting")
            # unblock the accept() in the main loop so the server can exit
            s.shutdown(socket.SHUT_RDWR)
    conn.close()
def debug_print(msg):
    """Print *msg* tagged with the module name, unless running as a daemon."""
    if daemon:
        return
    print("[" + module_name + "]: " + msg)
# --- command-line setup -------------------------------------------------
if len(sys.argv) == 3:
    # invoked with explicit host/port: run quietly as a daemon
    host = sys.argv[1]
    port = int(sys.argv[2])
    daemon = True
else:
    # interactive/debug defaults
    host = 'localhost'
    port = 10100
    daemon = False
module_name = 'mover_' + str(port)
# flag flipped by conn_thread() when an EXIT request arrives
exiting = False
debug_print( "starting at '" + host + ":" + str(port) + "'" )
debug_print( "initializing motors" )
#m0 = PWM( 17 )
#m1 = PWM( 18 )
#m2 = PWM( 27 )
#m3 = PWM( 4 )
m4 = PWM( 22 )
m5 = PWM( 23 )
m6 = PWM( 24 )
m7 = PWM( 25 )
#motors = [ m0, m1, m2, m3 ]
motors = [ m4, m5, m6, m7 ]
#motors = [ m0, m1, m2, m3, m4, m5, m6, m7 ]
# start with all motors stopped
for m in motors:
    m.set( 0 )
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
try:
    s.bind( (host,port) )
except socket.error as msg:
    debug_print( 'bind failed. error code : ' + str(msg) )
    sys.exit()
s.listen(2)
debug_print( 'now listening port' )
# Accept loop: one thread per client.  conn_thread() sets `exiting` and
# shuts the socket down, which makes the blocked accept() below raise.
while not exiting:
    try:
        conn,addr = s.accept()
    except socket.error as msg:
        if exiting:
            # accept() interrupted by our own shutdown — normal exit path
            debug_print( 'exiting exception' )
            time.sleep( 1 )
            break
        else:
            debug_print( 'accept failed. error code : ' + str(msg) )
            sys.exit()
    debug_print( "connected from " + str(addr[0]) + ':' + str(addr[1]) )
    start_new_thread( conn_thread, (conn,) )
s.close()
debug_print( "exiting" )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.